mirror of
https://github.com/neondatabase/neon.git
synced 2026-03-15 06:10:36 +00:00
Compare commits
253 Commits
release
...
erik/commu
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b29a63a3d2 | ||
|
|
6c8a144e25 | ||
|
|
c7761b689d | ||
|
|
eaec6e2fb4 | ||
|
|
f7e403eea1 | ||
|
|
464ed0cbc7 | ||
|
|
f55ccd2c17 | ||
|
|
c9758dc46b | ||
|
|
78c5d70b4c | ||
|
|
fc35be0397 | ||
|
|
a7a6df3d6f | ||
|
|
bfb4b0991d | ||
|
|
c18f4a52f8 | ||
|
|
48535798ba | ||
|
|
dc35bda074 | ||
|
|
e2c3c2eccb | ||
|
|
cb50291dcd | ||
|
|
10a7d49726 | ||
|
|
f765bd3677 | ||
|
|
edcdd6ca9c | ||
|
|
62af2a14e2 | ||
|
|
367d96e25b | ||
|
|
87f01a25ab | ||
|
|
56eb511618 | ||
|
|
ddeb3f3ed3 | ||
|
|
69dbad700c | ||
|
|
0d5f4dd979 | ||
|
|
1637fbce25 | ||
|
|
8cd5370c00 | ||
|
|
bceafc6c32 | ||
|
|
dcf8e0565f | ||
|
|
c14cf15b52 | ||
|
|
5da06d4129 | ||
|
|
f30c59bec9 | ||
|
|
47c099a0fb | ||
|
|
b67e8f2edc | ||
|
|
b5b1db29bb | ||
|
|
ed4652b65b | ||
|
|
60d87966b8 | ||
|
|
8db138ef64 | ||
|
|
1ee24602d5 | ||
|
|
732bd26e70 | ||
|
|
08399672be | ||
|
|
d63f1d259a | ||
|
|
4053092408 | ||
|
|
ccf88e9375 | ||
|
|
a79fd3bda7 | ||
|
|
e1b58d5d69 | ||
|
|
9ae004f3bc | ||
|
|
341c5f53d8 | ||
|
|
4b06b547c1 | ||
|
|
74e0d85a04 | ||
|
|
23ba42446b | ||
|
|
71a83daac2 | ||
|
|
1b8355a9f9 | ||
|
|
e14bb4be39 | ||
|
|
f3a6c0d8ff | ||
|
|
17ec37aab2 | ||
|
|
d6ec1f1a1c | ||
|
|
6f3fb4433f | ||
|
|
d7678df445 | ||
|
|
03d9f0ec41 | ||
|
|
56845f2da2 | ||
|
|
9a37bfdf63 | ||
|
|
4c916552e8 | ||
|
|
50fbf4ac53 | ||
|
|
cb698a3951 | ||
|
|
f6cc5cbd0c | ||
|
|
00affada26 | ||
|
|
90d3c09c24 | ||
|
|
6c398aeae7 | ||
|
|
1856bbbb9f | ||
|
|
bd46dd60a0 | ||
|
|
5f2d476a58 | ||
|
|
3231cb6138 | ||
|
|
e558e0da5c | ||
|
|
70bf2e088d | ||
|
|
da3f9ee72d | ||
|
|
88d1127bf4 | ||
|
|
794bb7a9e8 | ||
|
|
42e4e5a418 | ||
|
|
96a817fa2b | ||
|
|
e7b057f2e8 | ||
|
|
956c2f4378 | ||
|
|
3293e4685e | ||
|
|
6f8650782f | ||
|
|
14214eb853 | ||
|
|
d4b4724921 | ||
|
|
9aba9550dd | ||
|
|
375e8e5592 | ||
|
|
52c586f678 | ||
|
|
de97b73d6e | ||
|
|
d8556616c9 | ||
|
|
d8296e60e6 | ||
|
|
7263d6e2e5 | ||
|
|
86fb7b966a | ||
|
|
0c099b0944 | ||
|
|
2fe27f510d | ||
|
|
19b5618578 | ||
|
|
12dade35fa | ||
|
|
1ec63bd6bc | ||
|
|
7012b4aa90 | ||
|
|
2cc28c75be | ||
|
|
bf01145ae4 | ||
|
|
8ab8fc11a3 | ||
|
|
6f0af96a54 | ||
|
|
9913d2668a | ||
|
|
2fefece77d | ||
|
|
471191e64e | ||
|
|
f6761760a2 | ||
|
|
0bce818d5e | ||
|
|
48be1da6ef | ||
|
|
d2efc80e40 | ||
|
|
958c2577f5 | ||
|
|
175c2e11e3 | ||
|
|
efdb07e7b6 | ||
|
|
b0970b415c | ||
|
|
9d3e07ef2c | ||
|
|
7429dd711c | ||
|
|
88ac1e356b | ||
|
|
c3cb1ab98d | ||
|
|
81ac4ef43a | ||
|
|
a5b0fc560c | ||
|
|
67b04f8ab3 | ||
|
|
9d9e3cd08a | ||
|
|
97a8f4ef85 | ||
|
|
39f31957e3 | ||
|
|
924c6a6fdf | ||
|
|
7020476bf5 | ||
|
|
80e948db93 | ||
|
|
bfb30d434c | ||
|
|
f3ba201800 | ||
|
|
8b7796cbfa | ||
|
|
fdc7e9c2a4 | ||
|
|
a352d290eb | ||
|
|
8c122a1c98 | ||
|
|
74330920ee | ||
|
|
c3c136ef3a | ||
|
|
78b6da270b | ||
|
|
47664e40d4 | ||
|
|
b1e3161d4e | ||
|
|
4713715c59 | ||
|
|
1e74b52f7e | ||
|
|
e3ecdfbecc | ||
|
|
d08e553835 | ||
|
|
7fffb5b4df | ||
|
|
1fb3639170 | ||
|
|
00dfaa2eb4 | ||
|
|
ae740ca1bb | ||
|
|
24e6c68772 | ||
|
|
93a45708ff | ||
|
|
46b5c0be0b | ||
|
|
2d913ff125 | ||
|
|
e90be06d46 | ||
|
|
356ba67607 | ||
|
|
610ea22c46 | ||
|
|
1847f4de54 | ||
|
|
477648b8cd | ||
|
|
e8af3a2811 | ||
|
|
b603e3dddb | ||
|
|
83007782fd | ||
|
|
bb1e359872 | ||
|
|
ac87544e79 | ||
|
|
b6b122e07b | ||
|
|
782062014e | ||
|
|
d0b3629412 | ||
|
|
16d6898e44 | ||
|
|
f4d51c0f5c | ||
|
|
ec17ae0658 | ||
|
|
9ecce60ded | ||
|
|
e74a957045 | ||
|
|
396a16a3b2 | ||
|
|
7140a50225 | ||
|
|
68f18ccacf | ||
|
|
786888d93f | ||
|
|
255537dda1 | ||
|
|
8b494f6a24 | ||
|
|
28a61741b3 | ||
|
|
10b936bf03 | ||
|
|
2fb6164bf8 | ||
|
|
328f28dfe5 | ||
|
|
95838056da | ||
|
|
6145cfd1c2 | ||
|
|
6d451654f1 | ||
|
|
96b4de1de6 | ||
|
|
9fdf5fbb7e | ||
|
|
37c58522a2 | ||
|
|
4b6f02e47d | ||
|
|
8202c6172f | ||
|
|
69a47d789d | ||
|
|
b36f880710 | ||
|
|
745b750f33 | ||
|
|
f06bb2bbd8 | ||
|
|
b3c25418a6 | ||
|
|
33549bad1d | ||
|
|
009168d711 | ||
|
|
7c9bd542a6 | ||
|
|
014823b305 | ||
|
|
af9379ccf6 | ||
|
|
bb28109ffa | ||
|
|
60a0bec1c0 | ||
|
|
31fa7a545d | ||
|
|
ac464c5f2c | ||
|
|
0dddb1e373 | ||
|
|
3acb263e62 | ||
|
|
1e83398cdd | ||
|
|
be8ed81532 | ||
|
|
12b08c4b82 | ||
|
|
827358dd03 | ||
|
|
d367273000 | ||
|
|
e2bad5d9e9 | ||
|
|
5623e4665b | ||
|
|
8abb4dab6d | ||
|
|
731667ac37 | ||
|
|
6a1374d106 | ||
|
|
f7c908f2f0 | ||
|
|
86671e3a0b | ||
|
|
319cd74f73 | ||
|
|
0efefbf77c | ||
|
|
e6a4171fa1 | ||
|
|
0c25ea9e31 | ||
|
|
6692321026 | ||
|
|
791df28755 | ||
|
|
d20da994f4 | ||
|
|
6dbbdaae73 | ||
|
|
977bc09d2a | ||
|
|
44269fcd5e | ||
|
|
44cc648dc8 | ||
|
|
884e028a4a | ||
|
|
42df3e5453 | ||
|
|
fc743e284f | ||
|
|
d02f9a2139 | ||
|
|
083118e98e | ||
|
|
54cd2272f1 | ||
|
|
e40193e3c8 | ||
|
|
ce9f7bacc1 | ||
|
|
b7891f8fe8 | ||
|
|
5f2adaa9ad | ||
|
|
3e5e396c8d | ||
|
|
9d781c6fda | ||
|
|
cf5d038472 | ||
|
|
d785100c02 | ||
|
|
2c0d930e3d | ||
|
|
66171a117b | ||
|
|
df2806e7a0 | ||
|
|
07631692db | ||
|
|
4c77397943 | ||
|
|
7bb58be546 | ||
|
|
b5373de208 | ||
|
|
b86c610f42 | ||
|
|
0f520d79ab | ||
|
|
93eb7bb6b8 | ||
|
|
e58d0fece1 |
384
.github/workflows/benchbase_tpcc.yml
vendored
384
.github/workflows/benchbase_tpcc.yml
vendored
@@ -1,384 +0,0 @@
|
||||
name: TPC-C like benchmark using benchbase
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# * is a special character in YAML so you have to quote this string
|
||||
# ┌───────────── minute (0 - 59)
|
||||
# │ ┌───────────── hour (0 - 23)
|
||||
# │ │ ┌───────────── day of the month (1 - 31)
|
||||
# │ │ │ ┌───────────── month (1 - 12 or JAN-DEC)
|
||||
# │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT)
|
||||
- cron: '0 6 * * *' # run once a day at 6 AM UTC
|
||||
workflow_dispatch: # adds ability to run this manually
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash -euxo pipefail {0}
|
||||
|
||||
concurrency:
|
||||
# Allow only one workflow globally because we do not want to be too noisy in production environment
|
||||
group: benchbase-tpcc-workflow
|
||||
cancel-in-progress: false
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
benchbase-tpcc:
|
||||
strategy:
|
||||
fail-fast: false # allow other variants to continue even if one fails
|
||||
matrix:
|
||||
include:
|
||||
- warehouses: 50 # defines number of warehouses and is used to compute number of terminals
|
||||
max_rate: 800 # measured max TPS at scale factor based on experiments. Adjust if performance is better/worse
|
||||
min_cu: 0.25 # simulate free tier plan (0.25 -2 CU)
|
||||
max_cu: 2
|
||||
- warehouses: 500 # serverless plan (2-8 CU)
|
||||
max_rate: 2000
|
||||
min_cu: 2
|
||||
max_cu: 8
|
||||
- warehouses: 1000 # business plan (2-16 CU)
|
||||
max_rate: 2900
|
||||
min_cu: 2
|
||||
max_cu: 16
|
||||
max-parallel: 1 # we want to run each workload size sequentially to avoid noisy neighbors
|
||||
permissions:
|
||||
contents: write
|
||||
statuses: write
|
||||
id-token: write # aws-actions/configure-aws-credentials
|
||||
env:
|
||||
PG_CONFIG: /tmp/neon/pg_install/v17/bin/pg_config
|
||||
PSQL: /tmp/neon/pg_install/v17/bin/psql
|
||||
PG_17_LIB_PATH: /tmp/neon/pg_install/v17/lib
|
||||
POSTGRES_VERSION: 17
|
||||
runs-on: [ self-hosted, us-east-2, x64 ]
|
||||
timeout-minutes: 1440
|
||||
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Configure AWS credentials # necessary to download artefacts
|
||||
uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2
|
||||
with:
|
||||
aws-region: eu-central-1
|
||||
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||
role-duration-seconds: 18000 # 5 hours is currently max associated with IAM role
|
||||
|
||||
- name: Download Neon artifact
|
||||
uses: ./.github/actions/download
|
||||
with:
|
||||
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
|
||||
path: /tmp/neon/
|
||||
prefix: latest
|
||||
aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||
|
||||
- name: Create Neon Project
|
||||
id: create-neon-project-tpcc
|
||||
uses: ./.github/actions/neon-project-create
|
||||
with:
|
||||
region_id: aws-us-east-2
|
||||
postgres_version: ${{ env.POSTGRES_VERSION }}
|
||||
compute_units: '[${{ matrix.min_cu }}, ${{ matrix.max_cu }}]'
|
||||
api_key: ${{ secrets.NEON_PRODUCTION_API_KEY_4_BENCHMARKS }}
|
||||
api_host: console.neon.tech # production (!)
|
||||
|
||||
- name: Initialize Neon project
|
||||
env:
|
||||
BENCHMARK_TPCC_CONNSTR: ${{ steps.create-neon-project-tpcc.outputs.dsn }}
|
||||
PROJECT_ID: ${{ steps.create-neon-project-tpcc.outputs.project_id }}
|
||||
run: |
|
||||
echo "Initializing Neon project with project_id: ${PROJECT_ID}"
|
||||
export LD_LIBRARY_PATH=${PG_17_LIB_PATH}
|
||||
|
||||
# Retry logic for psql connection with 1 minute sleep between attempts
|
||||
for attempt in {1..3}; do
|
||||
echo "Attempt ${attempt}/3: Creating extensions in Neon project"
|
||||
if ${PSQL} "${BENCHMARK_TPCC_CONNSTR}" -c "CREATE EXTENSION IF NOT EXISTS neon; CREATE EXTENSION IF NOT EXISTS neon_utils;"; then
|
||||
echo "Successfully created extensions"
|
||||
break
|
||||
else
|
||||
echo "Failed to create extensions on attempt ${attempt}"
|
||||
if [ ${attempt} -lt 3 ]; then
|
||||
echo "Waiting 60 seconds before retry..."
|
||||
sleep 60
|
||||
else
|
||||
echo "All attempts failed, exiting"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
echo "BENCHMARK_TPCC_CONNSTR=${BENCHMARK_TPCC_CONNSTR}" >> $GITHUB_ENV
|
||||
|
||||
- name: Generate BenchBase workload configuration
|
||||
env:
|
||||
WAREHOUSES: ${{ matrix.warehouses }}
|
||||
MAX_RATE: ${{ matrix.max_rate }}
|
||||
run: |
|
||||
echo "Generating BenchBase configs for warehouses: ${WAREHOUSES}, max_rate: ${MAX_RATE}"
|
||||
|
||||
# Extract hostname and password from connection string
|
||||
# Format: postgresql://username:password@hostname/database?params (no port for Neon)
|
||||
HOSTNAME=$(echo "${BENCHMARK_TPCC_CONNSTR}" | sed -n 's|.*://[^:]*:[^@]*@\([^/]*\)/.*|\1|p')
|
||||
PASSWORD=$(echo "${BENCHMARK_TPCC_CONNSTR}" | sed -n 's|.*://[^:]*:\([^@]*\)@.*|\1|p')
|
||||
|
||||
echo "Extracted hostname: ${HOSTNAME}"
|
||||
|
||||
# Use runner temp (NVMe) as working directory
|
||||
cd "${RUNNER_TEMP}"
|
||||
|
||||
# Copy the generator script
|
||||
cp "${GITHUB_WORKSPACE}/test_runner/performance/benchbase_tpc_c_helpers/generate_workload_size.py" .
|
||||
|
||||
# Generate configs and scripts
|
||||
python3 generate_workload_size.py \
|
||||
--warehouses ${WAREHOUSES} \
|
||||
--max-rate ${MAX_RATE} \
|
||||
--hostname ${HOSTNAME} \
|
||||
--password ${PASSWORD} \
|
||||
--runner-arch ${{ runner.arch }}
|
||||
|
||||
# Fix path mismatch: move generated configs and scripts to expected locations
|
||||
mv ../configs ./configs
|
||||
mv ../scripts ./scripts
|
||||
|
||||
- name: Prepare database (load data)
|
||||
env:
|
||||
WAREHOUSES: ${{ matrix.warehouses }}
|
||||
run: |
|
||||
cd "${RUNNER_TEMP}"
|
||||
|
||||
echo "Loading ${WAREHOUSES} warehouses into database..."
|
||||
|
||||
# Run the loader script and capture output to log file while preserving stdout/stderr
|
||||
./scripts/load_${WAREHOUSES}_warehouses.sh 2>&1 | tee "load_${WAREHOUSES}_warehouses.log"
|
||||
|
||||
echo "Database loading completed"
|
||||
|
||||
- name: Run TPC-C benchmark (warmup phase, then benchmark at 70% of configuredmax TPS)
|
||||
env:
|
||||
WAREHOUSES: ${{ matrix.warehouses }}
|
||||
run: |
|
||||
cd "${RUNNER_TEMP}"
|
||||
|
||||
echo "Running TPC-C benchmark with ${WAREHOUSES} warehouses..."
|
||||
|
||||
# Run the optimal rate benchmark
|
||||
./scripts/execute_${WAREHOUSES}_warehouses_opt_rate.sh
|
||||
|
||||
echo "Benchmark execution completed"
|
||||
|
||||
- name: Run TPC-C benchmark (warmup phase, then ramp down TPS and up again in 5 minute intervals)
|
||||
|
||||
env:
|
||||
WAREHOUSES: ${{ matrix.warehouses }}
|
||||
run: |
|
||||
cd "${RUNNER_TEMP}"
|
||||
|
||||
echo "Running TPC-C ramp-down-up with ${WAREHOUSES} warehouses..."
|
||||
|
||||
# Run the optimal rate benchmark
|
||||
./scripts/execute_${WAREHOUSES}_warehouses_ramp_up.sh
|
||||
|
||||
echo "Benchmark execution completed"
|
||||
|
||||
- name: Process results (upload to test results database and generate diagrams)
|
||||
env:
|
||||
WAREHOUSES: ${{ matrix.warehouses }}
|
||||
MIN_CU: ${{ matrix.min_cu }}
|
||||
MAX_CU: ${{ matrix.max_cu }}
|
||||
PROJECT_ID: ${{ steps.create-neon-project-tpcc.outputs.project_id }}
|
||||
REVISION: ${{ github.sha }}
|
||||
PERF_DB_CONNSTR: ${{ secrets.PERF_TEST_RESULT_CONNSTR }}
|
||||
run: |
|
||||
cd "${RUNNER_TEMP}"
|
||||
|
||||
echo "Creating temporary Python environment for results processing..."
|
||||
|
||||
# Create temporary virtual environment
|
||||
python3 -m venv temp_results_env
|
||||
source temp_results_env/bin/activate
|
||||
|
||||
# Install required packages in virtual environment
|
||||
pip install matplotlib pandas psycopg2-binary
|
||||
|
||||
echo "Copying results processing scripts..."
|
||||
|
||||
# Copy both processing scripts
|
||||
cp "${GITHUB_WORKSPACE}/test_runner/performance/benchbase_tpc_c_helpers/generate_diagrams.py" .
|
||||
cp "${GITHUB_WORKSPACE}/test_runner/performance/benchbase_tpc_c_helpers/upload_results_to_perf_test_results.py" .
|
||||
|
||||
echo "Processing load phase metrics..."
|
||||
|
||||
# Find and process load log
|
||||
LOAD_LOG=$(find . -name "load_${WAREHOUSES}_warehouses.log" -type f | head -1)
|
||||
if [ -n "$LOAD_LOG" ]; then
|
||||
echo "Processing load metrics from: $LOAD_LOG"
|
||||
python upload_results_to_perf_test_results.py \
|
||||
--load-log "$LOAD_LOG" \
|
||||
--run-type "load" \
|
||||
--warehouses "${WAREHOUSES}" \
|
||||
--min-cu "${MIN_CU}" \
|
||||
--max-cu "${MAX_CU}" \
|
||||
--project-id "${PROJECT_ID}" \
|
||||
--revision "${REVISION}" \
|
||||
--connection-string "${PERF_DB_CONNSTR}"
|
||||
else
|
||||
echo "Warning: Load log file not found: load_${WAREHOUSES}_warehouses.log"
|
||||
fi
|
||||
|
||||
echo "Processing warmup results for optimal rate..."
|
||||
|
||||
# Find and process warmup results
|
||||
WARMUP_CSV=$(find results_warmup -name "*.results.csv" -type f | head -1)
|
||||
WARMUP_JSON=$(find results_warmup -name "*.summary.json" -type f | head -1)
|
||||
|
||||
if [ -n "$WARMUP_CSV" ] && [ -n "$WARMUP_JSON" ]; then
|
||||
echo "Generating warmup diagram from: $WARMUP_CSV"
|
||||
python generate_diagrams.py \
|
||||
--input-csv "$WARMUP_CSV" \
|
||||
--output-svg "warmup_${WAREHOUSES}_warehouses_performance.svg" \
|
||||
--title-suffix "Warmup at max TPS"
|
||||
|
||||
echo "Uploading warmup metrics from: $WARMUP_JSON"
|
||||
python upload_results_to_perf_test_results.py \
|
||||
--summary-json "$WARMUP_JSON" \
|
||||
--results-csv "$WARMUP_CSV" \
|
||||
--run-type "warmup" \
|
||||
--min-cu "${MIN_CU}" \
|
||||
--max-cu "${MAX_CU}" \
|
||||
--project-id "${PROJECT_ID}" \
|
||||
--revision "${REVISION}" \
|
||||
--connection-string "${PERF_DB_CONNSTR}"
|
||||
else
|
||||
echo "Warning: Missing warmup results files (CSV: $WARMUP_CSV, JSON: $WARMUP_JSON)"
|
||||
fi
|
||||
|
||||
echo "Processing optimal rate results..."
|
||||
|
||||
# Find and process optimal rate results
|
||||
OPTRATE_CSV=$(find results_opt_rate -name "*.results.csv" -type f | head -1)
|
||||
OPTRATE_JSON=$(find results_opt_rate -name "*.summary.json" -type f | head -1)
|
||||
|
||||
if [ -n "$OPTRATE_CSV" ] && [ -n "$OPTRATE_JSON" ]; then
|
||||
echo "Generating optimal rate diagram from: $OPTRATE_CSV"
|
||||
python generate_diagrams.py \
|
||||
--input-csv "$OPTRATE_CSV" \
|
||||
--output-svg "benchmark_${WAREHOUSES}_warehouses_performance.svg" \
|
||||
--title-suffix "70% of max TPS"
|
||||
|
||||
echo "Uploading optimal rate metrics from: $OPTRATE_JSON"
|
||||
python upload_results_to_perf_test_results.py \
|
||||
--summary-json "$OPTRATE_JSON" \
|
||||
--results-csv "$OPTRATE_CSV" \
|
||||
--run-type "opt-rate" \
|
||||
--min-cu "${MIN_CU}" \
|
||||
--max-cu "${MAX_CU}" \
|
||||
--project-id "${PROJECT_ID}" \
|
||||
--revision "${REVISION}" \
|
||||
--connection-string "${PERF_DB_CONNSTR}"
|
||||
else
|
||||
echo "Warning: Missing optimal rate results files (CSV: $OPTRATE_CSV, JSON: $OPTRATE_JSON)"
|
||||
fi
|
||||
|
||||
echo "Processing warmup 2 results for ramp down/up phase..."
|
||||
|
||||
# Find and process warmup results
|
||||
WARMUP_CSV=$(find results_warmup -name "*.results.csv" -type f | tail -1)
|
||||
WARMUP_JSON=$(find results_warmup -name "*.summary.json" -type f | tail -1)
|
||||
|
||||
if [ -n "$WARMUP_CSV" ] && [ -n "$WARMUP_JSON" ]; then
|
||||
echo "Generating warmup diagram from: $WARMUP_CSV"
|
||||
python generate_diagrams.py \
|
||||
--input-csv "$WARMUP_CSV" \
|
||||
--output-svg "warmup_2_${WAREHOUSES}_warehouses_performance.svg" \
|
||||
--title-suffix "Warmup at max TPS"
|
||||
|
||||
echo "Uploading warmup metrics from: $WARMUP_JSON"
|
||||
python upload_results_to_perf_test_results.py \
|
||||
--summary-json "$WARMUP_JSON" \
|
||||
--results-csv "$WARMUP_CSV" \
|
||||
--run-type "warmup" \
|
||||
--min-cu "${MIN_CU}" \
|
||||
--max-cu "${MAX_CU}" \
|
||||
--project-id "${PROJECT_ID}" \
|
||||
--revision "${REVISION}" \
|
||||
--connection-string "${PERF_DB_CONNSTR}"
|
||||
else
|
||||
echo "Warning: Missing warmup results files (CSV: $WARMUP_CSV, JSON: $WARMUP_JSON)"
|
||||
fi
|
||||
|
||||
echo "Processing ramp results..."
|
||||
|
||||
# Find and process ramp results
|
||||
RAMPUP_CSV=$(find results_ramp_up -name "*.results.csv" -type f | head -1)
|
||||
RAMPUP_JSON=$(find results_ramp_up -name "*.summary.json" -type f | head -1)
|
||||
|
||||
if [ -n "$RAMPUP_CSV" ] && [ -n "$RAMPUP_JSON" ]; then
|
||||
echo "Generating ramp diagram from: $RAMPUP_CSV"
|
||||
python generate_diagrams.py \
|
||||
--input-csv "$RAMPUP_CSV" \
|
||||
--output-svg "ramp_${WAREHOUSES}_warehouses_performance.svg" \
|
||||
--title-suffix "ramp TPS down and up in 5 minute intervals"
|
||||
|
||||
echo "Uploading ramp metrics from: $RAMPUP_JSON"
|
||||
python upload_results_to_perf_test_results.py \
|
||||
--summary-json "$RAMPUP_JSON" \
|
||||
--results-csv "$RAMPUP_CSV" \
|
||||
--run-type "ramp-up" \
|
||||
--min-cu "${MIN_CU}" \
|
||||
--max-cu "${MAX_CU}" \
|
||||
--project-id "${PROJECT_ID}" \
|
||||
--revision "${REVISION}" \
|
||||
--connection-string "${PERF_DB_CONNSTR}"
|
||||
else
|
||||
echo "Warning: Missing ramp results files (CSV: $RAMPUP_CSV, JSON: $RAMPUP_JSON)"
|
||||
fi
|
||||
|
||||
# Deactivate and clean up virtual environment
|
||||
deactivate
|
||||
rm -rf temp_results_env
|
||||
rm upload_results_to_perf_test_results.py
|
||||
|
||||
echo "Results processing completed and environment cleaned up"
|
||||
|
||||
- name: Set date for upload
|
||||
id: set-date
|
||||
run: echo "date=$(date +%Y-%m-%d)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Configure AWS credentials # necessary to upload results
|
||||
uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2
|
||||
with:
|
||||
aws-region: us-east-2
|
||||
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||
role-duration-seconds: 900 # 900 is minimum value
|
||||
|
||||
- name: Upload benchmark results to S3
|
||||
env:
|
||||
S3_BUCKET: neon-public-benchmark-results
|
||||
S3_PREFIX: benchbase-tpc-c/${{ steps.set-date.outputs.date }}/${{ github.run_id }}/${{ matrix.warehouses }}-warehouses
|
||||
run: |
|
||||
echo "Redacting passwords from configuration files before upload..."
|
||||
|
||||
# Mask all passwords in XML config files
|
||||
find "${RUNNER_TEMP}/configs" -name "*.xml" -type f -exec sed -i 's|<password>[^<]*</password>|<password>redacted</password>|g' {} \;
|
||||
|
||||
echo "Uploading benchmark results to s3://${S3_BUCKET}/${S3_PREFIX}/"
|
||||
|
||||
# Upload the entire benchmark directory recursively
|
||||
aws s3 cp --only-show-errors --recursive "${RUNNER_TEMP}" s3://${S3_BUCKET}/${S3_PREFIX}/
|
||||
|
||||
echo "Upload completed"
|
||||
|
||||
- name: Delete Neon Project
|
||||
if: ${{ always() }}
|
||||
uses: ./.github/actions/neon-project-delete
|
||||
with:
|
||||
project_id: ${{ steps.create-neon-project-tpcc.outputs.project_id }}
|
||||
api_key: ${{ secrets.NEON_PRODUCTION_API_KEY_4_BENCHMARKS }}
|
||||
api_host: console.neon.tech # production (!)
|
||||
46
.github/workflows/proxy-benchmark.yml
vendored
46
.github/workflows/proxy-benchmark.yml
vendored
@@ -3,7 +3,7 @@ name: Periodic proxy performance test on unit-perf-aws-arm runners
|
||||
on:
|
||||
push: # TODO: remove after testing
|
||||
branches:
|
||||
- test-proxy-bench # Runs on pushes to test-proxy-bench branch
|
||||
- test-proxy-bench # Runs on pushes to branches starting with test-proxy-bench
|
||||
# schedule:
|
||||
# * is a special character in YAML so you have to quote this string
|
||||
# ┌───────────── minute (0 - 59)
|
||||
@@ -32,7 +32,7 @@ jobs:
|
||||
statuses: write
|
||||
contents: write
|
||||
pull-requests: write
|
||||
runs-on: [ self-hosted, unit-perf-aws-arm ]
|
||||
runs-on: [self-hosted, unit-perf-aws-arm]
|
||||
timeout-minutes: 60 # 1h timeout
|
||||
container:
|
||||
image: ghcr.io/neondatabase/build-tools:pinned-bookworm
|
||||
@@ -55,58 +55,30 @@ jobs:
|
||||
{
|
||||
echo "PROXY_BENCH_PATH=$PROXY_BENCH_PATH"
|
||||
echo "NEON_DIR=${RUNNER_TEMP}/neon"
|
||||
echo "NEON_PROXY_PATH=${RUNNER_TEMP}/neon/bin/proxy"
|
||||
echo "TEST_OUTPUT=${PROXY_BENCH_PATH}/test_output"
|
||||
echo ""
|
||||
} >> "$GITHUB_ENV"
|
||||
|
||||
- name: Cache poetry deps
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.cache/pypoetry/virtualenvs
|
||||
key: v2-${{ runner.os }}-${{ runner.arch }}-python-deps-bookworm-${{ hashFiles('poetry.lock') }}
|
||||
|
||||
- name: Install Python deps
|
||||
shell: bash -euxo pipefail {0}
|
||||
run: ./scripts/pysync
|
||||
|
||||
- name: show ulimits
|
||||
shell: bash -euxo pipefail {0}
|
||||
run: |
|
||||
ulimit -a
|
||||
|
||||
- name: Run proxy-bench
|
||||
working-directory: ${{ env.PROXY_BENCH_PATH }}
|
||||
run: ./run.sh --with-grafana --bare-metal
|
||||
run: ${PROXY_BENCH_PATH}/run.sh
|
||||
|
||||
- name: Ingest Bench Results
|
||||
- name: Ingest Bench Results # neon repo script
|
||||
if: always()
|
||||
working-directory: ${{ env.NEON_DIR }}
|
||||
run: |
|
||||
mkdir -p $TEST_OUTPUT
|
||||
python $NEON_DIR/scripts/proxy_bench_results_ingest.py --out $TEST_OUTPUT
|
||||
|
||||
- name: Push Metrics to Proxy perf database
|
||||
shell: bash -euxo pipefail {0}
|
||||
if: always()
|
||||
env:
|
||||
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PROXY_TEST_RESULT_CONNSTR }}"
|
||||
REPORT_FROM: $TEST_OUTPUT
|
||||
working-directory: ${{ env.NEON_DIR }}
|
||||
run: $NEON_DIR/scripts/generate_and_push_perf_report.sh
|
||||
|
||||
- name: Docker cleanup
|
||||
if: always()
|
||||
run: docker compose down
|
||||
|
||||
- name: Notify Failure
|
||||
if: failure()
|
||||
run: echo "Proxy bench job failed" && exit 1
|
||||
|
||||
- name: Cleanup Test Resources
|
||||
if: always()
|
||||
shell: bash -euxo pipefail {0}
|
||||
run: |
|
||||
# Cleanup the test resources
|
||||
if [[ -d "${TEST_OUTPUT}" ]]; then
|
||||
rm -rf ${TEST_OUTPUT}
|
||||
fi
|
||||
if [[ -d "${PROXY_BENCH_PATH}/test_output" ]]; then
|
||||
rm -rf ${PROXY_BENCH_PATH}/test_output
|
||||
fi
|
||||
run: echo "Proxy bench job failed" && exit 1
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -15,6 +15,7 @@ neon.iml
|
||||
/.neon
|
||||
/integration_tests/.neon
|
||||
compaction-suite-results.*
|
||||
pgxn/neon/communicator/communicator_bindings.h
|
||||
docker-compose/docker-compose-parallel.yml
|
||||
|
||||
# Coverage
|
||||
|
||||
115
Cargo.lock
generated
115
Cargo.lock
generated
@@ -259,6 +259,17 @@ version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a8ab6b55fe97976e46f91ddbed8d147d966475dc29b2032757ba47e02376fbc3"
|
||||
|
||||
[[package]]
|
||||
name = "atomic_enum"
|
||||
version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "99e1aca718ea7b89985790c94aad72d77533063fe00bc497bb79a7c2dae6a661"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.100",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "autocfg"
|
||||
version = "1.1.0"
|
||||
@@ -1294,15 +1305,32 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "communicator"
|
||||
version = "0.1.0"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"atomic_enum",
|
||||
"axum",
|
||||
"bytes",
|
||||
"cbindgen",
|
||||
"clashmap",
|
||||
"http 1.3.1",
|
||||
"libc",
|
||||
"measured",
|
||||
"metrics",
|
||||
"neon-shmem",
|
||||
"nix 0.30.1",
|
||||
"pageserver_api",
|
||||
"pageserver_client_grpc",
|
||||
"pageserver_page_api",
|
||||
"prometheus",
|
||||
"prost 0.13.5",
|
||||
"strum_macros",
|
||||
"thiserror 1.0.69",
|
||||
"tokio",
|
||||
"tokio-pipe",
|
||||
"tonic",
|
||||
"tracing",
|
||||
"tracing-subscriber",
|
||||
"uring-common",
|
||||
"utils",
|
||||
"workspace_hack",
|
||||
]
|
||||
@@ -1388,7 +1416,6 @@ dependencies = [
|
||||
"tower-http",
|
||||
"tower-otel",
|
||||
"tracing",
|
||||
"tracing-appender",
|
||||
"tracing-opentelemetry",
|
||||
"tracing-subscriber",
|
||||
"tracing-utils",
|
||||
@@ -1643,9 +1670,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-utils"
|
||||
version = "0.8.19"
|
||||
version = "0.8.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345"
|
||||
checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
|
||||
|
||||
[[package]]
|
||||
name = "crossterm"
|
||||
@@ -2361,6 +2388,12 @@ version = "1.0.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
|
||||
|
||||
[[package]]
|
||||
name = "foldhash"
|
||||
version = "0.1.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2"
|
||||
|
||||
[[package]]
|
||||
name = "form_urlencoded"
|
||||
version = "1.2.1"
|
||||
@@ -2728,6 +2761,16 @@ version = "0.15.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289"
|
||||
|
||||
[[package]]
|
||||
name = "hashbrown"
|
||||
version = "0.15.4"
|
||||
source = "git+https://github.com/quantumish/hashbrown.git?rev=6610e6d#6610e6d2b1f288ef7b0709a3efefbc846395dc5e"
|
||||
dependencies = [
|
||||
"allocator-api2",
|
||||
"equivalent",
|
||||
"foldhash",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "hashlink"
|
||||
version = "0.9.1"
|
||||
@@ -3795,7 +3838,7 @@ dependencies = [
|
||||
"prometheus",
|
||||
"rand 0.9.1",
|
||||
"rand_distr",
|
||||
"twox-hash",
|
||||
"twox-hash 1.6.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3882,15 +3925,21 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a"
|
||||
name = "neon-shmem"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"ahash",
|
||||
"criterion",
|
||||
"hashbrown 0.15.4",
|
||||
"libc",
|
||||
"lock_api",
|
||||
"nix 0.30.1",
|
||||
"rand 0.9.1",
|
||||
"rand_distr",
|
||||
"rustc-hash 2.1.1",
|
||||
"seahash",
|
||||
"tempfile",
|
||||
"thiserror 1.0.69",
|
||||
"twox-hash 2.1.1",
|
||||
"workspace_hack",
|
||||
"xxhash-rust",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -4345,13 +4394,16 @@ version = "0.1.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async-trait",
|
||||
"axum",
|
||||
"bytes",
|
||||
"camino",
|
||||
"clap",
|
||||
"futures",
|
||||
"hdrhistogram",
|
||||
"http 1.3.1",
|
||||
"humantime",
|
||||
"humantime-serde",
|
||||
"metrics",
|
||||
"pageserver_api",
|
||||
"pageserver_client",
|
||||
"pageserver_client_grpc",
|
||||
@@ -4441,6 +4493,7 @@ dependencies = [
|
||||
"pageserver_client",
|
||||
"pageserver_compaction",
|
||||
"pageserver_page_api",
|
||||
"peekable",
|
||||
"pem",
|
||||
"pin-project-lite",
|
||||
"postgres-protocol",
|
||||
@@ -4454,6 +4507,7 @@ dependencies = [
|
||||
"pprof",
|
||||
"pq_proto",
|
||||
"procfs",
|
||||
"prost 0.13.5",
|
||||
"rand 0.9.1",
|
||||
"range-set-blaze",
|
||||
"regex",
|
||||
@@ -4490,7 +4544,7 @@ dependencies = [
|
||||
"tower 0.5.2",
|
||||
"tracing",
|
||||
"tracing-utils",
|
||||
"twox-hash",
|
||||
"twox-hash 1.6.3",
|
||||
"url",
|
||||
"utils",
|
||||
"uuid",
|
||||
@@ -4702,7 +4756,7 @@ dependencies = [
|
||||
"paste",
|
||||
"seq-macro",
|
||||
"thrift",
|
||||
"twox-hash",
|
||||
"twox-hash 1.6.3",
|
||||
"zstd",
|
||||
"zstd-sys",
|
||||
]
|
||||
@@ -4748,6 +4802,15 @@ dependencies = [
|
||||
"sha2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "peekable"
|
||||
version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "225f9651e475709164f871dc2f5724956be59cb9edb055372ffeeab01ec2d20b"
|
||||
dependencies = [
|
||||
"smallvec",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pem"
|
||||
version = "3.0.3"
|
||||
@@ -6442,6 +6505,12 @@ version = "0.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "621e3680f3e07db4c9c2c3fb07c6223ab2fab2e54bd3c04c3ae037990f428c32"
|
||||
|
||||
[[package]]
|
||||
name = "seahash"
|
||||
version = "4.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b"
|
||||
|
||||
[[package]]
|
||||
name = "sec1"
|
||||
version = "0.3.0"
|
||||
@@ -7589,6 +7658,16 @@ dependencies = [
|
||||
"syn 2.0.100",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-pipe"
|
||||
version = "0.2.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f213a84bffbd61b8fa0ba8a044b4bbe35d471d0b518867181e82bd5c15542784"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-postgres"
|
||||
version = "0.7.10"
|
||||
@@ -7935,12 +8014,11 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tracing-appender"
|
||||
version = "0.2.3"
|
||||
version = "0.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf"
|
||||
checksum = "09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e"
|
||||
dependencies = [
|
||||
"crossbeam-channel",
|
||||
"thiserror 1.0.69",
|
||||
"time",
|
||||
"tracing-subscriber",
|
||||
]
|
||||
@@ -8126,6 +8204,15 @@ dependencies = [
|
||||
"static_assertions",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "twox-hash"
|
||||
version = "2.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8b907da542cbced5261bd3256de1b3a1bf340a3d37f93425a07362a1d687de56"
|
||||
dependencies = [
|
||||
"rand 0.9.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "typed-json"
|
||||
version = "0.1.1"
|
||||
@@ -8845,6 +8932,7 @@ dependencies = [
|
||||
"clap",
|
||||
"clap_builder",
|
||||
"const-oid",
|
||||
"criterion",
|
||||
"crypto-bigint 0.5.5",
|
||||
"der 0.7.8",
|
||||
"deranged",
|
||||
@@ -8887,7 +8975,6 @@ dependencies = [
|
||||
"num-iter",
|
||||
"num-rational",
|
||||
"num-traits",
|
||||
"once_cell",
|
||||
"p256 0.13.2",
|
||||
"parquet",
|
||||
"prettyplease",
|
||||
@@ -8995,6 +9082,12 @@ version = "0.13.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4d25c75bf9ea12c4040a97f829154768bbbce366287e2dc044af160cd79a13fd"
|
||||
|
||||
[[package]]
|
||||
name = "xxhash-rust"
|
||||
version = "0.8.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fdd20c5420375476fbd4394763288da7eb0cc0b8c11deed431a91562af7335d3"
|
||||
|
||||
[[package]]
|
||||
name = "yansi"
|
||||
version = "1.0.1"
|
||||
|
||||
@@ -93,6 +93,7 @@ clap = { version = "4.0", features = ["derive", "env"] }
|
||||
clashmap = { version = "1.0", features = ["raw-api"] }
|
||||
comfy-table = "7.1"
|
||||
const_format = "0.2"
|
||||
crossbeam-utils = "0.8.21"
|
||||
crc32c = "0.6"
|
||||
diatomic-waker = { version = "0.2.3" }
|
||||
either = "1.8"
|
||||
@@ -152,6 +153,7 @@ parquet = { version = "53", default-features = false, features = ["zstd"] }
|
||||
parquet_derive = "53"
|
||||
pbkdf2 = { version = "0.12.1", features = ["simple", "std"] }
|
||||
pem = "3.0.3"
|
||||
peekable = "0.3.0"
|
||||
pin-project-lite = "0.2"
|
||||
pprof = { version = "0.14", features = ["criterion", "flamegraph", "frame-pointer", "prost-codec"] }
|
||||
procfs = "0.16"
|
||||
@@ -190,6 +192,7 @@ smallvec = "1.11"
|
||||
smol_str = { version = "0.2.0", features = ["serde"] }
|
||||
socket2 = "0.5"
|
||||
spki = "0.7.3"
|
||||
spin = "0.9.8"
|
||||
strum = "0.26"
|
||||
strum_macros = "0.26"
|
||||
"subtle" = "2.5.0"
|
||||
@@ -201,7 +204,6 @@ thiserror = "1.0"
|
||||
tikv-jemallocator = { version = "0.6", features = ["profiling", "stats", "unprefixed_malloc_on_supported_platforms"] }
|
||||
tikv-jemalloc-ctl = { version = "0.6", features = ["stats"] }
|
||||
tokio = { version = "1.43.1", features = ["macros"] }
|
||||
tokio-epoll-uring = { git = "https://github.com/neondatabase/tokio-epoll-uring.git" , branch = "main" }
|
||||
tokio-io-timeout = "1.2.0"
|
||||
tokio-postgres-rustls = "0.12.0"
|
||||
tokio-rustls = { version = "0.26.0", default-features = false, features = ["tls12", "ring"]}
|
||||
@@ -222,7 +224,6 @@ tracing-log = "0.2"
|
||||
tracing-opentelemetry = "0.31"
|
||||
tracing-serde = "0.2.0"
|
||||
tracing-subscriber = { version = "0.3", default-features = false, features = ["smallvec", "fmt", "tracing-log", "std", "env-filter", "json"] }
|
||||
tracing-appender = "0.2.3"
|
||||
try-lock = "0.2.5"
|
||||
test-log = { version = "0.2.17", default-features = false, features = ["log"] }
|
||||
twox-hash = { version = "1.6.3", default-features = false }
|
||||
@@ -241,6 +242,9 @@ x509-cert = { version = "0.2.5" }
|
||||
env_logger = "0.11"
|
||||
log = "0.4"
|
||||
|
||||
tokio-epoll-uring = { git = "https://github.com/neondatabase/tokio-epoll-uring.git" , branch = "main" }
|
||||
uring-common = { git = "https://github.com/neondatabase/tokio-epoll-uring.git" , branch = "main" }
|
||||
|
||||
## Libraries from neondatabase/ git forks, ideally with changes to be upstreamed
|
||||
postgres = { git = "https://github.com/neondatabase/rust-postgres.git", branch = "neon" }
|
||||
postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", branch = "neon" }
|
||||
|
||||
51
build-tools/package-lock.json
generated
51
build-tools/package-lock.json
generated
@@ -6,7 +6,7 @@
|
||||
"": {
|
||||
"name": "build-tools",
|
||||
"devDependencies": {
|
||||
"@redocly/cli": "1.34.5",
|
||||
"@redocly/cli": "1.34.4",
|
||||
"@sourcemeta/jsonschema": "10.0.0"
|
||||
}
|
||||
},
|
||||
@@ -472,9 +472,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@redocly/cli": {
|
||||
"version": "1.34.5",
|
||||
"resolved": "https://registry.npmjs.org/@redocly/cli/-/cli-1.34.5.tgz",
|
||||
"integrity": "sha512-5IEwxs7SGP5KEXjBKLU8Ffdz9by/KqNSeBk6YUVQaGxMXK//uYlTJIPntgUXbo1KAGG2d2q2XF8y4iFz6qNeiw==",
|
||||
"version": "1.34.4",
|
||||
"resolved": "https://registry.npmjs.org/@redocly/cli/-/cli-1.34.4.tgz",
|
||||
"integrity": "sha512-seH/GgrjSB1EeOsgJ/4Ct6Jk2N7sh12POn/7G8UQFARMyUMJpe1oHtBwT2ndfp4EFCpgBAbZ/82Iw6dwczNxEA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
@@ -484,14 +484,14 @@
|
||||
"@opentelemetry/sdk-trace-node": "1.26.0",
|
||||
"@opentelemetry/semantic-conventions": "1.27.0",
|
||||
"@redocly/config": "^0.22.0",
|
||||
"@redocly/openapi-core": "1.34.5",
|
||||
"@redocly/respect-core": "1.34.5",
|
||||
"@redocly/openapi-core": "1.34.4",
|
||||
"@redocly/respect-core": "1.34.4",
|
||||
"abort-controller": "^3.0.0",
|
||||
"chokidar": "^3.5.1",
|
||||
"colorette": "^1.2.0",
|
||||
"core-js": "^3.32.1",
|
||||
"dotenv": "16.4.7",
|
||||
"form-data": "^4.0.4",
|
||||
"form-data": "^4.0.0",
|
||||
"get-port-please": "^3.0.1",
|
||||
"glob": "^7.1.6",
|
||||
"handlebars": "^4.7.6",
|
||||
@@ -522,9 +522,9 @@
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@redocly/openapi-core": {
|
||||
"version": "1.34.5",
|
||||
"resolved": "https://registry.npmjs.org/@redocly/openapi-core/-/openapi-core-1.34.5.tgz",
|
||||
"integrity": "sha512-0EbE8LRbkogtcCXU7liAyC00n9uNG9hJ+eMyHFdUsy9lB/WGqnEBgwjA9q2cyzAVcdTkQqTBBU1XePNnN3OijA==",
|
||||
"version": "1.34.4",
|
||||
"resolved": "https://registry.npmjs.org/@redocly/openapi-core/-/openapi-core-1.34.4.tgz",
|
||||
"integrity": "sha512-hf53xEgpXIgWl3b275PgZU3OTpYh1RoD2LHdIfQ1JzBNTWsiNKczTEsI/4Tmh2N1oq9YcphhSMyk3lDh85oDjg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
@@ -544,21 +544,21 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@redocly/respect-core": {
|
||||
"version": "1.34.5",
|
||||
"resolved": "https://registry.npmjs.org/@redocly/respect-core/-/respect-core-1.34.5.tgz",
|
||||
"integrity": "sha512-GheC/g/QFztPe9UA9LamooSplQuy9pe0Yr8XGTqkz0ahivLDl7svoy/LSQNn1QH3XGtLKwFYMfTwFR2TAYyh5Q==",
|
||||
"version": "1.34.4",
|
||||
"resolved": "https://registry.npmjs.org/@redocly/respect-core/-/respect-core-1.34.4.tgz",
|
||||
"integrity": "sha512-MitKyKyQpsizA4qCVv+MjXL4WltfhFQAoiKiAzrVR1Kusro3VhYb6yJuzoXjiJhR0ukLP5QOP19Vcs7qmj9dZg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@faker-js/faker": "^7.6.0",
|
||||
"@redocly/ajv": "8.11.2",
|
||||
"@redocly/openapi-core": "1.34.5",
|
||||
"@redocly/openapi-core": "1.34.4",
|
||||
"better-ajv-errors": "^1.2.0",
|
||||
"colorette": "^2.0.20",
|
||||
"concat-stream": "^2.0.0",
|
||||
"cookie": "^0.7.2",
|
||||
"dotenv": "16.4.7",
|
||||
"form-data": "^4.0.4",
|
||||
"form-data": "4.0.0",
|
||||
"jest-diff": "^29.3.1",
|
||||
"jest-matcher-utils": "^29.3.1",
|
||||
"js-yaml": "4.1.0",
|
||||
@@ -582,6 +582,21 @@
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@redocly/respect-core/node_modules/form-data": {
|
||||
"version": "4.0.0",
|
||||
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
|
||||
"integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"asynckit": "^0.4.0",
|
||||
"combined-stream": "^1.0.8",
|
||||
"mime-types": "^2.1.12"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 6"
|
||||
}
|
||||
},
|
||||
"node_modules/@sinclair/typebox": {
|
||||
"version": "0.27.8",
|
||||
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
|
||||
@@ -1330,9 +1345,9 @@
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/form-data": {
|
||||
"version": "4.0.4",
|
||||
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz",
|
||||
"integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==",
|
||||
"version": "4.0.3",
|
||||
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.3.tgz",
|
||||
"integrity": "sha512-qsITQPfmvMOSAdeyZ+12I1c+CKSstAFAwu+97zrnWAbIr5u8wfsExUzCesVLC8NgHuRUqNN4Zy6UPWUTRGslcA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
"name": "build-tools",
|
||||
"private": true,
|
||||
"devDependencies": {
|
||||
"@redocly/cli": "1.34.5",
|
||||
"@redocly/cli": "1.34.4",
|
||||
"@sourcemeta/jsonschema": "10.0.0"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -133,7 +133,7 @@ RUN case $DEBIAN_VERSION in \
|
||||
# Install newer version (3.25) from backports.
|
||||
# libstdc++-10-dev is required for plv8
|
||||
bullseye) \
|
||||
echo "deb http://archive.debian.org/debian bullseye-backports main" > /etc/apt/sources.list.d/bullseye-backports.list; \
|
||||
echo "deb http://deb.debian.org/debian bullseye-backports main" > /etc/apt/sources.list.d/bullseye-backports.list; \
|
||||
VERSION_INSTALLS="cmake/bullseye-backports cmake-data/bullseye-backports libstdc++-10-dev"; \
|
||||
;; \
|
||||
# Version-specific installs for Bookworm (PG17):
|
||||
|
||||
@@ -26,13 +26,7 @@ commands:
|
||||
- name: postgres-exporter
|
||||
user: nobody
|
||||
sysvInitAction: respawn
|
||||
# Turn off database collector (`--no-collector.database`), we don't use `pg_database_size_bytes` metric anyway, see
|
||||
# https://github.com/neondatabase/flux-fleet/blob/5e19b3fd897667b70d9a7ad4aa06df0ca22b49ff/apps/base/compute-metrics/scrape-compute-pg-exporter-neon.yaml#L29
|
||||
# but it's enabled by default and it doesn't filter out invalid databases, see
|
||||
# https://github.com/prometheus-community/postgres_exporter/blob/06a553c8166512c9d9c5ccf257b0f9bba8751dbc/collector/pg_database.go#L67
|
||||
# so if it hits one, it starts spamming logs
|
||||
# ERROR: [NEON_SMGR] [reqid d9700000018] could not read db size of db 705302 from page server at lsn 5/A2457EB0
|
||||
shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter pgaudit.log=none" /bin/postgres_exporter --no-collector.database --config.file=/etc/postgres_exporter.yml'
|
||||
shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter pgaudit.log=none" /bin/postgres_exporter --config.file=/etc/postgres_exporter.yml'
|
||||
- name: pgbouncer-exporter
|
||||
user: postgres
|
||||
sysvInitAction: respawn
|
||||
|
||||
@@ -26,13 +26,7 @@ commands:
|
||||
- name: postgres-exporter
|
||||
user: nobody
|
||||
sysvInitAction: respawn
|
||||
# Turn off database collector (`--no-collector.database`), we don't use `pg_database_size_bytes` metric anyway, see
|
||||
# https://github.com/neondatabase/flux-fleet/blob/5e19b3fd897667b70d9a7ad4aa06df0ca22b49ff/apps/base/compute-metrics/scrape-compute-pg-exporter-neon.yaml#L29
|
||||
# but it's enabled by default and it doesn't filter out invalid databases, see
|
||||
# https://github.com/prometheus-community/postgres_exporter/blob/06a553c8166512c9d9c5ccf257b0f9bba8751dbc/collector/pg_database.go#L67
|
||||
# so if it hits one, it starts spamming logs
|
||||
# ERROR: [NEON_SMGR] [reqid d9700000018] could not read db size of db 705302 from page server at lsn 5/A2457EB0
|
||||
shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter pgaudit.log=none" /bin/postgres_exporter --no-collector.database --config.file=/etc/postgres_exporter.yml'
|
||||
shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter pgaudit.log=none" /bin/postgres_exporter --config.file=/etc/postgres_exporter.yml'
|
||||
- name: pgbouncer-exporter
|
||||
user: postgres
|
||||
sysvInitAction: respawn
|
||||
|
||||
@@ -62,7 +62,6 @@ tokio-stream.workspace = true
|
||||
tonic.workspace = true
|
||||
tower-otel.workspace = true
|
||||
tracing.workspace = true
|
||||
tracing-appender.workspace = true
|
||||
tracing-opentelemetry.workspace = true
|
||||
tracing-subscriber.workspace = true
|
||||
tracing-utils.workspace = true
|
||||
|
||||
@@ -52,14 +52,8 @@ stateDiagram-v2
|
||||
Init --> Running : Started Postgres
|
||||
Running --> TerminationPendingFast : Requested termination
|
||||
Running --> TerminationPendingImmediate : Requested termination
|
||||
Running --> ConfigurationPending : Received a /configure request with spec
|
||||
Running --> RefreshConfigurationPending : Received a /refresh_configuration request, compute node will pull a new spec and reconfigure
|
||||
RefreshConfigurationPending --> RefreshConfiguration: Received compute spec and started configuration
|
||||
RefreshConfiguration --> Running : Compute has been re-configured
|
||||
RefreshConfiguration --> RefreshConfigurationPending : Configuration failed and to be retried
|
||||
TerminationPendingFast --> Terminated compute with 30s delay for cplane to inspect status
|
||||
TerminationPendingImmediate --> Terminated : Terminated compute immediately
|
||||
Failed --> RefreshConfigurationPending : Received a /refresh_configuration request
|
||||
Failed --> [*] : Compute exited
|
||||
Terminated --> [*] : Compute exited
|
||||
```
|
||||
|
||||
@@ -49,10 +49,9 @@ use compute_tools::compute::{
|
||||
BUILD_TAG, ComputeNode, ComputeNodeParams, forward_termination_signal,
|
||||
};
|
||||
use compute_tools::extension_server::get_pg_version_string;
|
||||
use compute_tools::logger::*;
|
||||
use compute_tools::params::*;
|
||||
use compute_tools::pg_isready::get_pg_isready_bin;
|
||||
use compute_tools::spec::*;
|
||||
use compute_tools::{hadron_metrics, installed_extensions, logger::*};
|
||||
use rlimit::{Resource, setrlimit};
|
||||
use signal_hook::consts::{SIGINT, SIGQUIT, SIGTERM};
|
||||
use signal_hook::iterator::Signals;
|
||||
@@ -195,19 +194,11 @@ fn main() -> Result<()> {
|
||||
.build()?;
|
||||
let _rt_guard = runtime.enter();
|
||||
|
||||
let mut log_dir = None;
|
||||
if cli.lakebase_mode {
|
||||
log_dir = std::env::var("COMPUTE_CTL_LOG_DIRECTORY").ok();
|
||||
}
|
||||
|
||||
let (tracing_provider, _file_logs_guard) = init(cli.dev, log_dir)?;
|
||||
let tracing_provider = init(cli.dev)?;
|
||||
|
||||
// enable core dumping for all child processes
|
||||
setrlimit(Resource::CORE, rlimit::INFINITY, rlimit::INFINITY)?;
|
||||
|
||||
installed_extensions::initialize_metrics();
|
||||
hadron_metrics::initialize_metrics();
|
||||
|
||||
let connstr = Url::parse(&cli.connstr).context("cannot parse connstr as a URL")?;
|
||||
|
||||
let config = get_config(&cli)?;
|
||||
@@ -235,12 +226,7 @@ fn main() -> Result<()> {
|
||||
cli.installed_extensions_collection_interval,
|
||||
)),
|
||||
pg_init_timeout: cli.pg_init_timeout.map(Duration::from_secs),
|
||||
pg_isready_bin: get_pg_isready_bin(&cli.pgbin),
|
||||
instance_id: std::env::var("INSTANCE_ID").ok(),
|
||||
lakebase_mode: cli.lakebase_mode,
|
||||
build_tag: BUILD_TAG.to_string(),
|
||||
control_plane_uri: cli.control_plane_uri,
|
||||
config_path_test_only: cli.config,
|
||||
},
|
||||
config,
|
||||
)?;
|
||||
@@ -252,14 +238,8 @@ fn main() -> Result<()> {
|
||||
deinit_and_exit(tracing_provider, exit_code);
|
||||
}
|
||||
|
||||
fn init(
|
||||
dev_mode: bool,
|
||||
log_dir: Option<String>,
|
||||
) -> Result<(
|
||||
Option<tracing_utils::Provider>,
|
||||
Option<tracing_appender::non_blocking::WorkerGuard>,
|
||||
)> {
|
||||
let (provider, file_logs_guard) = init_tracing_and_logging(DEFAULT_LOG_LEVEL, &log_dir)?;
|
||||
fn init(dev_mode: bool) -> Result<Option<tracing_utils::Provider>> {
|
||||
let provider = init_tracing_and_logging(DEFAULT_LOG_LEVEL)?;
|
||||
|
||||
let mut signals = Signals::new([SIGINT, SIGTERM, SIGQUIT])?;
|
||||
thread::spawn(move || {
|
||||
@@ -270,7 +250,7 @@ fn init(
|
||||
|
||||
info!("compute build_tag: {}", &BUILD_TAG.to_string());
|
||||
|
||||
Ok((provider, file_logs_guard))
|
||||
Ok(provider)
|
||||
}
|
||||
|
||||
fn get_config(cli: &Cli) -> Result<ComputeConfig> {
|
||||
|
||||
@@ -6,7 +6,8 @@ use compute_api::responses::{
|
||||
LfcPrewarmState, PromoteState, TlsConfig,
|
||||
};
|
||||
use compute_api::spec::{
|
||||
ComputeAudit, ComputeFeature, ComputeMode, ComputeSpec, ExtVersion, PageserverProtocol, PgIdent,
|
||||
ComputeAudit, ComputeFeature, ComputeMode, ComputeSpec, ExtVersion, PageserverConnectionInfo,
|
||||
PageserverProtocol, PageserverShardConnectionInfo, PageserverShardInfo, PgIdent,
|
||||
};
|
||||
use futures::StreamExt;
|
||||
use futures::future::join_all;
|
||||
@@ -21,7 +22,6 @@ use postgres::NoTls;
|
||||
use postgres::error::SqlState;
|
||||
use remote_storage::{DownloadError, RemotePath};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::ffi::OsString;
|
||||
use std::os::unix::fs::{PermissionsExt, symlink};
|
||||
use std::path::Path;
|
||||
use std::process::{Command, Stdio};
|
||||
@@ -41,9 +41,8 @@ use utils::shard::{ShardCount, ShardIndex, ShardNumber};
|
||||
|
||||
use crate::configurator::launch_configurator;
|
||||
use crate::disk_quota::set_disk_quota;
|
||||
use crate::hadron_metrics::COMPUTE_ATTACHED;
|
||||
use crate::installed_extensions::get_installed_extensions;
|
||||
use crate::logger::{self, startup_context_from_env};
|
||||
use crate::logger::startup_context_from_env;
|
||||
use crate::lsn_lease::launch_lsn_lease_bg_task_for_static;
|
||||
use crate::metrics::COMPUTE_CTL_UP;
|
||||
use crate::monitor::launch_monitor;
|
||||
@@ -115,17 +114,11 @@ pub struct ComputeNodeParams {
|
||||
|
||||
/// Interval for installed extensions collection
|
||||
pub installed_extensions_collection_interval: Arc<AtomicU64>,
|
||||
/// Hadron instance ID of the compute node.
|
||||
pub instance_id: Option<String>,
|
||||
|
||||
/// Timeout of PG compute startup in the Init state.
|
||||
pub pg_init_timeout: Option<Duration>,
|
||||
// Path to the `pg_isready` binary.
|
||||
pub pg_isready_bin: String,
|
||||
pub lakebase_mode: bool,
|
||||
|
||||
pub build_tag: String,
|
||||
pub control_plane_uri: Option<String>,
|
||||
pub config_path_test_only: Option<OsString>,
|
||||
pub lakebase_mode: bool,
|
||||
}
|
||||
|
||||
type TaskHandle = Mutex<Option<JoinHandle<()>>>;
|
||||
@@ -248,7 +241,7 @@ pub struct ParsedSpec {
|
||||
pub spec: ComputeSpec,
|
||||
pub tenant_id: TenantId,
|
||||
pub timeline_id: TimelineId,
|
||||
pub pageserver_connstr: String,
|
||||
pub pageserver_conninfo: PageserverConnectionInfo,
|
||||
pub safekeeper_connstrings: Vec<String>,
|
||||
pub storage_auth_token: Option<String>,
|
||||
/// k8s dns name and port
|
||||
@@ -295,26 +288,114 @@ impl ParsedSpec {
|
||||
}
|
||||
}
|
||||
|
||||
/// Extract PageserverConnectionInfo from a comma-separated list of libpq connection strings.
|
||||
///
|
||||
/// This is used for backwards-compatilibity, to parse the legacye `pageserver_connstr`
|
||||
/// field in the compute spec, or the 'neon.pageserver_connstring' GUC. Nowadays, the
|
||||
/// 'pageserver_connection_info' field should be used instead.
|
||||
fn extract_pageserver_conninfo_from_connstr(
|
||||
connstr: &str,
|
||||
stripe_size: Option<u32>,
|
||||
) -> Result<PageserverConnectionInfo, anyhow::Error> {
|
||||
let shard_infos: Vec<_> = connstr
|
||||
.split(',')
|
||||
.map(|connstr| PageserverShardInfo {
|
||||
pageservers: vec![PageserverShardConnectionInfo {
|
||||
id: None,
|
||||
libpq_url: Some(connstr.to_string()),
|
||||
grpc_url: None,
|
||||
}],
|
||||
})
|
||||
.collect();
|
||||
|
||||
match shard_infos.len() {
|
||||
0 => anyhow::bail!("empty connection string"),
|
||||
1 => {
|
||||
// We assume that if there's only connection string, it means "unsharded",
|
||||
// rather than a sharded system with just a single shard. The latter is
|
||||
// possible in principle, but we never do it.
|
||||
let shard_count = ShardCount::unsharded();
|
||||
let only_shard = shard_infos.first().unwrap().clone();
|
||||
let shards = vec![(ShardIndex::unsharded(), only_shard)];
|
||||
Ok(PageserverConnectionInfo {
|
||||
shard_count,
|
||||
stripe_size: None,
|
||||
shards: shards.into_iter().collect(),
|
||||
prefer_protocol: PageserverProtocol::Libpq,
|
||||
})
|
||||
}
|
||||
n => {
|
||||
if stripe_size.is_none() {
|
||||
anyhow::bail!("{n} shards but no stripe_size");
|
||||
}
|
||||
let shard_count = ShardCount(n.try_into()?);
|
||||
let shards = shard_infos
|
||||
.into_iter()
|
||||
.enumerate()
|
||||
.map(|(idx, shard_info)| {
|
||||
(
|
||||
ShardIndex {
|
||||
shard_count,
|
||||
shard_number: ShardNumber(
|
||||
idx.try_into().expect("shard number fits in u8"),
|
||||
),
|
||||
},
|
||||
shard_info,
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
Ok(PageserverConnectionInfo {
|
||||
shard_count,
|
||||
stripe_size,
|
||||
shards,
|
||||
prefer_protocol: PageserverProtocol::Libpq,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<ComputeSpec> for ParsedSpec {
|
||||
type Error = String;
|
||||
fn try_from(spec: ComputeSpec) -> Result<Self, String> {
|
||||
type Error = anyhow::Error;
|
||||
fn try_from(spec: ComputeSpec) -> Result<Self, anyhow::Error> {
|
||||
// Extract the options from the spec file that are needed to connect to
|
||||
// the storage system.
|
||||
//
|
||||
// For backwards-compatibility, the top-level fields in the spec file
|
||||
// may be empty. In that case, we need to dig them from the GUCs in the
|
||||
// cluster.settings field.
|
||||
let pageserver_connstr = spec
|
||||
.pageserver_connstring
|
||||
.clone()
|
||||
.or_else(|| spec.cluster.settings.find("neon.pageserver_connstring"))
|
||||
.ok_or("pageserver connstr should be provided")?;
|
||||
// In compute specs generated by old control plane versions, the spec file might
|
||||
// be missing the `pageserver_connection_info` field. In that case, we need to dig
|
||||
// the pageserver connection info from the `pageserver_connstr` field instead, or
|
||||
// if that's missing too, from the GUC in the cluster.settings field.
|
||||
let mut pageserver_conninfo = spec.pageserver_connection_info.clone();
|
||||
if pageserver_conninfo.is_none() {
|
||||
if let Some(pageserver_connstr_field) = &spec.pageserver_connstring {
|
||||
pageserver_conninfo = Some(extract_pageserver_conninfo_from_connstr(
|
||||
pageserver_connstr_field,
|
||||
spec.shard_stripe_size,
|
||||
)?);
|
||||
}
|
||||
}
|
||||
if pageserver_conninfo.is_none() {
|
||||
if let Some(guc) = spec.cluster.settings.find("neon.pageserver_connstring") {
|
||||
let stripe_size = if let Some(guc) = spec.cluster.settings.find("neon.stripe_size")
|
||||
{
|
||||
Some(u32::from_str(&guc)?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
pageserver_conninfo =
|
||||
Some(extract_pageserver_conninfo_from_connstr(&guc, stripe_size)?);
|
||||
}
|
||||
}
|
||||
let pageserver_conninfo = pageserver_conninfo.ok_or(anyhow::anyhow!(
|
||||
"pageserver connection information should be provided"
|
||||
))?;
|
||||
|
||||
// Similarly for safekeeper connection strings
|
||||
let safekeeper_connstrings = if spec.safekeeper_connstrings.is_empty() {
|
||||
if matches!(spec.mode, ComputeMode::Primary) {
|
||||
spec.cluster
|
||||
.settings
|
||||
.find("neon.safekeepers")
|
||||
.ok_or("safekeeper connstrings should be provided")?
|
||||
.ok_or(anyhow::anyhow!("safekeeper connstrings should be provided"))?
|
||||
.split(',')
|
||||
.map(|str| str.to_string())
|
||||
.collect()
|
||||
@@ -329,22 +410,22 @@ impl TryFrom<ComputeSpec> for ParsedSpec {
|
||||
let tenant_id: TenantId = if let Some(tenant_id) = spec.tenant_id {
|
||||
tenant_id
|
||||
} else {
|
||||
spec.cluster
|
||||
let guc = spec
|
||||
.cluster
|
||||
.settings
|
||||
.find("neon.tenant_id")
|
||||
.ok_or("tenant id should be provided")
|
||||
.map(|s| TenantId::from_str(&s))?
|
||||
.or(Err("invalid tenant id"))?
|
||||
.ok_or(anyhow::anyhow!("tenant id should be provided"))?;
|
||||
TenantId::from_str(&guc).context("invalid tenant id")?
|
||||
};
|
||||
let timeline_id: TimelineId = if let Some(timeline_id) = spec.timeline_id {
|
||||
timeline_id
|
||||
} else {
|
||||
spec.cluster
|
||||
let guc = spec
|
||||
.cluster
|
||||
.settings
|
||||
.find("neon.timeline_id")
|
||||
.ok_or("timeline id should be provided")
|
||||
.map(|s| TimelineId::from_str(&s))?
|
||||
.or(Err("invalid timeline id"))?
|
||||
.ok_or(anyhow::anyhow!("timeline id should be provided"))?;
|
||||
TimelineId::from_str(&guc).context(anyhow::anyhow!("invalid timeline id"))?
|
||||
};
|
||||
|
||||
let endpoint_storage_addr: Option<String> = spec
|
||||
@@ -358,7 +439,7 @@ impl TryFrom<ComputeSpec> for ParsedSpec {
|
||||
|
||||
let res = ParsedSpec {
|
||||
spec,
|
||||
pageserver_connstr,
|
||||
pageserver_conninfo,
|
||||
safekeeper_connstrings,
|
||||
storage_auth_token,
|
||||
tenant_id,
|
||||
@@ -368,7 +449,7 @@ impl TryFrom<ComputeSpec> for ParsedSpec {
|
||||
};
|
||||
|
||||
// Now check validity of the parsed specification
|
||||
res.validate()?;
|
||||
res.validate().map_err(anyhow::Error::msg)?;
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
@@ -494,7 +575,6 @@ impl ComputeNode {
|
||||
port: this.params.external_http_port,
|
||||
config: this.compute_ctl_config.clone(),
|
||||
compute_id: this.params.compute_id.clone(),
|
||||
instance_id: this.params.instance_id.clone(),
|
||||
}
|
||||
.launch(&this);
|
||||
|
||||
@@ -1059,12 +1139,10 @@ impl ComputeNode {
|
||||
fn try_get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> {
|
||||
let spec = compute_state.pspec.as_ref().expect("spec must be set");
|
||||
|
||||
let shard0_connstr = spec.pageserver_connstr.split(',').next().unwrap();
|
||||
let started = Instant::now();
|
||||
|
||||
let (connected, size) = match PageserverProtocol::from_connstring(shard0_connstr)? {
|
||||
PageserverProtocol::Libpq => self.try_get_basebackup_libpq(spec, lsn)?,
|
||||
let (connected, size) = match spec.pageserver_conninfo.prefer_protocol {
|
||||
PageserverProtocol::Grpc => self.try_get_basebackup_grpc(spec, lsn)?,
|
||||
PageserverProtocol::Libpq => self.try_get_basebackup_libpq(spec, lsn)?,
|
||||
};
|
||||
|
||||
self.fix_zenith_signal_neon_signal()?;
|
||||
@@ -1102,23 +1180,32 @@ impl ComputeNode {
|
||||
/// Fetches a basebackup via gRPC. The connstring must use grpc://. Returns the timestamp when
|
||||
/// the connection was established, and the (compressed) size of the basebackup.
|
||||
fn try_get_basebackup_grpc(&self, spec: &ParsedSpec, lsn: Lsn) -> Result<(Instant, usize)> {
|
||||
let shard0_connstr = spec
|
||||
.pageserver_connstr
|
||||
.split(',')
|
||||
.next()
|
||||
.unwrap()
|
||||
.to_string();
|
||||
let shard_index = match spec.pageserver_connstr.split(',').count() as u8 {
|
||||
0 | 1 => ShardIndex::unsharded(),
|
||||
count => ShardIndex::new(ShardNumber(0), ShardCount(count)),
|
||||
let shard0_index = ShardIndex {
|
||||
shard_number: ShardNumber(0),
|
||||
shard_count: spec.pageserver_conninfo.shard_count,
|
||||
};
|
||||
let shard0 = spec
|
||||
.pageserver_conninfo
|
||||
.shards
|
||||
.get(&shard0_index)
|
||||
.ok_or_else(|| {
|
||||
anyhow::anyhow!("shard connection info missing for shard {}", shard0_index)
|
||||
})?;
|
||||
let pageserver = shard0
|
||||
.pageservers
|
||||
.first()
|
||||
.expect("must have at least one pageserver");
|
||||
let shard0_url = pageserver
|
||||
.grpc_url
|
||||
.clone()
|
||||
.expect("no grpc_url for shard 0");
|
||||
|
||||
let (reader, connected) = tokio::runtime::Handle::current().block_on(async move {
|
||||
let mut client = page_api::Client::connect(
|
||||
shard0_connstr,
|
||||
shard0_url,
|
||||
spec.tenant_id,
|
||||
spec.timeline_id,
|
||||
shard_index,
|
||||
shard0_index,
|
||||
spec.storage_auth_token.clone(),
|
||||
None, // NB: base backups use payload compression
|
||||
)
|
||||
@@ -1150,8 +1237,26 @@ impl ComputeNode {
|
||||
/// Fetches a basebackup via libpq. The connstring must use postgresql://. Returns the timestamp
|
||||
/// when the connection was established, and the (compressed) size of the basebackup.
|
||||
fn try_get_basebackup_libpq(&self, spec: &ParsedSpec, lsn: Lsn) -> Result<(Instant, usize)> {
|
||||
let shard0_connstr = spec.pageserver_connstr.split(',').next().unwrap();
|
||||
let mut config = postgres::Config::from_str(shard0_connstr)?;
|
||||
let shard0_index = ShardIndex {
|
||||
shard_number: ShardNumber(0),
|
||||
shard_count: spec.pageserver_conninfo.shard_count,
|
||||
};
|
||||
let shard0 = spec
|
||||
.pageserver_conninfo
|
||||
.shards
|
||||
.get(&shard0_index)
|
||||
.ok_or_else(|| {
|
||||
anyhow::anyhow!("shard connection info missing for shard {}", shard0_index)
|
||||
})?;
|
||||
let pageserver = shard0
|
||||
.pageservers
|
||||
.first()
|
||||
.expect("must have at least one pageserver");
|
||||
let shard0_connstr = pageserver
|
||||
.libpq_url
|
||||
.clone()
|
||||
.expect("no libpq_url for shard 0");
|
||||
let mut config = postgres::Config::from_str(&shard0_connstr)?;
|
||||
|
||||
// Use the storage auth token from the config file, if given.
|
||||
// Note: this overrides any password set in the connection string.
|
||||
@@ -1237,10 +1342,7 @@ impl ComputeNode {
|
||||
return result;
|
||||
}
|
||||
Err(ref e) if attempts < max_attempts => {
|
||||
warn!(
|
||||
"Failed to get basebackup: {} (attempt {}/{})",
|
||||
e, attempts, max_attempts
|
||||
);
|
||||
warn!("Failed to get basebackup: {e:?} (attempt {attempts}/{max_attempts})");
|
||||
std::thread::sleep(std::time::Duration::from_millis(retry_period_ms as u64));
|
||||
retry_period_ms *= 1.5;
|
||||
}
|
||||
@@ -1448,16 +1550,8 @@ impl ComputeNode {
|
||||
}
|
||||
};
|
||||
|
||||
info!(
|
||||
"getting basebackup@{} from pageserver {}",
|
||||
lsn, &pspec.pageserver_connstr
|
||||
);
|
||||
self.get_basebackup(compute_state, lsn).with_context(|| {
|
||||
format!(
|
||||
"failed to get basebackup@{} from pageserver {}",
|
||||
lsn, &pspec.pageserver_connstr
|
||||
)
|
||||
})?;
|
||||
self.get_basebackup(compute_state, lsn)
|
||||
.with_context(|| format!("failed to get basebackup@{lsn}"))?;
|
||||
|
||||
// Update pg_hba.conf received with basebackup.
|
||||
update_pg_hba(pgdata_path, None)?;
|
||||
@@ -1794,34 +1888,6 @@ impl ComputeNode {
|
||||
Ok::<(), anyhow::Error>(())
|
||||
}
|
||||
|
||||
// Signal to the configurator to refresh the configuration by pulling a new spec from the HCC.
|
||||
// Note that this merely triggers a notification on a condition variable the configurator thread
|
||||
// waits on. The configurator thread (in configurator.rs) pulls the new spec from the HCC and
|
||||
// applies it.
|
||||
pub async fn signal_refresh_configuration(&self) -> Result<()> {
|
||||
let states_allowing_configuration_refresh = [
|
||||
ComputeStatus::Running,
|
||||
ComputeStatus::Failed,
|
||||
ComputeStatus::RefreshConfigurationPending,
|
||||
];
|
||||
|
||||
let mut state = self.state.lock().expect("state lock poisoned");
|
||||
if states_allowing_configuration_refresh.contains(&state.status) {
|
||||
state.status = ComputeStatus::RefreshConfigurationPending;
|
||||
self.state_changed.notify_all();
|
||||
Ok(())
|
||||
} else if state.status == ComputeStatus::Init {
|
||||
// If the compute is in Init state, we can't refresh the configuration immediately,
|
||||
// but we should be able to do that soon.
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow::anyhow!(
|
||||
"Cannot refresh compute configuration in state {:?}",
|
||||
state.status
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
// Wrapped this around `pg_ctl reload`, but right now we don't use
|
||||
// `pg_ctl` for start / stop.
|
||||
#[instrument(skip_all)]
|
||||
@@ -1994,8 +2060,6 @@ impl ComputeNode {
|
||||
// wait
|
||||
ComputeStatus::Init
|
||||
| ComputeStatus::Configuration
|
||||
| ComputeStatus::RefreshConfiguration
|
||||
| ComputeStatus::RefreshConfigurationPending
|
||||
| ComputeStatus::Empty => {
|
||||
state = self.state_changed.wait(state).unwrap();
|
||||
}
|
||||
@@ -2431,22 +2495,22 @@ LIMIT 100",
|
||||
/// The operation will time out after a specified duration.
|
||||
pub fn wait_timeout_while_pageserver_connstr_unchanged(&self, duration: Duration) {
|
||||
let state = self.state.lock().unwrap();
|
||||
let old_pageserver_connstr = state
|
||||
let old_pageserver_conninfo = state
|
||||
.pspec
|
||||
.as_ref()
|
||||
.expect("spec must be set")
|
||||
.pageserver_connstr
|
||||
.pageserver_conninfo
|
||||
.clone();
|
||||
let mut unchanged = true;
|
||||
let _ = self
|
||||
.state_changed
|
||||
.wait_timeout_while(state, duration, |s| {
|
||||
let pageserver_connstr = &s
|
||||
let pageserver_conninfo = &s
|
||||
.pspec
|
||||
.as_ref()
|
||||
.expect("spec must be set")
|
||||
.pageserver_connstr;
|
||||
unchanged = pageserver_connstr == &old_pageserver_connstr;
|
||||
.pageserver_conninfo;
|
||||
unchanged = pageserver_conninfo == &old_pageserver_conninfo;
|
||||
unchanged
|
||||
})
|
||||
.unwrap();
|
||||
@@ -2552,34 +2616,6 @@ LIMIT 100",
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the compute spec and update related metrics.
|
||||
/// This is the central place where pspec is updated.
|
||||
pub fn set_spec(params: &ComputeNodeParams, state: &mut ComputeState, pspec: ParsedSpec) {
|
||||
state.pspec = Some(pspec);
|
||||
ComputeNode::update_attached_metric(params, state);
|
||||
let _ = logger::update_ids(¶ms.instance_id, &Some(params.compute_id.clone()));
|
||||
}
|
||||
|
||||
pub fn update_attached_metric(params: &ComputeNodeParams, state: &mut ComputeState) {
|
||||
// Update the pg_cctl_attached gauge when all identifiers are available.
|
||||
if let Some(instance_id) = ¶ms.instance_id {
|
||||
if let Some(pspec) = &state.pspec {
|
||||
// Clear all values in the metric
|
||||
COMPUTE_ATTACHED.reset();
|
||||
|
||||
// Set new metric value
|
||||
COMPUTE_ATTACHED
|
||||
.with_label_values(&[
|
||||
¶ms.compute_id,
|
||||
instance_id,
|
||||
&pspec.tenant_id.to_string(),
|
||||
&pspec.timeline_id.to_string(),
|
||||
])
|
||||
.set(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn installed_extensions(conf: tokio_postgres::Config) -> Result<()> {
|
||||
@@ -2704,7 +2740,10 @@ mod tests {
|
||||
|
||||
match ParsedSpec::try_from(spec.clone()) {
|
||||
Ok(_p) => panic!("Failed to detect duplicate entry"),
|
||||
Err(e) => assert!(e.starts_with("duplicate entry in safekeeper_connstrings:")),
|
||||
Err(e) => assert!(
|
||||
e.to_string()
|
||||
.starts_with("duplicate entry in safekeeper_connstrings:")
|
||||
),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
@@ -90,7 +90,6 @@ impl ComputeNode {
|
||||
}
|
||||
|
||||
/// If there is a prewarm request ongoing, return `false`, `true` otherwise.
|
||||
/// Has a failpoint "compute-prewarm"
|
||||
pub fn prewarm_lfc(self: &Arc<Self>, from_endpoint: Option<String>) -> bool {
|
||||
{
|
||||
let state = &mut self.state.lock().unwrap().lfc_prewarm_state;
|
||||
@@ -113,8 +112,9 @@ impl ComputeNode {
|
||||
Err(err) => {
|
||||
crate::metrics::LFC_PREWARM_ERRORS.inc();
|
||||
error!(%err, "could not prewarm LFC");
|
||||
|
||||
LfcPrewarmState::Failed {
|
||||
error: format!("{err:#}"),
|
||||
error: err.to_string(),
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -135,20 +135,16 @@ impl ComputeNode {
|
||||
async fn prewarm_impl(&self, from_endpoint: Option<String>) -> Result<bool> {
|
||||
let EndpointStoragePair { url, token } = self.endpoint_storage_pair(from_endpoint)?;
|
||||
|
||||
#[cfg(feature = "testing")]
|
||||
fail::fail_point!("compute-prewarm", |_| {
|
||||
bail!("prewarm configured to fail because of a failpoint")
|
||||
});
|
||||
|
||||
info!(%url, "requesting LFC state from endpoint storage");
|
||||
let request = Client::new().get(&url).bearer_auth(token);
|
||||
let res = request.send().await.context("querying endpoint storage")?;
|
||||
match res.status() {
|
||||
let status = res.status();
|
||||
match status {
|
||||
StatusCode::OK => (),
|
||||
StatusCode::NOT_FOUND => {
|
||||
return Ok(false);
|
||||
}
|
||||
status => bail!("{status} querying endpoint storage"),
|
||||
_ => bail!("{status} querying endpoint storage"),
|
||||
}
|
||||
|
||||
let mut uncompressed = Vec::new();
|
||||
@@ -209,7 +205,7 @@ impl ComputeNode {
|
||||
crate::metrics::LFC_OFFLOAD_ERRORS.inc();
|
||||
error!(%err, "could not offload LFC state to endpoint storage");
|
||||
self.state.lock().unwrap().lfc_offload_state = LfcOffloadState::Failed {
|
||||
error: format!("{err:#}"),
|
||||
error: err.to_string(),
|
||||
};
|
||||
}
|
||||
|
||||
@@ -217,22 +213,16 @@ impl ComputeNode {
|
||||
let EndpointStoragePair { url, token } = self.endpoint_storage_pair(None)?;
|
||||
info!(%url, "requesting LFC state from Postgres");
|
||||
|
||||
let row = ComputeNode::get_maintenance_client(&self.tokio_conn_conf)
|
||||
let mut compressed = Vec::new();
|
||||
ComputeNode::get_maintenance_client(&self.tokio_conn_conf)
|
||||
.await
|
||||
.context("connecting to postgres")?
|
||||
.query_one("select neon.get_local_cache_state()", &[])
|
||||
.await
|
||||
.context("querying LFC state")?;
|
||||
let state = row
|
||||
.try_get::<usize, Option<&[u8]>>(0)
|
||||
.context("deserializing LFC state")?;
|
||||
let Some(state) = state else {
|
||||
info!(%url, "empty LFC state, not exporting");
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
let mut compressed = Vec::new();
|
||||
ZstdEncoder::new(state)
|
||||
.context("querying LFC state")?
|
||||
.try_get::<usize, &[u8]>(0)
|
||||
.context("deserializing LFC state")
|
||||
.map(ZstdEncoder::new)?
|
||||
.read_to_end(&mut compressed)
|
||||
.await
|
||||
.context("compressing LFC state")?;
|
||||
|
||||
@@ -1,12 +1,11 @@
|
||||
use crate::compute::ComputeNode;
|
||||
use anyhow::{Context, Result, bail};
|
||||
use compute_api::responses::{LfcPrewarmState, PromoteConfig, PromoteState};
|
||||
use compute_api::spec::ComputeMode;
|
||||
use itertools::Itertools;
|
||||
use std::collections::HashMap;
|
||||
use compute_api::{
|
||||
responses::{LfcPrewarmState, PromoteState, SafekeepersLsn},
|
||||
spec::ComputeMode,
|
||||
};
|
||||
use std::{sync::Arc, time::Duration};
|
||||
use tokio::time::sleep;
|
||||
use tracing::info;
|
||||
use utils::lsn::Lsn;
|
||||
|
||||
impl ComputeNode {
|
||||
@@ -14,22 +13,21 @@ impl ComputeNode {
|
||||
/// and http client disconnects, this does not stop promotion, and subsequent
|
||||
/// calls block until promote finishes.
|
||||
/// Called by control plane on secondary after primary endpoint is terminated
|
||||
/// Has a failpoint "compute-promotion"
|
||||
pub async fn promote(self: &Arc<Self>, cfg: PromoteConfig) -> PromoteState {
|
||||
pub async fn promote(self: &Arc<Self>, safekeepers_lsn: SafekeepersLsn) -> PromoteState {
|
||||
let cloned = self.clone();
|
||||
let promote_fn = async move || {
|
||||
let Err(err) = cloned.promote_impl(cfg).await else {
|
||||
return PromoteState::Completed;
|
||||
};
|
||||
tracing::error!(%err, "promoting");
|
||||
PromoteState::Failed {
|
||||
error: format!("{err:#}"),
|
||||
}
|
||||
};
|
||||
|
||||
let start_promotion = || {
|
||||
let (tx, rx) = tokio::sync::watch::channel(PromoteState::NotPromoted);
|
||||
tokio::spawn(async move { tx.send(promote_fn().await) });
|
||||
tokio::spawn(async move {
|
||||
tx.send(match cloned.promote_impl(safekeepers_lsn).await {
|
||||
Ok(_) => PromoteState::Completed,
|
||||
Err(err) => {
|
||||
tracing::error!(%err, "promoting");
|
||||
PromoteState::Failed {
|
||||
error: err.to_string(),
|
||||
}
|
||||
}
|
||||
})
|
||||
});
|
||||
rx
|
||||
};
|
||||
|
||||
@@ -49,7 +47,9 @@ impl ComputeNode {
|
||||
task.borrow().clone()
|
||||
}
|
||||
|
||||
async fn promote_impl(&self, mut cfg: PromoteConfig) -> Result<()> {
|
||||
// Why do we have to supply safekeepers?
|
||||
// For secondary we use primary_connection_conninfo so safekeepers field is empty
|
||||
async fn promote_impl(&self, safekeepers_lsn: SafekeepersLsn) -> Result<()> {
|
||||
{
|
||||
let state = self.state.lock().unwrap();
|
||||
let mode = &state.pspec.as_ref().unwrap().spec.mode;
|
||||
@@ -73,7 +73,7 @@ impl ComputeNode {
|
||||
.await
|
||||
.context("connecting to postgres")?;
|
||||
|
||||
let primary_lsn = cfg.wal_flush_lsn;
|
||||
let primary_lsn = safekeepers_lsn.wal_flush_lsn;
|
||||
let mut last_wal_replay_lsn: Lsn = Lsn::INVALID;
|
||||
const RETRIES: i32 = 20;
|
||||
for i in 0..=RETRIES {
|
||||
@@ -86,7 +86,7 @@ impl ComputeNode {
|
||||
if last_wal_replay_lsn >= primary_lsn {
|
||||
break;
|
||||
}
|
||||
info!("Try {i}, replica lsn {last_wal_replay_lsn}, primary lsn {primary_lsn}");
|
||||
tracing::info!("Try {i}, replica lsn {last_wal_replay_lsn}, primary lsn {primary_lsn}");
|
||||
sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
if last_wal_replay_lsn < primary_lsn {
|
||||
@@ -96,7 +96,7 @@ impl ComputeNode {
|
||||
// using $1 doesn't work with ALTER SYSTEM SET
|
||||
let safekeepers_sql = format!(
|
||||
"ALTER SYSTEM SET neon.safekeepers='{}'",
|
||||
cfg.spec.safekeeper_connstrings.join(",")
|
||||
safekeepers_lsn.safekeepers
|
||||
);
|
||||
client
|
||||
.query(&safekeepers_sql, &[])
|
||||
@@ -106,12 +106,6 @@ impl ComputeNode {
|
||||
.query("SELECT pg_reload_conf()", &[])
|
||||
.await
|
||||
.context("reloading postgres config")?;
|
||||
|
||||
#[cfg(feature = "testing")]
|
||||
fail::fail_point!("compute-promotion", |_| {
|
||||
bail!("promotion configured to fail because of a failpoint")
|
||||
});
|
||||
|
||||
let row = client
|
||||
.query_one("SELECT * FROM pg_promote()", &[])
|
||||
.await
|
||||
@@ -131,36 +125,8 @@ impl ComputeNode {
|
||||
bail!("replica in read only mode after promotion");
|
||||
}
|
||||
|
||||
{
|
||||
let mut state = self.state.lock().unwrap();
|
||||
let spec = &mut state.pspec.as_mut().unwrap().spec;
|
||||
spec.mode = ComputeMode::Primary;
|
||||
let new_conf = cfg.spec.cluster.postgresql_conf.as_mut().unwrap();
|
||||
let existing_conf = spec.cluster.postgresql_conf.as_ref().unwrap();
|
||||
Self::merge_spec(new_conf, existing_conf);
|
||||
}
|
||||
info!("applied new spec, reconfiguring as primary");
|
||||
self.reconfigure()
|
||||
}
|
||||
|
||||
/// Merge old and new Postgres conf specs to apply on secondary.
|
||||
/// Change new spec's port and safekeepers since they are supplied
|
||||
/// differenly
|
||||
fn merge_spec(new_conf: &mut String, existing_conf: &str) {
|
||||
let mut new_conf_set: HashMap<&str, &str> = new_conf
|
||||
.split_terminator('\n')
|
||||
.map(|e| e.split_once("=").expect("invalid item"))
|
||||
.collect();
|
||||
new_conf_set.remove("neon.safekeepers");
|
||||
|
||||
let existing_conf_set: HashMap<&str, &str> = existing_conf
|
||||
.split_terminator('\n')
|
||||
.map(|e| e.split_once("=").expect("invalid item"))
|
||||
.collect();
|
||||
new_conf_set.insert("port", existing_conf_set["port"]);
|
||||
*new_conf = new_conf_set
|
||||
.iter()
|
||||
.map(|(k, v)| format!("{k}={v}"))
|
||||
.join("\n");
|
||||
let mut state = self.state.lock().unwrap();
|
||||
state.pspec.as_mut().unwrap().spec.mode = ComputeMode::Primary;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,6 +15,8 @@ use crate::pg_helpers::{
|
||||
};
|
||||
use crate::tls::{self, SERVER_CRT, SERVER_KEY};
|
||||
|
||||
use utils::shard::{ShardIndex, ShardNumber};
|
||||
|
||||
/// Check that `line` is inside a text file and put it there if it is not.
|
||||
/// Create file if it doesn't exist.
|
||||
pub fn line_in_file(path: &Path, line: &str) -> Result<bool> {
|
||||
@@ -56,15 +58,101 @@ pub fn write_postgres_conf(
|
||||
writeln!(file, "{conf}")?;
|
||||
}
|
||||
|
||||
// Stripe size GUC should be defined prior to connection string
|
||||
if let Some(stripe_size) = spec.shard_stripe_size {
|
||||
writeln!(file, "neon.stripe_size={stripe_size}")?;
|
||||
}
|
||||
// Add options for connecting to storage
|
||||
writeln!(file, "# Neon storage settings")?;
|
||||
if let Some(s) = &spec.pageserver_connstring {
|
||||
writeln!(file, "neon.pageserver_connstring={}", escape_conf_value(s))?;
|
||||
writeln!(file)?;
|
||||
if let Some(conninfo) = &spec.pageserver_connection_info {
|
||||
// Stripe size GUC should be defined prior to connection string
|
||||
if let Some(stripe_size) = conninfo.stripe_size {
|
||||
writeln!(
|
||||
file,
|
||||
"# from compute spec's pageserver_conninfo.stripe_size field"
|
||||
)?;
|
||||
writeln!(file, "neon.stripe_size={stripe_size}")?;
|
||||
}
|
||||
|
||||
let mut libpq_urls: Option<Vec<String>> = Some(Vec::new());
|
||||
let mut grpc_urls: Option<Vec<String>> = Some(Vec::new());
|
||||
let num_shards = if conninfo.shard_count.0 == 0 {
|
||||
1 // unsharded, treat it as a single shard
|
||||
} else {
|
||||
conninfo.shard_count.0
|
||||
};
|
||||
|
||||
for shard_number in 0..num_shards {
|
||||
let shard_index = ShardIndex {
|
||||
shard_number: ShardNumber(shard_number),
|
||||
shard_count: conninfo.shard_count,
|
||||
};
|
||||
let info = conninfo.shards.get(&shard_index).ok_or_else(|| {
|
||||
anyhow::anyhow!(
|
||||
"shard {shard_index} missing from pageserver_connection_info shard map"
|
||||
)
|
||||
})?;
|
||||
|
||||
let first_pageserver = info
|
||||
.pageservers
|
||||
.first()
|
||||
.expect("must have at least one pageserver");
|
||||
|
||||
// Add the libpq URL to the array, or if the URL is missing, reset the array
|
||||
// forgetting any previous entries. All servers must have a libpq URL, or none
|
||||
// at all.
|
||||
if let Some(url) = &first_pageserver.libpq_url {
|
||||
if let Some(ref mut urls) = libpq_urls {
|
||||
urls.push(url.clone());
|
||||
}
|
||||
} else {
|
||||
libpq_urls = None
|
||||
}
|
||||
// Similarly for gRPC URLs
|
||||
if let Some(url) = &first_pageserver.grpc_url {
|
||||
if let Some(ref mut urls) = grpc_urls {
|
||||
urls.push(url.clone());
|
||||
}
|
||||
} else {
|
||||
grpc_urls = None
|
||||
}
|
||||
}
|
||||
if let Some(libpq_urls) = libpq_urls {
|
||||
writeln!(
|
||||
file,
|
||||
"# derived from compute spec's pageserver_conninfo field"
|
||||
)?;
|
||||
writeln!(
|
||||
file,
|
||||
"neon.pageserver_connstring={}",
|
||||
escape_conf_value(&libpq_urls.join(","))
|
||||
)?;
|
||||
} else {
|
||||
writeln!(file, "# no neon.pageserver_connstring")?;
|
||||
}
|
||||
if let Some(grpc_urls) = grpc_urls {
|
||||
writeln!(
|
||||
file,
|
||||
"# derived from compute spec's pageserver_conninfo field"
|
||||
)?;
|
||||
writeln!(
|
||||
file,
|
||||
"neon.pageserver_grpc_urls={}",
|
||||
escape_conf_value(&grpc_urls.join(","))
|
||||
)?;
|
||||
} else {
|
||||
writeln!(file, "# no neon.pageserver_grpc_urls")?;
|
||||
}
|
||||
} else {
|
||||
// Stripe size GUC should be defined prior to connection string
|
||||
if let Some(stripe_size) = spec.shard_stripe_size {
|
||||
writeln!(file, "# from compute spec's shard_stripe_size field")?;
|
||||
writeln!(file, "neon.stripe_size={stripe_size}")?;
|
||||
}
|
||||
|
||||
if let Some(s) = &spec.pageserver_connstring {
|
||||
writeln!(file, "# from compute spec's pageserver_connstring field")?;
|
||||
writeln!(file, "neon.pageserver_connstring={}", escape_conf_value(s))?;
|
||||
}
|
||||
}
|
||||
|
||||
if !spec.safekeeper_connstrings.is_empty() {
|
||||
let mut neon_safekeepers_value = String::new();
|
||||
tracing::info!(
|
||||
|
||||
@@ -1,40 +1,23 @@
|
||||
use std::fs::File;
|
||||
use std::sync::Arc;
|
||||
use std::thread;
|
||||
use std::{path::Path, sync::Arc};
|
||||
|
||||
use anyhow::Result;
|
||||
use compute_api::responses::{ComputeConfig, ComputeStatus};
|
||||
use compute_api::responses::ComputeStatus;
|
||||
use tracing::{error, info, instrument};
|
||||
|
||||
use crate::compute::{ComputeNode, ParsedSpec};
|
||||
use crate::spec::get_config_from_control_plane;
|
||||
use crate::compute::ComputeNode;
|
||||
|
||||
#[instrument(skip_all)]
|
||||
fn configurator_main_loop(compute: &Arc<ComputeNode>) {
|
||||
info!("waiting for reconfiguration requests");
|
||||
loop {
|
||||
let mut state = compute.state.lock().unwrap();
|
||||
/* BEGIN_HADRON */
|
||||
// RefreshConfiguration should only be used inside the loop
|
||||
assert_ne!(state.status, ComputeStatus::RefreshConfiguration);
|
||||
/* END_HADRON */
|
||||
|
||||
if compute.params.lakebase_mode {
|
||||
while state.status != ComputeStatus::ConfigurationPending
|
||||
&& state.status != ComputeStatus::RefreshConfigurationPending
|
||||
&& state.status != ComputeStatus::Failed
|
||||
{
|
||||
info!("configurator: compute status: {:?}, sleeping", state.status);
|
||||
state = compute.state_changed.wait(state).unwrap();
|
||||
}
|
||||
} else {
|
||||
// We have to re-check the status after re-acquiring the lock because it could be that
|
||||
// the status has changed while we were waiting for the lock, and we might not need to
|
||||
// wait on the condition variable. Otherwise, we might end up in some soft-/deadlock, i.e.
|
||||
// we are waiting for a condition variable that will never be signaled.
|
||||
if state.status != ComputeStatus::ConfigurationPending {
|
||||
state = compute.state_changed.wait(state).unwrap();
|
||||
}
|
||||
// We have to re-check the status after re-acquiring the lock because it could be that
|
||||
// the status has changed while we were waiting for the lock, and we might not need to
|
||||
// wait on the condition variable. Otherwise, we might end up in some soft-/deadlock, i.e.
|
||||
// we are waiting for a condition variable that will never be signaled.
|
||||
if state.status != ComputeStatus::ConfigurationPending {
|
||||
state = compute.state_changed.wait(state).unwrap();
|
||||
}
|
||||
|
||||
// Re-check the status after waking up
|
||||
@@ -54,133 +37,6 @@ fn configurator_main_loop(compute: &Arc<ComputeNode>) {
|
||||
// XXX: used to test that API is blocking
|
||||
// std::thread::sleep(std::time::Duration::from_millis(10000));
|
||||
|
||||
compute.set_status(new_status);
|
||||
} else if state.status == ComputeStatus::RefreshConfigurationPending {
|
||||
info!(
|
||||
"compute node suspects its configuration is out of date, now refreshing configuration"
|
||||
);
|
||||
state.set_status(ComputeStatus::RefreshConfiguration, &compute.state_changed);
|
||||
// Drop the lock guard here to avoid holding the lock while downloading config from the control plane / HCC.
|
||||
// This is the only thread that can move compute_ctl out of the `RefreshConfiguration` state, so it
|
||||
// is safe to drop the lock like this.
|
||||
drop(state);
|
||||
|
||||
let get_config_result: anyhow::Result<ComputeConfig> =
|
||||
if let Some(config_path) = &compute.params.config_path_test_only {
|
||||
// This path is only to make testing easier. In production we always get the config from the HCC.
|
||||
info!(
|
||||
"reloading config.json from path: {}",
|
||||
config_path.to_string_lossy()
|
||||
);
|
||||
let path = Path::new(config_path);
|
||||
if let Ok(file) = File::open(path) {
|
||||
match serde_json::from_reader::<File, ComputeConfig>(file) {
|
||||
Ok(config) => Ok(config),
|
||||
Err(e) => {
|
||||
error!("could not parse config file: {}", e);
|
||||
Err(anyhow::anyhow!("could not parse config file: {}", e))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
error!(
|
||||
"could not open config file at path: {:?}",
|
||||
config_path.to_string_lossy()
|
||||
);
|
||||
Err(anyhow::anyhow!(
|
||||
"could not open config file at path: {}",
|
||||
config_path.to_string_lossy()
|
||||
))
|
||||
}
|
||||
} else if let Some(control_plane_uri) = &compute.params.control_plane_uri {
|
||||
get_config_from_control_plane(control_plane_uri, &compute.params.compute_id)
|
||||
} else {
|
||||
Err(anyhow::anyhow!("config_path_test_only is not set"))
|
||||
};
|
||||
|
||||
// Parse any received ComputeSpec and transpose the result into a Result<Option<ParsedSpec>>.
|
||||
let parsed_spec_result: Result<Option<ParsedSpec>> =
|
||||
get_config_result.and_then(|config| {
|
||||
if let Some(spec) = config.spec {
|
||||
if let Ok(pspec) = ParsedSpec::try_from(spec) {
|
||||
Ok(Some(pspec))
|
||||
} else {
|
||||
Err(anyhow::anyhow!("could not parse spec"))
|
||||
}
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
});
|
||||
|
||||
let new_status: ComputeStatus;
|
||||
match parsed_spec_result {
|
||||
// Control plane (HCM) returned a spec and we were able to parse it.
|
||||
Ok(Some(pspec)) => {
|
||||
{
|
||||
let mut state = compute.state.lock().unwrap();
|
||||
// Defensive programming to make sure this thread is indeed the only one that can move the compute
|
||||
// node out of the `RefreshConfiguration` state. Would be nice if we can encode this invariant
|
||||
// into the type system.
|
||||
assert_eq!(state.status, ComputeStatus::RefreshConfiguration);
|
||||
|
||||
if state.pspec.as_ref().map(|ps| ps.pageserver_connstr.clone())
|
||||
== Some(pspec.pageserver_connstr.clone())
|
||||
{
|
||||
info!(
|
||||
"Refresh configuration: Retrieved spec is the same as the current spec. Waiting for control plane to update the spec before attempting reconfiguration."
|
||||
);
|
||||
state.status = ComputeStatus::Running;
|
||||
compute.state_changed.notify_all();
|
||||
drop(state);
|
||||
std::thread::sleep(std::time::Duration::from_secs(5));
|
||||
continue;
|
||||
}
|
||||
// state.pspec is consumed by compute.reconfigure() below. Note that compute.reconfigure() will acquire
|
||||
// the compute.state lock again so we need to have the lock guard go out of scope here. We could add a
|
||||
// "locked" variant of compute.reconfigure() that takes the lock guard as an argument to make this cleaner,
|
||||
// but it's not worth forking the codebase too much for this minor point alone right now.
|
||||
state.pspec = Some(pspec);
|
||||
}
|
||||
match compute.reconfigure() {
|
||||
Ok(_) => {
|
||||
info!("Refresh configuration: compute node configured");
|
||||
new_status = ComputeStatus::Running;
|
||||
}
|
||||
Err(e) => {
|
||||
error!(
|
||||
"Refresh configuration: could not configure compute node: {}",
|
||||
e
|
||||
);
|
||||
// Set the compute node back to the `RefreshConfigurationPending` state if the configuration
|
||||
// was not successful. It should be okay to treat this situation the same as if the loop
|
||||
// hasn't executed yet as long as the detection side keeps notifying.
|
||||
new_status = ComputeStatus::RefreshConfigurationPending;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Control plane (HCM)'s response does not contain a spec. This is the "Empty" attachment case.
|
||||
Ok(None) => {
|
||||
info!(
|
||||
"Compute Manager signaled that this compute is no longer attached to any storage. Exiting."
|
||||
);
|
||||
// We just immediately terminate the whole compute_ctl in this case. It's not necessary to attempt a
|
||||
// clean shutdown as Postgres is probably not responding anyway (which is why we are in this refresh
|
||||
// configuration state).
|
||||
std::process::exit(1);
|
||||
}
|
||||
// Various error cases:
|
||||
// - The request to the control plane (HCM) either failed or returned a malformed spec.
|
||||
// - compute_ctl itself is configured incorrectly (e.g., compute_id is not set).
|
||||
Err(e) => {
|
||||
error!(
|
||||
"Refresh configuration: error getting a parsed spec: {:?}",
|
||||
e
|
||||
);
|
||||
new_status = ComputeStatus::RefreshConfigurationPending;
|
||||
// We may be dealing with an overloaded HCM if we end up in this path. Backoff 5 seconds before
|
||||
// retrying to avoid hammering the HCM.
|
||||
std::thread::sleep(std::time::Duration::from_secs(5));
|
||||
}
|
||||
}
|
||||
compute.set_status(new_status);
|
||||
} else if state.status == ComputeStatus::Failed {
|
||||
info!("compute node is now in Failed state, exiting");
|
||||
|
||||
@@ -1,60 +0,0 @@
|
||||
use metrics::{
|
||||
IntCounter, IntGaugeVec, core::Collector, proto::MetricFamily, register_int_counter,
|
||||
register_int_gauge_vec,
|
||||
};
|
||||
use once_cell::sync::Lazy;
|
||||
|
||||
// Counter keeping track of the number of PageStream request errors reported by Postgres.
|
||||
// An error is registered every time Postgres calls compute_ctl's /refresh_configuration API.
|
||||
// Postgres will invoke this API if it detected trouble with PageStream requests (get_page@lsn,
|
||||
// get_base_backup, etc.) it sends to any pageserver. An increase in this counter value typically
|
||||
// indicates Postgres downtime, as PageStream requests are critical for Postgres to function.
|
||||
pub static POSTGRES_PAGESTREAM_REQUEST_ERRORS: Lazy<IntCounter> = Lazy::new(|| {
|
||||
register_int_counter!(
|
||||
"pg_cctl_pagestream_request_errors_total",
|
||||
"Number of PageStream request errors reported by the postgres process"
|
||||
)
|
||||
.expect("failed to define a metric")
|
||||
});
|
||||
|
||||
// Counter keeping track of the number of compute configuration errors due to Postgres statement
|
||||
// timeouts. An error is registered every time `ComputeNode::reconfigure()` fails due to Postgres
|
||||
// error code 57014 (query cancelled). This statement timeout typically occurs when postgres is
|
||||
// stuck in a problematic retry loop when the PS is reject its connection requests (usually due
|
||||
// to PG pointing at the wrong PS). We should investigate the root cause when this counter value
|
||||
// increases by checking PG and PS logs.
|
||||
pub static COMPUTE_CONFIGURE_STATEMENT_TIMEOUT_ERRORS: Lazy<IntCounter> = Lazy::new(|| {
|
||||
register_int_counter!(
|
||||
"pg_cctl_configure_statement_timeout_errors_total",
|
||||
"Number of compute configuration errors due to Postgres statement timeouts."
|
||||
)
|
||||
.expect("failed to define a metric")
|
||||
});
|
||||
|
||||
pub static COMPUTE_ATTACHED: Lazy<IntGaugeVec> = Lazy::new(|| {
|
||||
register_int_gauge_vec!(
|
||||
"pg_cctl_attached",
|
||||
"Compute node attached status (1 if attached)",
|
||||
&[
|
||||
"pg_compute_id",
|
||||
"pg_instance_id",
|
||||
"tenant_id",
|
||||
"timeline_id"
|
||||
]
|
||||
)
|
||||
.expect("failed to define a metric")
|
||||
});
|
||||
|
||||
pub fn collect() -> Vec<MetricFamily> {
|
||||
let mut metrics = Vec::new();
|
||||
metrics.extend(POSTGRES_PAGESTREAM_REQUEST_ERRORS.collect());
|
||||
metrics.extend(COMPUTE_CONFIGURE_STATEMENT_TIMEOUT_ERRORS.collect());
|
||||
metrics.extend(COMPUTE_ATTACHED.collect());
|
||||
metrics
|
||||
}
|
||||
|
||||
pub fn initialize_metrics() {
|
||||
Lazy::force(&POSTGRES_PAGESTREAM_REQUEST_ERRORS);
|
||||
Lazy::force(&COMPUTE_CONFIGURE_STATEMENT_TIMEOUT_ERRORS);
|
||||
Lazy::force(&COMPUTE_ATTACHED);
|
||||
}
|
||||
@@ -16,29 +16,13 @@ use crate::http::JsonResponse;
|
||||
#[derive(Clone, Debug)]
|
||||
pub(in crate::http) struct Authorize {
|
||||
compute_id: String,
|
||||
// BEGIN HADRON
|
||||
// Hadron instance ID. Only set if it's a Lakebase V1 a.k.a. Hadron instance.
|
||||
instance_id: Option<String>,
|
||||
// END HADRON
|
||||
jwks: JwkSet,
|
||||
validation: Validation,
|
||||
}
|
||||
|
||||
impl Authorize {
|
||||
pub fn new(compute_id: String, instance_id: Option<String>, jwks: JwkSet) -> Self {
|
||||
pub fn new(compute_id: String, jwks: JwkSet) -> Self {
|
||||
let mut validation = Validation::new(Algorithm::EdDSA);
|
||||
|
||||
// BEGIN HADRON
|
||||
let use_rsa = jwks.keys.iter().any(|jwk| {
|
||||
jwk.common
|
||||
.key_algorithm
|
||||
.is_some_and(|alg| alg == jsonwebtoken::jwk::KeyAlgorithm::RS256)
|
||||
});
|
||||
if use_rsa {
|
||||
validation = Validation::new(Algorithm::RS256);
|
||||
}
|
||||
// END HADRON
|
||||
|
||||
validation.validate_exp = true;
|
||||
// Unused by the control plane
|
||||
validation.validate_nbf = false;
|
||||
@@ -50,7 +34,6 @@ impl Authorize {
|
||||
|
||||
Self {
|
||||
compute_id,
|
||||
instance_id,
|
||||
jwks,
|
||||
validation,
|
||||
}
|
||||
@@ -64,20 +47,10 @@ impl AsyncAuthorizeRequest<Body> for Authorize {
|
||||
|
||||
fn authorize(&mut self, mut request: Request<Body>) -> Self::Future {
|
||||
let compute_id = self.compute_id.clone();
|
||||
let is_hadron_instance = self.instance_id.is_some();
|
||||
let jwks = self.jwks.clone();
|
||||
let validation = self.validation.clone();
|
||||
|
||||
Box::pin(async move {
|
||||
// BEGIN HADRON
|
||||
// In Hadron deployments the "external" HTTP endpoint on compute_ctl can only be
|
||||
// accessed by trusted components (enforced by dblet network policy), so we can bypass
|
||||
// all auth here.
|
||||
if is_hadron_instance {
|
||||
return Ok(request);
|
||||
}
|
||||
// END HADRON
|
||||
|
||||
let TypedHeader(Authorization(bearer)) = request
|
||||
.extract_parts::<TypedHeader<Authorization<Bearer>>>()
|
||||
.await
|
||||
|
||||
@@ -96,7 +96,7 @@ paths:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/ComputeSchemaWithLsn"
|
||||
$ref: "#/components/schemas/SafekeepersLsn"
|
||||
responses:
|
||||
200:
|
||||
description: Promote succeeded or wasn't started
|
||||
@@ -297,7 +297,14 @@ paths:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/ComputeSchema"
|
||||
type: object
|
||||
required:
|
||||
- spec
|
||||
properties:
|
||||
spec:
|
||||
# XXX: I don't want to explain current spec in the OpenAPI format,
|
||||
# as it could be changed really soon. Consider doing it later.
|
||||
type: object
|
||||
responses:
|
||||
200:
|
||||
description: Compute configuration finished.
|
||||
@@ -584,25 +591,18 @@ components:
|
||||
type: string
|
||||
example: "1.0.0"
|
||||
|
||||
ComputeSchema:
|
||||
SafekeepersLsn:
|
||||
type: object
|
||||
required:
|
||||
- spec
|
||||
properties:
|
||||
spec:
|
||||
type: object
|
||||
ComputeSchemaWithLsn:
|
||||
type: object
|
||||
required:
|
||||
- spec
|
||||
- safekeepers
|
||||
- wal_flush_lsn
|
||||
properties:
|
||||
spec:
|
||||
$ref: "#/components/schemas/ComputeState"
|
||||
wal_flush_lsn:
|
||||
safekeepers:
|
||||
description: Primary replica safekeepers
|
||||
type: string
|
||||
wal_flush_lsn:
|
||||
description: Primary last WAL flush LSN
|
||||
type: string
|
||||
description: "last WAL flush LSN"
|
||||
example: "0/028F10D8"
|
||||
|
||||
LfcPrewarmState:
|
||||
type: object
|
||||
|
||||
@@ -43,12 +43,7 @@ pub(in crate::http) async fn configure(
|
||||
// configure request for tracing purposes.
|
||||
state.startup_span = Some(tracing::Span::current());
|
||||
|
||||
if compute.params.lakebase_mode {
|
||||
ComputeNode::set_spec(&compute.params, &mut state, pspec);
|
||||
} else {
|
||||
state.pspec = Some(pspec);
|
||||
}
|
||||
|
||||
state.pspec = Some(pspec);
|
||||
state.set_status(ComputeStatus::ConfigurationPending, &compute.state_changed);
|
||||
drop(state);
|
||||
}
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
use crate::pg_isready::pg_isready;
|
||||
use crate::{compute::ComputeNode, http::JsonResponse};
|
||||
use axum::{extract::State, http::StatusCode, response::Response};
|
||||
use std::sync::Arc;
|
||||
|
||||
/// NOTE: NOT ENABLED YET
|
||||
/// Detect if the compute is alive.
|
||||
/// Called by the liveness probe of the compute container.
|
||||
pub(in crate::http) async fn hadron_liveness_probe(
|
||||
State(compute): State<Arc<ComputeNode>>,
|
||||
) -> Response {
|
||||
let port = match compute.params.connstr.port() {
|
||||
Some(port) => port,
|
||||
None => {
|
||||
return JsonResponse::error(
|
||||
StatusCode::INTERNAL_SERVER_ERROR,
|
||||
"Failed to get the port from the connection string",
|
||||
);
|
||||
}
|
||||
};
|
||||
match pg_isready(&compute.params.pg_isready_bin, port) {
|
||||
Ok(_) => {
|
||||
// The connection is successful, so the compute is alive.
|
||||
// Return a 200 OK response.
|
||||
JsonResponse::success(StatusCode::OK, "ok")
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!("Hadron liveness probe failed: {}", e);
|
||||
// The connection failed, so the compute is not alive.
|
||||
// Return a 500 Internal Server Error response.
|
||||
JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -13,7 +13,6 @@ use metrics::{Encoder, TextEncoder};
|
||||
|
||||
use crate::communicator_socket_client::connect_communicator_socket;
|
||||
use crate::compute::ComputeNode;
|
||||
use crate::hadron_metrics;
|
||||
use crate::http::JsonResponse;
|
||||
use crate::metrics::collect;
|
||||
|
||||
@@ -22,18 +21,11 @@ pub(in crate::http) async fn get_metrics() -> Response {
|
||||
// When we call TextEncoder::encode() below, it will immediately return an
|
||||
// error if a metric family has no metrics, so we need to preemptively
|
||||
// filter out metric families with no metrics.
|
||||
let mut metrics = collect()
|
||||
let metrics = collect()
|
||||
.into_iter()
|
||||
.filter(|m| !m.get_metric().is_empty())
|
||||
.collect::<Vec<MetricFamily>>();
|
||||
|
||||
// Add Hadron metrics.
|
||||
let hadron_metrics: Vec<MetricFamily> = hadron_metrics::collect()
|
||||
.into_iter()
|
||||
.filter(|m| !m.get_metric().is_empty())
|
||||
.collect();
|
||||
metrics.extend(hadron_metrics);
|
||||
|
||||
let encoder = TextEncoder::new();
|
||||
let mut buffer = vec![];
|
||||
|
||||
|
||||
@@ -10,13 +10,11 @@ pub(in crate::http) mod extension_server;
|
||||
pub(in crate::http) mod extensions;
|
||||
pub(in crate::http) mod failpoints;
|
||||
pub(in crate::http) mod grants;
|
||||
pub(in crate::http) mod hadron_liveness_probe;
|
||||
pub(in crate::http) mod insights;
|
||||
pub(in crate::http) mod lfc;
|
||||
pub(in crate::http) mod metrics;
|
||||
pub(in crate::http) mod metrics_json;
|
||||
pub(in crate::http) mod promote;
|
||||
pub(in crate::http) mod refresh_configuration;
|
||||
pub(in crate::http) mod status;
|
||||
pub(in crate::http) mod terminate;
|
||||
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
use crate::http::JsonResponse;
|
||||
use axum::extract::Json;
|
||||
use axum::Form;
|
||||
use http::StatusCode;
|
||||
|
||||
pub(in crate::http) async fn promote(
|
||||
compute: axum::extract::State<std::sync::Arc<crate::compute::ComputeNode>>,
|
||||
Json(cfg): Json<compute_api::responses::PromoteConfig>,
|
||||
Form(safekeepers_lsn): Form<compute_api::responses::SafekeepersLsn>,
|
||||
) -> axum::response::Response {
|
||||
let state = compute.promote(cfg).await;
|
||||
if let compute_api::responses::PromoteState::Failed { error: _ } = state {
|
||||
return JsonResponse::create_response(StatusCode::INTERNAL_SERVER_ERROR, state);
|
||||
let state = compute.promote(safekeepers_lsn).await;
|
||||
if let compute_api::responses::PromoteState::Failed { error } = state {
|
||||
return JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, error);
|
||||
}
|
||||
JsonResponse::success(StatusCode::OK, state)
|
||||
}
|
||||
|
||||
@@ -1,29 +0,0 @@
|
||||
// This file is added by Hadron
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use axum::{
|
||||
extract::State,
|
||||
response::{IntoResponse, Response},
|
||||
};
|
||||
use http::StatusCode;
|
||||
|
||||
use crate::compute::ComputeNode;
|
||||
use crate::hadron_metrics::POSTGRES_PAGESTREAM_REQUEST_ERRORS;
|
||||
use crate::http::JsonResponse;
|
||||
|
||||
/// The /refresh_configuration POST method is used to nudge compute_ctl to pull a new spec
|
||||
/// from the HCC and attempt to reconfigure Postgres with the new spec. The method does not wait
|
||||
/// for the reconfiguration to complete. Rather, it simply delivers a signal that will cause
|
||||
/// configuration to be reloaded in a best effort manner. Invocation of this method does not
|
||||
/// guarantee that a reconfiguration will occur. The caller should consider keep sending this
|
||||
/// request while it believes that the compute configuration is out of date.
|
||||
pub(in crate::http) async fn refresh_configuration(
|
||||
State(compute): State<Arc<ComputeNode>>,
|
||||
) -> Response {
|
||||
POSTGRES_PAGESTREAM_REQUEST_ERRORS.inc();
|
||||
match compute.signal_refresh_configuration().await {
|
||||
Ok(_) => StatusCode::OK.into_response(),
|
||||
Err(e) => JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e),
|
||||
}
|
||||
}
|
||||
@@ -1,7 +1,7 @@
|
||||
use crate::compute::{ComputeNode, forward_termination_signal};
|
||||
use crate::http::JsonResponse;
|
||||
use axum::extract::State;
|
||||
use axum::response::{IntoResponse, Response};
|
||||
use axum::response::Response;
|
||||
use axum_extra::extract::OptionalQuery;
|
||||
use compute_api::responses::{ComputeStatus, TerminateMode, TerminateResponse};
|
||||
use http::StatusCode;
|
||||
@@ -33,29 +33,7 @@ pub(in crate::http) async fn terminate(
|
||||
if !matches!(state.status, ComputeStatus::Empty | ComputeStatus::Running) {
|
||||
return JsonResponse::invalid_status(state.status);
|
||||
}
|
||||
|
||||
// If compute is Empty, there's no Postgres to terminate. The regular compute_ctl termination path
|
||||
// assumes Postgres to be configured and running, so we just special-handle this case by exiting
|
||||
// the process directly.
|
||||
if compute.params.lakebase_mode && state.status == ComputeStatus::Empty {
|
||||
drop(state);
|
||||
info!("terminating empty compute - will exit process");
|
||||
|
||||
// Queue a task to exit the process after 5 seconds. The 5-second delay aims to
|
||||
// give enough time for the HTTP response to be sent so that HCM doesn't get an abrupt
|
||||
// connection termination.
|
||||
tokio::spawn(async {
|
||||
tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
|
||||
info!("exiting process after terminating empty compute");
|
||||
std::process::exit(0);
|
||||
});
|
||||
|
||||
return StatusCode::OK.into_response();
|
||||
}
|
||||
|
||||
// For Running status, proceed with normal termination
|
||||
state.set_status(mode.into(), &compute.state_changed);
|
||||
drop(state);
|
||||
}
|
||||
|
||||
forward_termination_signal(false);
|
||||
|
||||
@@ -23,8 +23,7 @@ use super::{
|
||||
middleware::authorize::Authorize,
|
||||
routes::{
|
||||
check_writability, configure, database_schema, dbs_and_roles, extension_server, extensions,
|
||||
grants, hadron_liveness_probe, insights, lfc, metrics, metrics_json, promote,
|
||||
refresh_configuration, status, terminate,
|
||||
grants, insights, lfc, metrics, metrics_json, promote, status, terminate,
|
||||
},
|
||||
};
|
||||
use crate::compute::ComputeNode;
|
||||
@@ -44,7 +43,6 @@ pub enum Server {
|
||||
port: u16,
|
||||
config: ComputeCtlConfig,
|
||||
compute_id: String,
|
||||
instance_id: Option<String>,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -69,12 +67,7 @@ impl From<&Server> for Router<Arc<ComputeNode>> {
|
||||
post(extension_server::download_extension),
|
||||
)
|
||||
.route("/extensions", post(extensions::install_extension))
|
||||
.route("/grants", post(grants::add_grant))
|
||||
// Hadron: Compute-initiated configuration refresh
|
||||
.route(
|
||||
"/refresh_configuration",
|
||||
post(refresh_configuration::refresh_configuration),
|
||||
);
|
||||
.route("/grants", post(grants::add_grant));
|
||||
|
||||
// Add in any testing support
|
||||
if cfg!(feature = "testing") {
|
||||
@@ -86,10 +79,7 @@ impl From<&Server> for Router<Arc<ComputeNode>> {
|
||||
router
|
||||
}
|
||||
Server::External {
|
||||
config,
|
||||
compute_id,
|
||||
instance_id,
|
||||
..
|
||||
config, compute_id, ..
|
||||
} => {
|
||||
let unauthenticated_router = Router::<Arc<ComputeNode>>::new()
|
||||
.route("/metrics", get(metrics::get_metrics))
|
||||
@@ -110,13 +100,8 @@ impl From<&Server> for Router<Arc<ComputeNode>> {
|
||||
.route("/metrics.json", get(metrics_json::get_metrics))
|
||||
.route("/status", get(status::get_status))
|
||||
.route("/terminate", post(terminate::terminate))
|
||||
.route(
|
||||
"/hadron_liveness_probe",
|
||||
get(hadron_liveness_probe::hadron_liveness_probe),
|
||||
)
|
||||
.layer(AsyncRequireAuthorizationLayer::new(Authorize::new(
|
||||
compute_id.clone(),
|
||||
instance_id.clone(),
|
||||
config.jwks.clone(),
|
||||
)));
|
||||
|
||||
|
||||
@@ -2,7 +2,6 @@ use std::collections::HashMap;
|
||||
|
||||
use anyhow::Result;
|
||||
use compute_api::responses::{InstalledExtension, InstalledExtensions};
|
||||
use once_cell::sync::Lazy;
|
||||
use tokio_postgres::error::Error as PostgresError;
|
||||
use tokio_postgres::{Client, Config, NoTls};
|
||||
|
||||
@@ -120,7 +119,3 @@ pub async fn get_installed_extensions(
|
||||
extensions: extensions_map.into_values().collect(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn initialize_metrics() {
|
||||
Lazy::force(&INSTALLED_EXTENSIONS);
|
||||
}
|
||||
|
||||
@@ -16,7 +16,6 @@ pub mod compute_prewarm;
|
||||
pub mod compute_promote;
|
||||
pub mod disk_quota;
|
||||
pub mod extension_server;
|
||||
pub mod hadron_metrics;
|
||||
pub mod installed_extensions;
|
||||
pub mod local_proxy;
|
||||
pub mod lsn_lease;
|
||||
@@ -25,7 +24,6 @@ mod migration;
|
||||
pub mod monitor;
|
||||
pub mod params;
|
||||
pub mod pg_helpers;
|
||||
pub mod pg_isready;
|
||||
pub mod pgbouncer;
|
||||
pub mod rsyslog;
|
||||
pub mod spec;
|
||||
|
||||
@@ -1,10 +1,7 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{LazyLock, RwLock};
|
||||
use tracing::Subscriber;
|
||||
use tracing::info;
|
||||
use tracing_appender;
|
||||
use tracing_subscriber::layer::SubscriberExt;
|
||||
use tracing_subscriber::prelude::*;
|
||||
use tracing_subscriber::{fmt, layer::SubscriberExt, registry::LookupSpan};
|
||||
|
||||
/// Initialize logging to stderr, and OpenTelemetry tracing and exporter.
|
||||
///
|
||||
@@ -18,44 +15,16 @@ use tracing_subscriber::{fmt, layer::SubscriberExt, registry::LookupSpan};
|
||||
///
|
||||
pub fn init_tracing_and_logging(
|
||||
default_log_level: &str,
|
||||
log_dir_opt: &Option<String>,
|
||||
) -> anyhow::Result<(
|
||||
Option<tracing_utils::Provider>,
|
||||
Option<tracing_appender::non_blocking::WorkerGuard>,
|
||||
)> {
|
||||
) -> anyhow::Result<Option<tracing_utils::Provider>> {
|
||||
// Initialize Logging
|
||||
let env_filter = tracing_subscriber::EnvFilter::try_from_default_env()
|
||||
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new(default_log_level));
|
||||
|
||||
// Standard output streams
|
||||
let fmt_layer = tracing_subscriber::fmt::layer()
|
||||
.with_ansi(false)
|
||||
.with_target(false)
|
||||
.with_writer(std::io::stderr);
|
||||
|
||||
// Logs with file rotation. Files in `$log_dir/pgcctl.yyyy-MM-dd`
|
||||
let (json_to_file_layer, _file_logs_guard) = if let Some(log_dir) = log_dir_opt {
|
||||
std::fs::create_dir_all(log_dir)?;
|
||||
let file_logs_appender = tracing_appender::rolling::RollingFileAppender::builder()
|
||||
.rotation(tracing_appender::rolling::Rotation::DAILY)
|
||||
.filename_prefix("pgcctl")
|
||||
// Lib appends to existing files, so we will keep files for up to 2 days even on restart loops.
|
||||
// At minimum, log-daemon will have 1 day to detect and upload a file (if created right before midnight).
|
||||
.max_log_files(2)
|
||||
.build(log_dir)
|
||||
.expect("Initializing rolling file appender should succeed");
|
||||
let (file_logs_writer, _file_logs_guard) =
|
||||
tracing_appender::non_blocking(file_logs_appender);
|
||||
let json_to_file_layer = tracing_subscriber::fmt::layer()
|
||||
.with_ansi(false)
|
||||
.with_target(false)
|
||||
.event_format(PgJsonLogShapeFormatter)
|
||||
.with_writer(file_logs_writer);
|
||||
(Some(json_to_file_layer), Some(_file_logs_guard))
|
||||
} else {
|
||||
(None, None)
|
||||
};
|
||||
|
||||
// Initialize OpenTelemetry
|
||||
let provider =
|
||||
tracing_utils::init_tracing("compute_ctl", tracing_utils::ExportConfig::default());
|
||||
@@ -66,13 +35,12 @@ pub fn init_tracing_and_logging(
|
||||
.with(env_filter)
|
||||
.with(otlp_layer)
|
||||
.with(fmt_layer)
|
||||
.with(json_to_file_layer)
|
||||
.init();
|
||||
tracing::info!("logging and tracing started");
|
||||
|
||||
utils::logging::replace_panic_hook_with_tracing_panic_hook().forget();
|
||||
|
||||
Ok((provider, _file_logs_guard))
|
||||
Ok(provider)
|
||||
}
|
||||
|
||||
/// Replace all newline characters with a special character to make it
|
||||
@@ -127,157 +95,3 @@ pub fn startup_context_from_env() -> Option<opentelemetry::Context> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Track relevant id's
|
||||
const UNKNOWN_IDS: &str = r#""pg_instance_id": "", "pg_compute_id": """#;
|
||||
static IDS: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new(UNKNOWN_IDS.to_string()));
|
||||
|
||||
pub fn update_ids(instance_id: &Option<String>, compute_id: &Option<String>) -> anyhow::Result<()> {
|
||||
let ids = format!(
|
||||
r#""pg_instance_id": "{}", "pg_compute_id": "{}""#,
|
||||
instance_id.as_ref().map(|s| s.as_str()).unwrap_or_default(),
|
||||
compute_id.as_ref().map(|s| s.as_str()).unwrap_or_default()
|
||||
);
|
||||
let mut guard = IDS
|
||||
.write()
|
||||
.map_err(|e| anyhow::anyhow!("Log set id's rwlock poisoned: {}", e))?;
|
||||
*guard = ids;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Massage compute_ctl logs into PG json log shape so we can use the same Lumberjack setup.
|
||||
struct PgJsonLogShapeFormatter;
|
||||
impl<S, N> fmt::format::FormatEvent<S, N> for PgJsonLogShapeFormatter
|
||||
where
|
||||
S: Subscriber + for<'a> LookupSpan<'a>,
|
||||
N: for<'a> fmt::format::FormatFields<'a> + 'static,
|
||||
{
|
||||
fn format_event(
|
||||
&self,
|
||||
ctx: &fmt::FmtContext<'_, S, N>,
|
||||
mut writer: fmt::format::Writer<'_>,
|
||||
event: &tracing::Event<'_>,
|
||||
) -> std::fmt::Result {
|
||||
// Format values from the event's metadata, and open message string
|
||||
let metadata = event.metadata();
|
||||
{
|
||||
let ids_guard = IDS.read();
|
||||
let ids = ids_guard
|
||||
.as_ref()
|
||||
.map(|guard| guard.as_str())
|
||||
// Surpress so that we don't lose all uploaded/ file logs if something goes super wrong. We would notice the missing id's.
|
||||
.unwrap_or(UNKNOWN_IDS);
|
||||
write!(
|
||||
&mut writer,
|
||||
r#"{{"timestamp": "{}", "error_severity": "{}", "file_name": "{}", "backend_type": "compute_ctl_self", {}, "message": "#,
|
||||
chrono::Utc::now().format("%Y-%m-%d %H:%M:%S%.3f GMT"),
|
||||
metadata.level(),
|
||||
metadata.target(),
|
||||
ids
|
||||
)?;
|
||||
}
|
||||
|
||||
let mut message = String::new();
|
||||
let message_writer = fmt::format::Writer::new(&mut message);
|
||||
|
||||
// Gather the message
|
||||
ctx.field_format().format_fields(message_writer, event)?;
|
||||
|
||||
// TODO: any better options than to copy-paste this OSS span formatter?
|
||||
// impl<S, N, T> FormatEvent<S, N> for Format<Full, T>
|
||||
// https://docs.rs/tracing-subscriber/latest/tracing_subscriber/fmt/trait.FormatEvent.html#impl-FormatEvent%3CS,+N%3E-for-Format%3CFull,+T%3E
|
||||
|
||||
// write message, close bracket, and new line
|
||||
writeln!(writer, "{}}}", serde_json::to_string(&message).unwrap())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "testing")]
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use std::{cell::RefCell, io};
|
||||
|
||||
// Use thread_local! instead of Mutex for test isolation
|
||||
thread_local! {
|
||||
static WRITER_OUTPUT: RefCell<String> = const { RefCell::new(String::new()) };
|
||||
}
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
struct StaticStringWriter;
|
||||
|
||||
impl io::Write for StaticStringWriter {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
let output = String::from_utf8(buf.to_vec()).expect("Invalid UTF-8 in test output");
|
||||
WRITER_OUTPUT.with(|s| s.borrow_mut().push_str(&output));
|
||||
Ok(buf.len())
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::MakeWriter<'_> for StaticStringWriter {
|
||||
type Writer = Self;
|
||||
|
||||
fn make_writer(&self) -> Self::Writer {
|
||||
Self
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_log_pg_json_shape_formatter() {
|
||||
// Use a scoped subscriber to prevent global state pollution
|
||||
let subscriber = tracing_subscriber::registry().with(
|
||||
tracing_subscriber::fmt::layer()
|
||||
.with_ansi(false)
|
||||
.with_target(false)
|
||||
.event_format(PgJsonLogShapeFormatter)
|
||||
.with_writer(StaticStringWriter),
|
||||
);
|
||||
|
||||
let _ = update_ids(&Some("000".to_string()), &Some("111".to_string()));
|
||||
|
||||
// Clear any previous test state
|
||||
WRITER_OUTPUT.with(|s| s.borrow_mut().clear());
|
||||
|
||||
let messages = [
|
||||
"test message",
|
||||
r#"json escape check: name="BatchSpanProcessor.Flush.ExportError" reason="Other(reqwest::Error { kind: Request, url: \"http://localhost:4318/v1/traces\", source: hyper_
|
||||
util::client::legacy::Error(Connect, ConnectError(\"tcp connect error\", Os { code: 111, kind: ConnectionRefused, message: \"Connection refused\" })) })" Failed during the export process"#,
|
||||
];
|
||||
|
||||
tracing::subscriber::with_default(subscriber, || {
|
||||
for message in messages {
|
||||
tracing::info!(message);
|
||||
}
|
||||
});
|
||||
tracing::info!("not test message");
|
||||
|
||||
// Get captured output
|
||||
let output = WRITER_OUTPUT.with(|s| s.borrow().clone());
|
||||
|
||||
let json_strings: Vec<&str> = output.lines().collect();
|
||||
assert_eq!(
|
||||
json_strings.len(),
|
||||
messages.len(),
|
||||
"Log didn't have the expected number of json strings."
|
||||
);
|
||||
|
||||
let json_string_shape_regex = regex::Regex::new(
|
||||
r#"\{"timestamp": "\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} GMT", "error_severity": "INFO", "file_name": ".+", "backend_type": "compute_ctl_self", "pg_instance_id": "000", "pg_compute_id": "111", "message": ".+"\}"#
|
||||
).unwrap();
|
||||
|
||||
for (i, expected_message) in messages.iter().enumerate() {
|
||||
let json_string = json_strings[i];
|
||||
assert!(
|
||||
json_string_shape_regex.is_match(json_string),
|
||||
"Json log didn't match expected pattern:\n{json_string}",
|
||||
);
|
||||
let parsed_json: serde_json::Value = serde_json::from_str(json_string).unwrap();
|
||||
let actual_message = parsed_json["message"].as_str().unwrap();
|
||||
assert_eq!(*expected_message, actual_message);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,14 +4,13 @@ use std::thread;
|
||||
use std::time::{Duration, SystemTime};
|
||||
|
||||
use anyhow::{Result, bail};
|
||||
use compute_api::spec::{ComputeMode, PageserverProtocol};
|
||||
use itertools::Itertools as _;
|
||||
use compute_api::spec::{ComputeMode, PageserverConnectionInfo, PageserverProtocol};
|
||||
use pageserver_page_api as page_api;
|
||||
use postgres::{NoTls, SimpleQueryMessage};
|
||||
use tracing::{info, warn};
|
||||
use utils::id::{TenantId, TimelineId};
|
||||
use utils::lsn::Lsn;
|
||||
use utils::shard::{ShardCount, ShardNumber, TenantShardId};
|
||||
use utils::shard::TenantShardId;
|
||||
|
||||
use crate::compute::ComputeNode;
|
||||
|
||||
@@ -78,17 +77,16 @@ fn acquire_lsn_lease_with_retry(
|
||||
|
||||
loop {
|
||||
// Note: List of pageservers is dynamic, need to re-read configs before each attempt.
|
||||
let (connstrings, auth) = {
|
||||
let (conninfo, auth) = {
|
||||
let state = compute.state.lock().unwrap();
|
||||
let spec = state.pspec.as_ref().expect("spec must be set");
|
||||
(
|
||||
spec.pageserver_connstr.clone(),
|
||||
spec.pageserver_conninfo.clone(),
|
||||
spec.storage_auth_token.clone(),
|
||||
)
|
||||
};
|
||||
|
||||
let result =
|
||||
try_acquire_lsn_lease(&connstrings, auth.as_deref(), tenant_id, timeline_id, lsn);
|
||||
let result = try_acquire_lsn_lease(conninfo, auth.as_deref(), tenant_id, timeline_id, lsn);
|
||||
match result {
|
||||
Ok(Some(res)) => {
|
||||
return Ok(res);
|
||||
@@ -112,35 +110,44 @@ fn acquire_lsn_lease_with_retry(
|
||||
|
||||
/// Tries to acquire LSN leases on all Pageserver shards.
|
||||
fn try_acquire_lsn_lease(
|
||||
connstrings: &str,
|
||||
conninfo: PageserverConnectionInfo,
|
||||
auth: Option<&str>,
|
||||
tenant_id: TenantId,
|
||||
timeline_id: TimelineId,
|
||||
lsn: Lsn,
|
||||
) -> Result<Option<SystemTime>> {
|
||||
let connstrings = connstrings.split(',').collect_vec();
|
||||
let shard_count = connstrings.len();
|
||||
let mut leases = Vec::new();
|
||||
|
||||
for (shard_number, &connstring) in connstrings.iter().enumerate() {
|
||||
let tenant_shard_id = match shard_count {
|
||||
0 | 1 => TenantShardId::unsharded(tenant_id),
|
||||
shard_count => TenantShardId {
|
||||
tenant_id,
|
||||
shard_number: ShardNumber(shard_number as u8),
|
||||
shard_count: ShardCount::new(shard_count as u8),
|
||||
},
|
||||
for (shard_index, shard) in conninfo.shards.into_iter() {
|
||||
let tenant_shard_id = TenantShardId {
|
||||
tenant_id,
|
||||
shard_number: shard_index.shard_number,
|
||||
shard_count: shard_index.shard_count,
|
||||
};
|
||||
|
||||
let lease = match PageserverProtocol::from_connstring(connstring)? {
|
||||
PageserverProtocol::Libpq => {
|
||||
acquire_lsn_lease_libpq(connstring, auth, tenant_shard_id, timeline_id, lsn)?
|
||||
}
|
||||
PageserverProtocol::Grpc => {
|
||||
acquire_lsn_lease_grpc(connstring, auth, tenant_shard_id, timeline_id, lsn)?
|
||||
}
|
||||
};
|
||||
leases.push(lease);
|
||||
// XXX: If there are more than pageserver for the one shard, do we need to get a
|
||||
// leas on all of them? Currently, that's what we assume, but this is hypothetical
|
||||
// as of this writing, as we never pass the info for more than one pageserver per
|
||||
// shard.
|
||||
for pageserver in shard.pageservers {
|
||||
let lease = match conninfo.prefer_protocol {
|
||||
PageserverProtocol::Grpc => acquire_lsn_lease_grpc(
|
||||
&pageserver.grpc_url.unwrap(),
|
||||
auth,
|
||||
tenant_shard_id,
|
||||
timeline_id,
|
||||
lsn,
|
||||
)?,
|
||||
PageserverProtocol::Libpq => acquire_lsn_lease_libpq(
|
||||
&pageserver.libpq_url.unwrap(),
|
||||
auth,
|
||||
tenant_shard_id,
|
||||
timeline_id,
|
||||
lsn,
|
||||
)?,
|
||||
};
|
||||
leases.push(lease);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(leases.into_iter().min().flatten())
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
|
||||
// Run `/usr/local/bin/pg_isready -p {port}`
|
||||
// Check the connectivity of PG
|
||||
// Success means PG is listening on the port and accepting connections
|
||||
// Note that PG does not need to authenticate the connection, nor reserve a connection quota for it.
|
||||
// See https://www.postgresql.org/docs/current/app-pg-isready.html
|
||||
pub fn pg_isready(bin: &str, port: u16) -> anyhow::Result<()> {
|
||||
let child_result = std::process::Command::new(bin)
|
||||
.arg("-p")
|
||||
.arg(port.to_string())
|
||||
.spawn();
|
||||
|
||||
child_result
|
||||
.context("spawn() failed")
|
||||
.and_then(|mut child| child.wait().context("wait() failed"))
|
||||
.and_then(|status| match status.success() {
|
||||
true => Ok(()),
|
||||
false => Err(anyhow!("process exited with {status}")),
|
||||
})
|
||||
// wrap any prior error with the overall context that we couldn't run the command
|
||||
.with_context(|| format!("could not run `{bin} --port {port}`"))
|
||||
}
|
||||
|
||||
// It's safe to assume pg_isready is under the same directory with postgres,
|
||||
// because it is a PG util bin installed along with postgres
|
||||
pub fn get_pg_isready_bin(pgbin: &str) -> String {
|
||||
let split = pgbin.split("/").collect::<Vec<&str>>();
|
||||
split[0..split.len() - 1].join("/") + "/pg_isready"
|
||||
}
|
||||
@@ -16,9 +16,14 @@ use std::time::Duration;
|
||||
use anyhow::{Context, Result, anyhow, bail};
|
||||
use clap::Parser;
|
||||
use compute_api::requests::ComputeClaimsScope;
|
||||
use compute_api::spec::{ComputeMode, PageserverProtocol};
|
||||
use compute_api::spec::{
|
||||
ComputeMode, PageserverConnectionInfo, PageserverProtocol, PageserverShardInfo,
|
||||
};
|
||||
use control_plane::broker::StorageBroker;
|
||||
use control_plane::endpoint::{ComputeControlPlane, EndpointTerminateMode};
|
||||
use control_plane::endpoint::{
|
||||
pageserver_conf_to_shard_conn_info, tenant_locate_response_to_conn_info,
|
||||
};
|
||||
use control_plane::endpoint_storage::{ENDPOINT_STORAGE_DEFAULT_ADDR, EndpointStorage};
|
||||
use control_plane::local_env;
|
||||
use control_plane::local_env::{
|
||||
@@ -44,7 +49,6 @@ use pageserver_api::models::{
|
||||
};
|
||||
use pageserver_api::shard::{DEFAULT_STRIPE_SIZE, ShardCount, ShardStripeSize, TenantShardId};
|
||||
use postgres_backend::AuthType;
|
||||
use postgres_connection::parse_host_port;
|
||||
use safekeeper_api::membership::{SafekeeperGeneration, SafekeeperId};
|
||||
use safekeeper_api::{
|
||||
DEFAULT_HTTP_LISTEN_PORT as DEFAULT_SAFEKEEPER_HTTP_PORT,
|
||||
@@ -52,11 +56,11 @@ use safekeeper_api::{
|
||||
};
|
||||
use storage_broker::DEFAULT_LISTEN_ADDR as DEFAULT_BROKER_ADDR;
|
||||
use tokio::task::JoinSet;
|
||||
use url::Host;
|
||||
use utils::auth::{Claims, Scope};
|
||||
use utils::id::{NodeId, TenantId, TenantTimelineId, TimelineId};
|
||||
use utils::lsn::Lsn;
|
||||
use utils::project_git_version;
|
||||
use utils::shard::ShardIndex;
|
||||
|
||||
// Default id of a safekeeper node, if not specified on the command line.
|
||||
const DEFAULT_SAFEKEEPER_ID: NodeId = NodeId(1);
|
||||
@@ -560,9 +564,7 @@ enum EndpointCmd {
|
||||
Create(EndpointCreateCmdArgs),
|
||||
Start(EndpointStartCmdArgs),
|
||||
Reconfigure(EndpointReconfigureCmdArgs),
|
||||
RefreshConfiguration(EndpointRefreshConfigurationArgs),
|
||||
Stop(EndpointStopCmdArgs),
|
||||
UpdatePageservers(EndpointUpdatePageserversCmdArgs),
|
||||
GenerateJwt(EndpointGenerateJwtCmdArgs),
|
||||
}
|
||||
|
||||
@@ -723,13 +725,6 @@ struct EndpointReconfigureCmdArgs {
|
||||
safekeepers: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(clap::Args)]
|
||||
#[clap(about = "Refresh the endpoint's configuration by forcing it reload it's spec")]
|
||||
struct EndpointRefreshConfigurationArgs {
|
||||
#[clap(help = "Postgres endpoint id")]
|
||||
endpoint_id: String,
|
||||
}
|
||||
|
||||
#[derive(clap::Args)]
|
||||
#[clap(about = "Stop an endpoint")]
|
||||
struct EndpointStopCmdArgs {
|
||||
@@ -747,16 +742,6 @@ struct EndpointStopCmdArgs {
|
||||
mode: EndpointTerminateMode,
|
||||
}
|
||||
|
||||
#[derive(clap::Args)]
|
||||
#[clap(about = "Update the pageservers in the spec file of the compute endpoint")]
|
||||
struct EndpointUpdatePageserversCmdArgs {
|
||||
#[clap(help = "Postgres endpoint id")]
|
||||
endpoint_id: String,
|
||||
|
||||
#[clap(short = 'p', long, help = "Specified pageserver id")]
|
||||
pageserver_id: Option<NodeId>,
|
||||
}
|
||||
|
||||
#[derive(clap::Args)]
|
||||
#[clap(about = "Generate a JWT for an endpoint")]
|
||||
struct EndpointGenerateJwtCmdArgs {
|
||||
@@ -1536,7 +1521,7 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res
|
||||
let endpoint = cplane
|
||||
.endpoints
|
||||
.get(endpoint_id.as_str())
|
||||
.ok_or_else(|| anyhow!("endpoint {endpoint_id} not found"))?;
|
||||
.ok_or_else(|| anyhow::anyhow!("endpoint {endpoint_id} not found"))?;
|
||||
|
||||
if !args.allow_multiple {
|
||||
cplane.check_conflicting_endpoints(
|
||||
@@ -1546,62 +1531,56 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res
|
||||
)?;
|
||||
}
|
||||
|
||||
let (pageservers, stripe_size) = if let Some(pageserver_id) = pageserver_id {
|
||||
let conf = env.get_pageserver_conf(pageserver_id).unwrap();
|
||||
// Use gRPC if requested.
|
||||
let pageserver = if endpoint.grpc {
|
||||
let grpc_addr = conf.listen_grpc_addr.as_ref().expect("bad config");
|
||||
let (host, port) = parse_host_port(grpc_addr)?;
|
||||
let port = port.unwrap_or(DEFAULT_PAGESERVER_GRPC_PORT);
|
||||
(PageserverProtocol::Grpc, host, port)
|
||||
} else {
|
||||
let (host, port) = parse_host_port(&conf.listen_pg_addr)?;
|
||||
let port = port.unwrap_or(5432);
|
||||
(PageserverProtocol::Libpq, host, port)
|
||||
let prefer_protocol = if endpoint.grpc {
|
||||
PageserverProtocol::Grpc
|
||||
} else {
|
||||
PageserverProtocol::Libpq
|
||||
};
|
||||
|
||||
let mut pageserver_conninfo = if let Some(ps_id) = pageserver_id {
|
||||
let conf = env.get_pageserver_conf(ps_id).unwrap();
|
||||
let ps_conninfo = pageserver_conf_to_shard_conn_info(conf)?;
|
||||
|
||||
let shard_info = PageserverShardInfo {
|
||||
pageservers: vec![ps_conninfo],
|
||||
};
|
||||
// If caller is telling us what pageserver to use, this is not a tenant which is
|
||||
// fully managed by storage controller, therefore not sharded.
|
||||
(vec![pageserver], DEFAULT_STRIPE_SIZE)
|
||||
let shards: HashMap<_, _> = vec![(ShardIndex::unsharded(), shard_info)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
PageserverConnectionInfo {
|
||||
shard_count: ShardCount(0),
|
||||
stripe_size: None,
|
||||
shards,
|
||||
prefer_protocol,
|
||||
}
|
||||
} else {
|
||||
// Look up the currently attached location of the tenant, and its striping metadata,
|
||||
// to pass these on to postgres.
|
||||
let storage_controller = StorageController::from_env(env);
|
||||
let locate_result = storage_controller.tenant_locate(endpoint.tenant_id).await?;
|
||||
let pageservers = futures::future::try_join_all(
|
||||
locate_result.shards.into_iter().map(|shard| async move {
|
||||
if let ComputeMode::Static(lsn) = endpoint.mode {
|
||||
// Initialize LSN leases for static computes.
|
||||
assert!(!locate_result.shards.is_empty());
|
||||
|
||||
// Initialize LSN leases for static computes.
|
||||
if let ComputeMode::Static(lsn) = endpoint.mode {
|
||||
futures::future::try_join_all(locate_result.shards.iter().map(
|
||||
|shard| async move {
|
||||
let conf = env.get_pageserver_conf(shard.node_id).unwrap();
|
||||
let pageserver = PageServerNode::from_env(env, conf);
|
||||
|
||||
pageserver
|
||||
.http_client
|
||||
.timeline_init_lsn_lease(shard.shard_id, endpoint.timeline_id, lsn)
|
||||
.await?;
|
||||
}
|
||||
.await
|
||||
},
|
||||
))
|
||||
.await?;
|
||||
}
|
||||
|
||||
let pageserver = if endpoint.grpc {
|
||||
(
|
||||
PageserverProtocol::Grpc,
|
||||
Host::parse(&shard.listen_grpc_addr.expect("no gRPC address"))?,
|
||||
shard.listen_grpc_port.expect("no gRPC port"),
|
||||
)
|
||||
} else {
|
||||
(
|
||||
PageserverProtocol::Libpq,
|
||||
Host::parse(&shard.listen_pg_addr)?,
|
||||
shard.listen_pg_port,
|
||||
)
|
||||
};
|
||||
anyhow::Ok(pageserver)
|
||||
}),
|
||||
)
|
||||
.await?;
|
||||
let stripe_size = locate_result.shard_params.stripe_size;
|
||||
|
||||
(pageservers, stripe_size)
|
||||
tenant_locate_response_to_conn_info(&locate_result)?
|
||||
};
|
||||
assert!(!pageservers.is_empty());
|
||||
pageserver_conninfo.prefer_protocol = prefer_protocol;
|
||||
|
||||
let ps_conf = env.get_pageserver_conf(DEFAULT_PAGESERVER_ID)?;
|
||||
let auth_token = if matches!(ps_conf.pg_auth_type, AuthType::NeonJWT) {
|
||||
@@ -1631,9 +1610,8 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res
|
||||
endpoint_storage_addr,
|
||||
safekeepers_generation,
|
||||
safekeepers,
|
||||
pageservers,
|
||||
pageserver_conninfo,
|
||||
remote_ext_base_url: remote_ext_base_url.clone(),
|
||||
shard_stripe_size: stripe_size.0 as usize,
|
||||
create_test_user: args.create_test_user,
|
||||
start_timeout: args.start_timeout,
|
||||
autoprewarm: args.autoprewarm,
|
||||
@@ -1644,105 +1622,53 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res
|
||||
println!("Starting existing endpoint {endpoint_id}...");
|
||||
endpoint.start(args).await?;
|
||||
}
|
||||
EndpointCmd::UpdatePageservers(args) => {
|
||||
let endpoint_id = &args.endpoint_id;
|
||||
let endpoint = cplane
|
||||
.endpoints
|
||||
.get(endpoint_id.as_str())
|
||||
.with_context(|| format!("postgres endpoint {endpoint_id} is not found"))?;
|
||||
let pageservers = match args.pageserver_id {
|
||||
Some(pageserver_id) => {
|
||||
let pageserver =
|
||||
PageServerNode::from_env(env, env.get_pageserver_conf(pageserver_id)?);
|
||||
|
||||
vec![(
|
||||
PageserverProtocol::Libpq,
|
||||
pageserver.pg_connection_config.host().clone(),
|
||||
pageserver.pg_connection_config.port(),
|
||||
)]
|
||||
}
|
||||
None => {
|
||||
let storage_controller = StorageController::from_env(env);
|
||||
storage_controller
|
||||
.tenant_locate(endpoint.tenant_id)
|
||||
.await?
|
||||
.shards
|
||||
.into_iter()
|
||||
.map(|shard| {
|
||||
(
|
||||
PageserverProtocol::Libpq,
|
||||
Host::parse(&shard.listen_pg_addr)
|
||||
.expect("Storage controller reported malformed host"),
|
||||
shard.listen_pg_port,
|
||||
)
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
};
|
||||
|
||||
endpoint.update_pageservers_in_config(pageservers).await?;
|
||||
}
|
||||
EndpointCmd::Reconfigure(args) => {
|
||||
let endpoint_id = &args.endpoint_id;
|
||||
let endpoint = cplane
|
||||
.endpoints
|
||||
.get(endpoint_id.as_str())
|
||||
.with_context(|| format!("postgres endpoint {endpoint_id} is not found"))?;
|
||||
let pageservers = if let Some(ps_id) = args.endpoint_pageserver_id {
|
||||
let conf = env.get_pageserver_conf(ps_id)?;
|
||||
// Use gRPC if requested.
|
||||
let pageserver = if endpoint.grpc {
|
||||
let grpc_addr = conf.listen_grpc_addr.as_ref().expect("bad config");
|
||||
let (host, port) = parse_host_port(grpc_addr)?;
|
||||
let port = port.unwrap_or(DEFAULT_PAGESERVER_GRPC_PORT);
|
||||
(PageserverProtocol::Grpc, host, port)
|
||||
} else {
|
||||
let (host, port) = parse_host_port(&conf.listen_pg_addr)?;
|
||||
let port = port.unwrap_or(5432);
|
||||
(PageserverProtocol::Libpq, host, port)
|
||||
};
|
||||
vec![pageserver]
|
||||
|
||||
let prefer_protocol = if endpoint.grpc {
|
||||
PageserverProtocol::Grpc
|
||||
} else {
|
||||
let storage_controller = StorageController::from_env(env);
|
||||
storage_controller
|
||||
.tenant_locate(endpoint.tenant_id)
|
||||
.await?
|
||||
.shards
|
||||
.into_iter()
|
||||
.map(|shard| {
|
||||
// Use gRPC if requested.
|
||||
if endpoint.grpc {
|
||||
(
|
||||
PageserverProtocol::Grpc,
|
||||
Host::parse(&shard.listen_grpc_addr.expect("no gRPC address"))
|
||||
.expect("bad hostname"),
|
||||
shard.listen_grpc_port.expect("no gRPC port"),
|
||||
)
|
||||
} else {
|
||||
(
|
||||
PageserverProtocol::Libpq,
|
||||
Host::parse(&shard.listen_pg_addr).expect("bad hostname"),
|
||||
shard.listen_pg_port,
|
||||
)
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
PageserverProtocol::Libpq
|
||||
};
|
||||
let mut pageserver_conninfo = if let Some(ps_id) = args.endpoint_pageserver_id {
|
||||
let conf = env.get_pageserver_conf(ps_id)?;
|
||||
let ps_conninfo = pageserver_conf_to_shard_conn_info(conf)?;
|
||||
let shard_info = PageserverShardInfo {
|
||||
pageservers: vec![ps_conninfo],
|
||||
};
|
||||
|
||||
// If caller is telling us what pageserver to use, this is not a tenant which is
|
||||
// fully managed by storage controller, therefore not sharded.
|
||||
let shards: HashMap<_, _> = vec![(ShardIndex::unsharded(), shard_info)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
PageserverConnectionInfo {
|
||||
shard_count: ShardCount::unsharded(),
|
||||
stripe_size: None,
|
||||
shards,
|
||||
prefer_protocol,
|
||||
}
|
||||
} else {
|
||||
// Look up the currently attached location of the tenant, and its striping metadata,
|
||||
// to pass these on to postgres.
|
||||
let storage_controller = StorageController::from_env(env);
|
||||
let locate_result = storage_controller.tenant_locate(endpoint.tenant_id).await?;
|
||||
|
||||
tenant_locate_response_to_conn_info(&locate_result)?
|
||||
};
|
||||
pageserver_conninfo.prefer_protocol = prefer_protocol;
|
||||
|
||||
// If --safekeepers argument is given, use only the listed
|
||||
// safekeeper nodes; otherwise all from the env.
|
||||
let safekeepers = parse_safekeepers(&args.safekeepers)?;
|
||||
endpoint
|
||||
.reconfigure(Some(pageservers), None, safekeepers, None)
|
||||
.reconfigure(Some(&pageserver_conninfo), safekeepers, None)
|
||||
.await?;
|
||||
}
|
||||
EndpointCmd::RefreshConfiguration(args) => {
|
||||
let endpoint_id = &args.endpoint_id;
|
||||
let endpoint = cplane
|
||||
.endpoints
|
||||
.get(endpoint_id.as_str())
|
||||
.with_context(|| format!("postgres endpoint {endpoint_id} is not found"))?;
|
||||
endpoint.refresh_configuration().await?;
|
||||
}
|
||||
EndpointCmd::Stop(args) => {
|
||||
let endpoint_id = &args.endpoint_id;
|
||||
let endpoint = cplane
|
||||
|
||||
@@ -37,7 +37,7 @@
|
||||
//! <other PostgreSQL files>
|
||||
//! ```
|
||||
//!
|
||||
use std::collections::BTreeMap;
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
use std::fmt::Display;
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream};
|
||||
use std::path::PathBuf;
|
||||
@@ -58,8 +58,12 @@ use compute_api::responses::{
|
||||
};
|
||||
use compute_api::spec::{
|
||||
Cluster, ComputeAudit, ComputeFeature, ComputeMode, ComputeSpec, Database, PageserverProtocol,
|
||||
PgIdent, RemoteExtSpec, Role,
|
||||
PageserverShardInfo, PgIdent, RemoteExtSpec, Role,
|
||||
};
|
||||
|
||||
// re-export these, because they're used in the reconfigure() function
|
||||
pub use compute_api::spec::{PageserverConnectionInfo, PageserverShardConnectionInfo};
|
||||
|
||||
use jsonwebtoken::jwk::{
|
||||
AlgorithmParameters, CommonParameters, EllipticCurve, Jwk, JwkSet, KeyAlgorithm, KeyOperations,
|
||||
OctetKeyPairParameters, OctetKeyPairType, PublicKeyUse,
|
||||
@@ -74,9 +78,11 @@ use sha2::{Digest, Sha256};
|
||||
use spki::der::Decode;
|
||||
use spki::{SubjectPublicKeyInfo, SubjectPublicKeyInfoRef};
|
||||
use tracing::debug;
|
||||
use url::Host;
|
||||
use utils::id::{NodeId, TenantId, TimelineId};
|
||||
use utils::shard::ShardStripeSize;
|
||||
use utils::shard::{ShardIndex, ShardNumber};
|
||||
|
||||
use pageserver_api::config::DEFAULT_GRPC_LISTEN_PORT as DEFAULT_PAGESERVER_GRPC_PORT;
|
||||
use postgres_connection::parse_host_port;
|
||||
|
||||
use crate::local_env::LocalEnv;
|
||||
use crate::postgresql_conf::PostgresConf;
|
||||
@@ -387,9 +393,8 @@ pub struct EndpointStartArgs {
|
||||
pub endpoint_storage_addr: String,
|
||||
pub safekeepers_generation: Option<SafekeeperGeneration>,
|
||||
pub safekeepers: Vec<NodeId>,
|
||||
pub pageservers: Vec<(PageserverProtocol, Host, u16)>,
|
||||
pub pageserver_conninfo: PageserverConnectionInfo,
|
||||
pub remote_ext_base_url: Option<String>,
|
||||
pub shard_stripe_size: usize,
|
||||
pub create_test_user: bool,
|
||||
pub start_timeout: Duration,
|
||||
pub autoprewarm: bool,
|
||||
@@ -662,14 +667,6 @@ impl Endpoint {
|
||||
}
|
||||
}
|
||||
|
||||
fn build_pageserver_connstr(pageservers: &[(PageserverProtocol, Host, u16)]) -> String {
|
||||
pageservers
|
||||
.iter()
|
||||
.map(|(scheme, host, port)| format!("{scheme}://no_user@{host}:{port}"))
|
||||
.collect::<Vec<_>>()
|
||||
.join(",")
|
||||
}
|
||||
|
||||
/// Map safekeepers ids to the actual connection strings.
|
||||
fn build_safekeepers_connstrs(&self, sk_ids: Vec<NodeId>) -> Result<Vec<String>> {
|
||||
let mut safekeeper_connstrings = Vec::new();
|
||||
@@ -715,9 +712,6 @@ impl Endpoint {
|
||||
std::fs::remove_dir_all(self.pgdata())?;
|
||||
}
|
||||
|
||||
let pageserver_connstring = Self::build_pageserver_connstr(&args.pageservers);
|
||||
assert!(!pageserver_connstring.is_empty());
|
||||
|
||||
let safekeeper_connstrings = self.build_safekeepers_connstrs(args.safekeepers)?;
|
||||
|
||||
// check for file remote_extensions_spec.json
|
||||
@@ -732,6 +726,45 @@ impl Endpoint {
|
||||
remote_extensions = None;
|
||||
};
|
||||
|
||||
// For the sake of backwards-compatibility, also fill in 'pageserver_connstring'
|
||||
//
|
||||
// Use a closure so that we can conviniently return None in the middle of the
|
||||
// loop.
|
||||
let pageserver_connstring: Option<String> = (|| {
|
||||
let num_shards = if args.pageserver_conninfo.shard_count.is_unsharded() {
|
||||
1
|
||||
} else {
|
||||
args.pageserver_conninfo.shard_count.0
|
||||
};
|
||||
let mut connstrings = Vec::new();
|
||||
for shard_no in 0..num_shards {
|
||||
let shard_index = ShardIndex {
|
||||
shard_count: args.pageserver_conninfo.shard_count,
|
||||
shard_number: ShardNumber(shard_no),
|
||||
};
|
||||
let shard = args
|
||||
.pageserver_conninfo
|
||||
.shards
|
||||
.get(&shard_index)
|
||||
.ok_or_else(|| {
|
||||
anyhow!(
|
||||
"shard {} not found in pageserver_connection_info",
|
||||
shard_index
|
||||
)
|
||||
})?;
|
||||
let pageserver = shard
|
||||
.pageservers
|
||||
.first()
|
||||
.ok_or(anyhow!("must have at least one pageserver"))?;
|
||||
if let Some(libpq_url) = &pageserver.libpq_url {
|
||||
connstrings.push(libpq_url.clone());
|
||||
} else {
|
||||
return Ok::<_, anyhow::Error>(None);
|
||||
}
|
||||
}
|
||||
Ok(Some(connstrings.join(",")))
|
||||
})()?;
|
||||
|
||||
// Create config file
|
||||
let config = {
|
||||
let mut spec = ComputeSpec {
|
||||
@@ -776,13 +809,14 @@ impl Endpoint {
|
||||
branch_id: None,
|
||||
endpoint_id: Some(self.endpoint_id.clone()),
|
||||
mode: self.mode,
|
||||
pageserver_connstring: Some(pageserver_connstring),
|
||||
pageserver_connection_info: Some(args.pageserver_conninfo.clone()),
|
||||
pageserver_connstring,
|
||||
safekeepers_generation: args.safekeepers_generation.map(|g| g.into_inner()),
|
||||
safekeeper_connstrings,
|
||||
storage_auth_token: args.auth_token.clone(),
|
||||
remote_extensions,
|
||||
pgbouncer_settings: None,
|
||||
shard_stripe_size: Some(args.shard_stripe_size),
|
||||
shard_stripe_size: args.pageserver_conninfo.stripe_size, // redundant with pageserver_connection_info.stripe_size
|
||||
local_proxy_config: None,
|
||||
reconfigure_concurrency: self.reconfigure_concurrency,
|
||||
drop_subscriptions_before_start: self.drop_subscriptions_before_start,
|
||||
@@ -937,9 +971,7 @@ impl Endpoint {
|
||||
| ComputeStatus::Configuration
|
||||
| ComputeStatus::TerminationPendingFast
|
||||
| ComputeStatus::TerminationPendingImmediate
|
||||
| ComputeStatus::Terminated
|
||||
| ComputeStatus::RefreshConfigurationPending
|
||||
| ComputeStatus::RefreshConfiguration => {
|
||||
| ComputeStatus::Terminated => {
|
||||
bail!("unexpected compute status: {:?}", state.status)
|
||||
}
|
||||
}
|
||||
@@ -962,29 +994,6 @@ impl Endpoint {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Update the pageservers in the spec file of the endpoint. This is useful to test the spec refresh scenario.
|
||||
pub async fn update_pageservers_in_config(
|
||||
&self,
|
||||
pageservers: Vec<(PageserverProtocol, Host, u16)>,
|
||||
) -> Result<()> {
|
||||
let config_path = self.endpoint_path().join("config.json");
|
||||
let mut config: ComputeConfig = {
|
||||
let file = std::fs::File::open(&config_path)?;
|
||||
serde_json::from_reader(file)?
|
||||
};
|
||||
|
||||
let pageserver_connstring = Self::build_pageserver_connstr(&pageservers);
|
||||
assert!(!pageserver_connstring.is_empty());
|
||||
let mut spec = config.spec.unwrap();
|
||||
spec.pageserver_connstring = Some(pageserver_connstring);
|
||||
config.spec = Some(spec);
|
||||
|
||||
let file = std::fs::File::create(&config_path)?;
|
||||
serde_json::to_writer_pretty(file, &config)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Call the /status HTTP API
|
||||
pub async fn get_status(&self) -> Result<ComputeStatusResponse> {
|
||||
let client = reqwest::Client::new();
|
||||
@@ -1019,8 +1028,7 @@ impl Endpoint {
|
||||
|
||||
pub async fn reconfigure(
|
||||
&self,
|
||||
pageservers: Option<Vec<(PageserverProtocol, Host, u16)>>,
|
||||
stripe_size: Option<ShardStripeSize>,
|
||||
pageserver_conninfo: Option<&PageserverConnectionInfo>,
|
||||
safekeepers: Option<Vec<NodeId>>,
|
||||
safekeeper_generation: Option<SafekeeperGeneration>,
|
||||
) -> Result<()> {
|
||||
@@ -1035,15 +1043,15 @@ impl Endpoint {
|
||||
let postgresql_conf = self.read_postgresql_conf()?;
|
||||
spec.cluster.postgresql_conf = Some(postgresql_conf);
|
||||
|
||||
// If pageservers are not specified, don't change them.
|
||||
if let Some(pageservers) = pageservers {
|
||||
anyhow::ensure!(!pageservers.is_empty(), "no pageservers provided");
|
||||
|
||||
let pageserver_connstr = Self::build_pageserver_connstr(&pageservers);
|
||||
spec.pageserver_connstring = Some(pageserver_connstr);
|
||||
if stripe_size.is_some() {
|
||||
spec.shard_stripe_size = stripe_size.map(|s| s.0 as usize);
|
||||
}
|
||||
if let Some(pageserver_conninfo) = pageserver_conninfo {
|
||||
// If pageservers are provided, we need to ensure that they are not empty.
|
||||
// This is a requirement for the compute_ctl configuration.
|
||||
anyhow::ensure!(
|
||||
!pageserver_conninfo.shards.is_empty(),
|
||||
"no pageservers provided"
|
||||
);
|
||||
spec.pageserver_connection_info = Some(pageserver_conninfo.clone());
|
||||
spec.shard_stripe_size = pageserver_conninfo.stripe_size;
|
||||
}
|
||||
|
||||
// If safekeepers are not specified, don't change them.
|
||||
@@ -1092,11 +1100,9 @@ impl Endpoint {
|
||||
|
||||
pub async fn reconfigure_pageservers(
|
||||
&self,
|
||||
pageservers: Vec<(PageserverProtocol, Host, u16)>,
|
||||
stripe_size: Option<ShardStripeSize>,
|
||||
pageservers: &PageserverConnectionInfo,
|
||||
) -> Result<()> {
|
||||
self.reconfigure(Some(pageservers), stripe_size, None, None)
|
||||
.await
|
||||
self.reconfigure(Some(pageservers), None, None).await
|
||||
}
|
||||
|
||||
pub async fn reconfigure_safekeepers(
|
||||
@@ -1104,7 +1110,7 @@ impl Endpoint {
|
||||
safekeepers: Vec<NodeId>,
|
||||
generation: SafekeeperGeneration,
|
||||
) -> Result<()> {
|
||||
self.reconfigure(None, None, Some(safekeepers), Some(generation))
|
||||
self.reconfigure(None, Some(safekeepers), Some(generation))
|
||||
.await
|
||||
}
|
||||
|
||||
@@ -1150,33 +1156,6 @@ impl Endpoint {
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
pub async fn refresh_configuration(&self) -> Result<()> {
|
||||
let client = reqwest::Client::builder()
|
||||
.timeout(Duration::from_secs(30))
|
||||
.build()
|
||||
.unwrap();
|
||||
let response = client
|
||||
.post(format!(
|
||||
"http://{}:{}/refresh_configuration",
|
||||
self.internal_http_address.ip(),
|
||||
self.internal_http_address.port()
|
||||
))
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
let status = response.status();
|
||||
if !(status.is_client_error() || status.is_server_error()) {
|
||||
Ok(())
|
||||
} else {
|
||||
let url = response.url().to_owned();
|
||||
let msg = match response.text().await {
|
||||
Ok(err_body) => format!("Error: {err_body}"),
|
||||
Err(_) => format!("Http error ({}) at {}.", status.as_u16(), url),
|
||||
};
|
||||
Err(anyhow::anyhow!(msg))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn connstr(&self, user: &str, db_name: &str) -> String {
|
||||
format!(
|
||||
"postgresql://{}@{}:{}/{}",
|
||||
@@ -1187,3 +1166,68 @@ impl Endpoint {
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn pageserver_conf_to_shard_conn_info(
|
||||
conf: &crate::local_env::PageServerConf,
|
||||
) -> Result<PageserverShardConnectionInfo> {
|
||||
let libpq_url = {
|
||||
let (host, port) = parse_host_port(&conf.listen_pg_addr)?;
|
||||
let port = port.unwrap_or(5432);
|
||||
Some(format!("postgres://no_user@{host}:{port}"))
|
||||
};
|
||||
let grpc_url = if let Some(grpc_addr) = &conf.listen_grpc_addr {
|
||||
let (host, port) = parse_host_port(grpc_addr)?;
|
||||
let port = port.unwrap_or(DEFAULT_PAGESERVER_GRPC_PORT);
|
||||
Some(format!("grpc://no_user@{host}:{port}"))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
Ok(PageserverShardConnectionInfo {
|
||||
id: Some(conf.id.to_string()),
|
||||
libpq_url,
|
||||
grpc_url,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn tenant_locate_response_to_conn_info(
|
||||
response: &pageserver_api::controller_api::TenantLocateResponse,
|
||||
) -> Result<PageserverConnectionInfo> {
|
||||
let mut shards = HashMap::new();
|
||||
for shard in response.shards.iter() {
|
||||
tracing::info!("parsing {}", shard.listen_pg_addr);
|
||||
let libpq_url = {
|
||||
let host = &shard.listen_pg_addr;
|
||||
let port = shard.listen_pg_port;
|
||||
Some(format!("postgres://no_user@{host}:{port}"))
|
||||
};
|
||||
let grpc_url = if let Some(grpc_addr) = &shard.listen_grpc_addr {
|
||||
let host = grpc_addr;
|
||||
let port = shard.listen_grpc_port.expect("no gRPC port");
|
||||
Some(format!("grpc://no_user@{host}:{port}"))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let shard_info = PageserverShardInfo {
|
||||
pageservers: vec![PageserverShardConnectionInfo {
|
||||
id: Some(shard.node_id.to_string()),
|
||||
libpq_url,
|
||||
grpc_url,
|
||||
}],
|
||||
};
|
||||
|
||||
shards.insert(shard.shard_id.to_index(), shard_info);
|
||||
}
|
||||
|
||||
let stripe_size = if response.shard_params.count.is_unsharded() {
|
||||
None
|
||||
} else {
|
||||
Some(response.shard_params.stripe_size.0)
|
||||
};
|
||||
Ok(PageserverConnectionInfo {
|
||||
shard_count: response.shard_params.count,
|
||||
stripe_size,
|
||||
shards,
|
||||
prefer_protocol: PageserverProtocol::default(),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -108,10 +108,11 @@ pub enum PromoteState {
|
||||
Failed { error: String },
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Default, Debug)]
|
||||
#[derive(Deserialize, Serialize, Default, Debug, Clone)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub struct PromoteConfig {
|
||||
pub spec: ComputeSpec,
|
||||
/// Result of /safekeepers_lsn
|
||||
pub struct SafekeepersLsn {
|
||||
pub safekeepers: String,
|
||||
pub wal_flush_lsn: utils::lsn::Lsn,
|
||||
}
|
||||
|
||||
@@ -172,11 +173,6 @@ pub enum ComputeStatus {
|
||||
TerminationPendingImmediate,
|
||||
// Terminated Postgres
|
||||
Terminated,
|
||||
// A spec refresh is being requested
|
||||
RefreshConfigurationPending,
|
||||
// A spec refresh is being applied. We cannot refresh configuration again until the current
|
||||
// refresh is done, i.e., signal_refresh_configuration() will return 500 error.
|
||||
RefreshConfiguration,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize)]
|
||||
@@ -189,10 +185,6 @@ impl Display for ComputeStatus {
|
||||
match self {
|
||||
ComputeStatus::Empty => f.write_str("empty"),
|
||||
ComputeStatus::ConfigurationPending => f.write_str("configuration-pending"),
|
||||
ComputeStatus::RefreshConfiguration => f.write_str("refresh-configuration"),
|
||||
ComputeStatus::RefreshConfigurationPending => {
|
||||
f.write_str("refresh-configuration-pending")
|
||||
}
|
||||
ComputeStatus::Init => f.write_str("init"),
|
||||
ComputeStatus::Running => f.write_str("running"),
|
||||
ComputeStatus::Configuration => f.write_str("configuration"),
|
||||
|
||||
@@ -14,6 +14,7 @@ use serde::{Deserialize, Serialize};
|
||||
use url::Url;
|
||||
use utils::id::{TenantId, TimelineId};
|
||||
use utils::lsn::Lsn;
|
||||
use utils::shard::{ShardCount, ShardIndex};
|
||||
|
||||
use crate::responses::TlsConfig;
|
||||
|
||||
@@ -105,6 +106,17 @@ pub struct ComputeSpec {
|
||||
// updated to fill these fields, we can make these non optional.
|
||||
pub tenant_id: Option<TenantId>,
|
||||
pub timeline_id: Option<TimelineId>,
|
||||
|
||||
/// Pageserver information can be passed in three different ways:
|
||||
/// 1. Here in `pageserver_connection_info`
|
||||
/// 2. In the `pageserver_connstring` field.
|
||||
/// 3. in `cluster.settings`.
|
||||
///
|
||||
/// The goal is to use method 1. everywhere. But for backwards-compatibility with old
|
||||
/// versions of the control plane, `compute_ctl` will check 2. and 3. if the
|
||||
/// `pageserver_connection_info` field is missing.
|
||||
pub pageserver_connection_info: Option<PageserverConnectionInfo>,
|
||||
|
||||
pub pageserver_connstring: Option<String>,
|
||||
|
||||
// More neon ids that we expose to the compute_ctl
|
||||
@@ -141,7 +153,7 @@ pub struct ComputeSpec {
|
||||
|
||||
// Stripe size for pageserver sharding, in pages
|
||||
#[serde(default)]
|
||||
pub shard_stripe_size: Option<usize>,
|
||||
pub shard_stripe_size: Option<u32>,
|
||||
|
||||
/// Local Proxy configuration used for JWT authentication
|
||||
#[serde(default)]
|
||||
@@ -214,6 +226,32 @@ pub enum ComputeFeature {
|
||||
UnknownFeature,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)]
|
||||
pub struct PageserverConnectionInfo {
|
||||
/// NB: 0 for unsharded tenants, 1 for sharded tenants with 1 shard, following storage
|
||||
pub shard_count: ShardCount,
|
||||
|
||||
/// INVARIANT: null if shard_count is 0, otherwise non-null and immutable
|
||||
pub stripe_size: Option<u32>,
|
||||
|
||||
pub shards: HashMap<ShardIndex, PageserverShardInfo>,
|
||||
|
||||
#[serde(default)]
|
||||
pub prefer_protocol: PageserverProtocol,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)]
|
||||
pub struct PageserverShardInfo {
|
||||
pub pageservers: Vec<PageserverShardConnectionInfo>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)]
|
||||
pub struct PageserverShardConnectionInfo {
|
||||
pub id: Option<String>,
|
||||
pub libpq_url: Option<String>,
|
||||
pub grpc_url: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
|
||||
pub struct RemoteExtSpec {
|
||||
pub public_extensions: Option<Vec<String>>,
|
||||
@@ -331,6 +369,12 @@ impl ComputeMode {
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for ComputeMode {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str(self.to_type_str())
|
||||
}
|
||||
}
|
||||
|
||||
/// Log level for audit logging
|
||||
#[derive(Clone, Debug, Default, Eq, PartialEq, Deserialize, Serialize)]
|
||||
pub enum ComputeAudit {
|
||||
@@ -467,13 +511,15 @@ pub struct JwksSettings {
|
||||
pub jwt_audience: Option<String>,
|
||||
}
|
||||
|
||||
/// Protocol used to connect to a Pageserver. Parsed from the connstring scheme.
|
||||
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
|
||||
/// Protocol used to connect to a Pageserver.
|
||||
#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize, PartialEq, Eq)]
|
||||
pub enum PageserverProtocol {
|
||||
/// The original protocol based on libpq and COPY. Uses postgresql:// or postgres:// scheme.
|
||||
#[default]
|
||||
#[serde(rename = "libpq")]
|
||||
Libpq,
|
||||
/// A newer, gRPC-based protocol. Uses grpc:// scheme.
|
||||
#[serde(rename = "grpc")]
|
||||
Grpc,
|
||||
}
|
||||
|
||||
|
||||
@@ -6,15 +6,26 @@ license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
thiserror.workspace = true
|
||||
nix.workspace=true
|
||||
nix.workspace = true
|
||||
workspace_hack = { version = "0.1", path = "../../workspace_hack" }
|
||||
libc.workspace = true
|
||||
lock_api.workspace = true
|
||||
rustc-hash.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
criterion = { workspace = true, features = ["html_reports"] }
|
||||
rand = "0.9"
|
||||
rand_distr = "0.5.1"
|
||||
xxhash-rust = { version = "0.8.15", features = ["xxh3"] }
|
||||
ahash.workspace = true
|
||||
twox-hash = { version = "2.1.1" }
|
||||
seahash = "4.1.0"
|
||||
hashbrown = { git = "https://github.com/quantumish/hashbrown.git", rev = "6610e6d" }
|
||||
|
||||
|
||||
[target.'cfg(target_os = "macos")'.dependencies]
|
||||
tempfile = "3.14.0"
|
||||
|
||||
[dev-dependencies]
|
||||
rand.workspace = true
|
||||
rand_distr = "0.5.1"
|
||||
[[bench]]
|
||||
name = "hmap_resize"
|
||||
harness = false
|
||||
|
||||
330
libs/neon-shmem/benches/hmap_resize.rs
Normal file
330
libs/neon-shmem/benches/hmap_resize.rs
Normal file
@@ -0,0 +1,330 @@
|
||||
use criterion::{BatchSize, BenchmarkId, Criterion, criterion_group, criterion_main};
|
||||
use neon_shmem::hash::HashMapAccess;
|
||||
use neon_shmem::hash::HashMapInit;
|
||||
use neon_shmem::hash::entry::Entry;
|
||||
use rand::distr::{Distribution, StandardUniform};
|
||||
use rand::prelude::*;
|
||||
use std::default::Default;
|
||||
use std::hash::BuildHasher;
|
||||
|
||||
// Taken from bindings to C code
|
||||
|
||||
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
|
||||
#[repr(C)]
|
||||
pub struct FileCacheKey {
|
||||
pub _spc_id: u32,
|
||||
pub _db_id: u32,
|
||||
pub _rel_number: u32,
|
||||
pub _fork_num: u32,
|
||||
pub _block_num: u32,
|
||||
}
|
||||
|
||||
impl Distribution<FileCacheKey> for StandardUniform {
|
||||
// questionable, but doesn't need to be good randomness
|
||||
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> FileCacheKey {
|
||||
FileCacheKey {
|
||||
_spc_id: rng.random(),
|
||||
_db_id: rng.random(),
|
||||
_rel_number: rng.random(),
|
||||
_fork_num: rng.random(),
|
||||
_block_num: rng.random(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
#[repr(C)]
|
||||
pub struct FileCacheEntry {
|
||||
pub _offset: u32,
|
||||
pub _access_count: u32,
|
||||
pub _prev: *mut FileCacheEntry,
|
||||
pub _next: *mut FileCacheEntry,
|
||||
pub _state: [u32; 8],
|
||||
}
|
||||
|
||||
impl FileCacheEntry {
|
||||
fn dummy() -> Self {
|
||||
Self {
|
||||
_offset: 0,
|
||||
_access_count: 0,
|
||||
_prev: std::ptr::null_mut(),
|
||||
_next: std::ptr::null_mut(),
|
||||
_state: [0; 8],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Utilities for applying operations.
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct TestOp<K, V>(K, Option<V>);
|
||||
|
||||
fn apply_op<K: Clone + std::hash::Hash + Eq, V, S: std::hash::BuildHasher>(
|
||||
op: TestOp<K, V>,
|
||||
map: &mut HashMapAccess<K, V, S>,
|
||||
) {
|
||||
let entry = map.entry(op.0);
|
||||
|
||||
match op.1 {
|
||||
Some(new) => match entry {
|
||||
Entry::Occupied(mut e) => Some(e.insert(new)),
|
||||
Entry::Vacant(e) => {
|
||||
_ = e.insert(new).unwrap();
|
||||
None
|
||||
}
|
||||
},
|
||||
None => match entry {
|
||||
Entry::Occupied(e) => Some(e.remove()),
|
||||
Entry::Vacant(_) => None,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
// Hash utilities
|
||||
|
||||
struct SeaRandomState {
|
||||
k1: u64,
|
||||
k2: u64,
|
||||
k3: u64,
|
||||
k4: u64,
|
||||
}
|
||||
|
||||
impl std::hash::BuildHasher for SeaRandomState {
|
||||
type Hasher = seahash::SeaHasher;
|
||||
|
||||
fn build_hasher(&self) -> Self::Hasher {
|
||||
seahash::SeaHasher::with_seeds(self.k1, self.k2, self.k3, self.k4)
|
||||
}
|
||||
}
|
||||
|
||||
impl SeaRandomState {
|
||||
fn new() -> Self {
|
||||
let mut rng = rand::rng();
|
||||
Self {
|
||||
k1: rng.random(),
|
||||
k2: rng.random(),
|
||||
k3: rng.random(),
|
||||
k4: rng.random(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn small_benchs(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("Small maps");
|
||||
group.sample_size(10);
|
||||
|
||||
group.bench_function("small_rehash", |b| {
|
||||
let ideal_filled = 4_000_000;
|
||||
let size = 5_000_000;
|
||||
let mut writer = HashMapInit::new_resizeable(size, size * 2).attach_writer();
|
||||
let mut rng = rand::rng();
|
||||
while writer.get_num_buckets_in_use() < ideal_filled as usize {
|
||||
let key: FileCacheKey = rng.random();
|
||||
let val = FileCacheEntry::dummy();
|
||||
apply_op(TestOp(key, Some(val)), &mut writer);
|
||||
}
|
||||
b.iter(|| writer.shuffle());
|
||||
});
|
||||
|
||||
group.bench_function("small_rehash_xxhash", |b| {
|
||||
let ideal_filled = 4_000_000;
|
||||
let size = 5_000_000;
|
||||
let mut writer = HashMapInit::new_resizeable(size, size * 2)
|
||||
.with_hasher(twox_hash::xxhash64::RandomState::default())
|
||||
.attach_writer();
|
||||
let mut rng = rand::rng();
|
||||
while writer.get_num_buckets_in_use() < ideal_filled as usize {
|
||||
let key: FileCacheKey = rng.random();
|
||||
let val = FileCacheEntry::dummy();
|
||||
apply_op(TestOp(key, Some(val)), &mut writer);
|
||||
}
|
||||
b.iter(|| writer.shuffle());
|
||||
});
|
||||
|
||||
group.bench_function("small_rehash_ahash", |b| {
|
||||
let ideal_filled = 4_000_000;
|
||||
let size = 5_000_000;
|
||||
let mut writer = HashMapInit::new_resizeable(size, size * 2)
|
||||
.with_hasher(ahash::RandomState::default())
|
||||
.attach_writer();
|
||||
let mut rng = rand::rng();
|
||||
while writer.get_num_buckets_in_use() < ideal_filled as usize {
|
||||
let key: FileCacheKey = rng.random();
|
||||
let val = FileCacheEntry::dummy();
|
||||
apply_op(TestOp(key, Some(val)), &mut writer);
|
||||
}
|
||||
b.iter(|| writer.shuffle());
|
||||
});
|
||||
|
||||
group.bench_function("small_rehash_seahash", |b| {
|
||||
let ideal_filled = 4_000_000;
|
||||
let size = 5_000_000;
|
||||
let mut writer = HashMapInit::new_resizeable(size, size * 2)
|
||||
.with_hasher(SeaRandomState::new())
|
||||
.attach_writer();
|
||||
let mut rng = rand::rng();
|
||||
while writer.get_num_buckets_in_use() < ideal_filled as usize {
|
||||
let key: FileCacheKey = rng.random();
|
||||
let val = FileCacheEntry::dummy();
|
||||
apply_op(TestOp(key, Some(val)), &mut writer);
|
||||
}
|
||||
b.iter(|| writer.shuffle());
|
||||
});
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
fn real_benchs(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("Realistic workloads");
|
||||
group.sample_size(10);
|
||||
group.bench_function("real_bulk_insert", |b| {
|
||||
let size = 125_000_000;
|
||||
let ideal_filled = 100_000_000;
|
||||
let mut rng = rand::rng();
|
||||
b.iter_batched(
|
||||
|| HashMapInit::new_resizeable(size, size * 2).attach_writer(),
|
||||
|writer| {
|
||||
for _ in 0..ideal_filled {
|
||||
let key: FileCacheKey = rng.random();
|
||||
let val = FileCacheEntry::dummy();
|
||||
let entry = writer.entry(key);
|
||||
match entry {
|
||||
Entry::Occupied(mut e) => {
|
||||
std::hint::black_box(e.insert(val));
|
||||
}
|
||||
Entry::Vacant(e) => {
|
||||
let _ = std::hint::black_box(e.insert(val).unwrap());
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
BatchSize::SmallInput,
|
||||
)
|
||||
});
|
||||
|
||||
group.bench_function("real_rehash", |b| {
|
||||
let size = 125_000_000;
|
||||
let ideal_filled = 100_000_000;
|
||||
let mut writer = HashMapInit::new_resizeable(size, size).attach_writer();
|
||||
let mut rng = rand::rng();
|
||||
while writer.get_num_buckets_in_use() < ideal_filled {
|
||||
let key: FileCacheKey = rng.random();
|
||||
let val = FileCacheEntry::dummy();
|
||||
apply_op(TestOp(key, Some(val)), &mut writer);
|
||||
}
|
||||
b.iter(|| writer.shuffle());
|
||||
});
|
||||
|
||||
group.bench_function("real_rehash_hashbrown", |b| {
|
||||
let size = 125_000_000;
|
||||
let ideal_filled = 100_000_000;
|
||||
let mut writer = hashbrown::raw::RawTable::new();
|
||||
let mut rng = rand::rng();
|
||||
let hasher = rustc_hash::FxBuildHasher;
|
||||
unsafe {
|
||||
writer
|
||||
.resize(
|
||||
size,
|
||||
|(k, _)| hasher.hash_one(k),
|
||||
hashbrown::raw::Fallibility::Infallible,
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
while writer.len() < ideal_filled as usize {
|
||||
let key: FileCacheKey = rng.random();
|
||||
let val = FileCacheEntry::dummy();
|
||||
writer.insert(hasher.hash_one(&key), (key, val), |(k, _)| {
|
||||
hasher.hash_one(k)
|
||||
});
|
||||
}
|
||||
b.iter(|| unsafe {
|
||||
writer.table.rehash_in_place(
|
||||
&|table, index| {
|
||||
hasher.hash_one(
|
||||
&table
|
||||
.bucket::<(FileCacheKey, FileCacheEntry)>(index)
|
||||
.as_ref()
|
||||
.0,
|
||||
)
|
||||
},
|
||||
std::mem::size_of::<(FileCacheKey, FileCacheEntry)>(),
|
||||
if std::mem::needs_drop::<(FileCacheKey, FileCacheEntry)>() {
|
||||
Some(|ptr| std::ptr::drop_in_place(ptr as *mut (FileCacheKey, FileCacheEntry)))
|
||||
} else {
|
||||
None
|
||||
},
|
||||
)
|
||||
});
|
||||
});
|
||||
|
||||
for elems in [2, 4, 8, 16, 32, 64, 96, 112] {
|
||||
group.bench_with_input(
|
||||
BenchmarkId::new("real_rehash_varied", elems),
|
||||
&elems,
|
||||
|b, &size| {
|
||||
let ideal_filled = size * 1_000_000;
|
||||
let size = 125_000_000;
|
||||
let mut writer = HashMapInit::new_resizeable(size, size).attach_writer();
|
||||
let mut rng = rand::rng();
|
||||
while writer.get_num_buckets_in_use() < ideal_filled as usize {
|
||||
let key: FileCacheKey = rng.random();
|
||||
let val = FileCacheEntry::dummy();
|
||||
apply_op(TestOp(key, Some(val)), &mut writer);
|
||||
}
|
||||
b.iter(|| writer.shuffle());
|
||||
},
|
||||
);
|
||||
group.bench_with_input(
|
||||
BenchmarkId::new("real_rehash_varied_hashbrown", elems),
|
||||
&elems,
|
||||
|b, &size| {
|
||||
let ideal_filled = size * 1_000_000;
|
||||
let size = 125_000_000;
|
||||
let mut writer = hashbrown::raw::RawTable::new();
|
||||
let mut rng = rand::rng();
|
||||
let hasher = rustc_hash::FxBuildHasher;
|
||||
unsafe {
|
||||
writer
|
||||
.resize(
|
||||
size,
|
||||
|(k, _)| hasher.hash_one(k),
|
||||
hashbrown::raw::Fallibility::Infallible,
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
while writer.len() < ideal_filled as usize {
|
||||
let key: FileCacheKey = rng.random();
|
||||
let val = FileCacheEntry::dummy();
|
||||
writer.insert(hasher.hash_one(&key), (key, val), |(k, _)| {
|
||||
hasher.hash_one(k)
|
||||
});
|
||||
}
|
||||
b.iter(|| unsafe {
|
||||
writer.table.rehash_in_place(
|
||||
&|table, index| {
|
||||
hasher.hash_one(
|
||||
&table
|
||||
.bucket::<(FileCacheKey, FileCacheEntry)>(index)
|
||||
.as_ref()
|
||||
.0,
|
||||
)
|
||||
},
|
||||
std::mem::size_of::<(FileCacheKey, FileCacheEntry)>(),
|
||||
if std::mem::needs_drop::<(FileCacheKey, FileCacheEntry)>() {
|
||||
Some(|ptr| {
|
||||
std::ptr::drop_in_place(ptr as *mut (FileCacheKey, FileCacheEntry))
|
||||
})
|
||||
} else {
|
||||
None
|
||||
},
|
||||
)
|
||||
});
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
criterion_group!(benches, small_benchs, real_benchs);
|
||||
criterion_main!(benches);
|
||||
@@ -16,6 +16,7 @@
|
||||
//!
|
||||
//! Concurrency is managed very simply: the entire map is guarded by one shared-memory RwLock.
|
||||
|
||||
use std::fmt::Debug;
|
||||
use std::hash::{BuildHasher, Hash};
|
||||
use std::mem::MaybeUninit;
|
||||
|
||||
@@ -56,6 +57,22 @@ pub struct HashMapInit<'a, K, V, S = rustc_hash::FxBuildHasher> {
|
||||
num_buckets: u32,
|
||||
}
|
||||
|
||||
impl<'a, K, V, S> Debug for HashMapInit<'a, K, V, S>
|
||||
where
|
||||
K: Debug,
|
||||
V: Debug,
|
||||
{
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("HashMapInit")
|
||||
.field("shmem_handle", &self.shmem_handle)
|
||||
.field("shared_ptr", &self.shared_ptr)
|
||||
.field("shared_size", &self.shared_size)
|
||||
// .field("hasher", &self.hasher)
|
||||
.field("num_buckets", &self.num_buckets)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// This is a per-process handle to a hash table that (possibly) lives in shared memory.
|
||||
/// If a child process is launched with fork(), the child process should
|
||||
/// get its own HashMapAccess by calling HashMapInit::attach_writer/reader().
|
||||
@@ -71,6 +88,20 @@ pub struct HashMapAccess<'a, K, V, S = rustc_hash::FxBuildHasher> {
|
||||
unsafe impl<K: Sync, V: Sync, S> Sync for HashMapAccess<'_, K, V, S> {}
|
||||
unsafe impl<K: Send, V: Send, S> Send for HashMapAccess<'_, K, V, S> {}
|
||||
|
||||
impl<'a, K, V, S> Debug for HashMapAccess<'a, K, V, S>
|
||||
where
|
||||
K: Debug,
|
||||
V: Debug,
|
||||
{
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("HashMapAccess")
|
||||
.field("shmem_handle", &self.shmem_handle)
|
||||
.field("shared_ptr", &self.shared_ptr)
|
||||
// .field("hasher", &self.hasher)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, K: Clone + Hash + Eq, V, S> HashMapInit<'a, K, V, S> {
|
||||
/// Change the 'hasher' used by the hash table.
|
||||
///
|
||||
@@ -298,7 +329,7 @@ where
|
||||
|
||||
/// Get a reference to the entry containing a key.
|
||||
///
|
||||
/// NB: THis takes a write lock as there's no way to distinguish whether the intention
|
||||
/// NB: This takes a write lock as there's no way to distinguish whether the intention
|
||||
/// is to use the entry for reading or for writing in advance.
|
||||
pub fn entry(&self, key: K) -> Entry<'a, '_, K, V> {
|
||||
let hash = self.get_hash_value(&key);
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
//! Simple hash table with chaining.
|
||||
|
||||
use std::fmt::Debug;
|
||||
use std::hash::Hash;
|
||||
use std::mem::MaybeUninit;
|
||||
|
||||
@@ -17,6 +18,19 @@ pub(crate) struct Bucket<K, V> {
|
||||
pub(crate) inner: Option<(K, V)>,
|
||||
}
|
||||
|
||||
impl<K, V> Debug for Bucket<K, V>
|
||||
where
|
||||
K: Debug,
|
||||
V: Debug,
|
||||
{
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("Bucket")
|
||||
.field("next", &self.next)
|
||||
.field("inner", &self.inner)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// Core hash table implementation.
|
||||
pub(crate) struct CoreHashMap<'a, K, V> {
|
||||
/// Dictionary used to map hashes to bucket indices.
|
||||
@@ -31,6 +45,22 @@ pub(crate) struct CoreHashMap<'a, K, V> {
|
||||
pub(crate) buckets_in_use: u32,
|
||||
}
|
||||
|
||||
impl<'a, K, V> Debug for CoreHashMap<'a, K, V>
|
||||
where
|
||||
K: Debug,
|
||||
V: Debug,
|
||||
{
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("CoreHashMap")
|
||||
.field("dictionary", &self.dictionary)
|
||||
.field("buckets", &self.buckets)
|
||||
.field("free_head", &self.free_head)
|
||||
.field("alloc_limit", &self.alloc_limit)
|
||||
.field("buckets_in_use", &self.buckets_in_use)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// Error for when there are no empty buckets left but one is needed.
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub struct FullError;
|
||||
|
||||
@@ -61,6 +61,10 @@ impl<K, V> OccupiedEntry<'_, '_, K, V> {
|
||||
///
|
||||
/// This may result in multiple bucket accesses if the entry was obtained by index as the
|
||||
/// previous chain entry needs to be discovered in this case.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if the `prev_pos` field is equal to [`PrevPos::Unknown`]. In practice, this means
|
||||
/// the entry was obtained via calling something like [`super::HashMapAccess::entry_at_bucket`].
|
||||
pub fn remove(mut self) -> V {
|
||||
// If this bucket was queried by index, go ahead and follow its chain from the start.
|
||||
let prev = if let PrevPos::Unknown(hash) = self.prev_pos {
|
||||
|
||||
@@ -21,6 +21,7 @@ use nix::unistd::ftruncate as nix_ftruncate;
|
||||
/// the underlying file is resized. Do not access the area beyond the current size. Currently, that
|
||||
/// will cause the file to be expanded, but we might use `mprotect()` etc. to enforce that in the
|
||||
/// future.
|
||||
#[derive(Debug)]
|
||||
pub struct ShmemHandle {
|
||||
/// memfd file descriptor
|
||||
fd: OwnedFd,
|
||||
@@ -35,6 +36,7 @@ pub struct ShmemHandle {
|
||||
}
|
||||
|
||||
/// This is stored at the beginning in the shared memory area.
|
||||
#[derive(Debug)]
|
||||
struct SharedStruct {
|
||||
max_size: usize,
|
||||
|
||||
|
||||
@@ -1500,7 +1500,6 @@ pub struct TimelineArchivalConfigRequest {
|
||||
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone)]
|
||||
pub struct TimelinePatchIndexPartRequest {
|
||||
pub rel_size_migration: Option<RelSizeMigration>,
|
||||
pub rel_size_migrated_at: Option<Lsn>,
|
||||
pub gc_compaction_last_completed_lsn: Option<Lsn>,
|
||||
pub applied_gc_cutoff_lsn: Option<Lsn>,
|
||||
#[serde(default)]
|
||||
@@ -1534,10 +1533,10 @@ pub enum RelSizeMigration {
|
||||
/// `None` is the same as `Some(RelSizeMigration::Legacy)`.
|
||||
Legacy,
|
||||
/// The tenant is migrating to the new rel_size format. Both old and new rel_size format are
|
||||
/// persisted in the storage. The read path will read both formats and validate them.
|
||||
/// persisted in the index part. The read path will read both formats and merge them.
|
||||
Migrating,
|
||||
/// The tenant has migrated to the new rel_size format. Only the new rel_size format is persisted
|
||||
/// in the storage, and the read path will not read the old format.
|
||||
/// in the index part, and the read path will not read the old format.
|
||||
Migrated,
|
||||
}
|
||||
|
||||
@@ -1620,7 +1619,6 @@ pub struct TimelineInfo {
|
||||
|
||||
/// The status of the rel_size migration.
|
||||
pub rel_size_migration: Option<RelSizeMigration>,
|
||||
pub rel_size_migrated_at: Option<Lsn>,
|
||||
|
||||
/// Whether the timeline is invisible in synthetic size calculations.
|
||||
pub is_invisible: Option<bool>,
|
||||
|
||||
@@ -15,7 +15,6 @@ use tokio::sync::mpsc;
|
||||
use crate::cancel_token::RawCancelToken;
|
||||
use crate::codec::{BackendMessages, FrontendMessage, RecordNotices};
|
||||
use crate::config::{Host, SslMode};
|
||||
use crate::connection::gc_bytesmut;
|
||||
use crate::query::RowStream;
|
||||
use crate::simple_query::SimpleQueryStream;
|
||||
use crate::types::{Oid, Type};
|
||||
@@ -96,13 +95,20 @@ impl InnerClient {
|
||||
Ok(PartialQuery(Some(self)))
|
||||
}
|
||||
|
||||
// pub fn send_with_sync<F>(&mut self, f: F) -> Result<&mut Responses, Error>
|
||||
// where
|
||||
// F: FnOnce(&mut BytesMut) -> Result<(), Error>,
|
||||
// {
|
||||
// self.start()?.send_with_sync(f)
|
||||
// }
|
||||
|
||||
pub fn send_simple_query(&mut self, query: &str) -> Result<&mut Responses, Error> {
|
||||
self.responses.waiting += 1;
|
||||
|
||||
self.buffer.clear();
|
||||
// simple queries do not need sync.
|
||||
frontend::query(query, &mut self.buffer).map_err(Error::encode)?;
|
||||
let buf = self.buffer.split();
|
||||
let buf = self.buffer.split().freeze();
|
||||
self.send_message(FrontendMessage::Raw(buf))
|
||||
}
|
||||
|
||||
@@ -119,7 +125,7 @@ impl Drop for PartialQuery<'_> {
|
||||
if let Some(client) = self.0.take() {
|
||||
client.buffer.clear();
|
||||
frontend::sync(&mut client.buffer);
|
||||
let buf = client.buffer.split();
|
||||
let buf = client.buffer.split().freeze();
|
||||
let _ = client.send_message(FrontendMessage::Raw(buf));
|
||||
}
|
||||
}
|
||||
@@ -135,7 +141,7 @@ impl<'a> PartialQuery<'a> {
|
||||
client.buffer.clear();
|
||||
f(&mut client.buffer)?;
|
||||
frontend::flush(&mut client.buffer);
|
||||
let buf = client.buffer.split();
|
||||
let buf = client.buffer.split().freeze();
|
||||
client.send_message(FrontendMessage::Raw(buf))
|
||||
}
|
||||
|
||||
@@ -148,7 +154,7 @@ impl<'a> PartialQuery<'a> {
|
||||
client.buffer.clear();
|
||||
f(&mut client.buffer)?;
|
||||
frontend::sync(&mut client.buffer);
|
||||
let buf = client.buffer.split();
|
||||
let buf = client.buffer.split().freeze();
|
||||
let _ = client.send_message(FrontendMessage::Raw(buf));
|
||||
|
||||
Ok(&mut self.0.take().unwrap().responses)
|
||||
@@ -286,35 +292,8 @@ impl Client {
|
||||
simple_query::batch_execute(self.inner_mut(), query).await
|
||||
}
|
||||
|
||||
/// Similar to `discard_all`, but it does not clear any query plans
|
||||
///
|
||||
/// This runs in the background, so it can be executed without `await`ing.
|
||||
pub fn reset_session_background(&mut self) -> Result<(), Error> {
|
||||
// "CLOSE ALL": closes any cursors
|
||||
// "SET SESSION AUTHORIZATION DEFAULT": resets the current_user back to the session_user
|
||||
// "RESET ALL": resets any GUCs back to their session defaults.
|
||||
// "DEALLOCATE ALL": deallocates any prepared statements
|
||||
// "UNLISTEN *": stops listening on all channels
|
||||
// "SELECT pg_advisory_unlock_all();": unlocks all advisory locks
|
||||
// "DISCARD TEMP;": drops all temporary tables
|
||||
// "DISCARD SEQUENCES;": deallocates all cached sequence state
|
||||
|
||||
let _responses = self.inner_mut().send_simple_query(
|
||||
"ROLLBACK;
|
||||
CLOSE ALL;
|
||||
SET SESSION AUTHORIZATION DEFAULT;
|
||||
RESET ALL;
|
||||
DEALLOCATE ALL;
|
||||
UNLISTEN *;
|
||||
SELECT pg_advisory_unlock_all();
|
||||
DISCARD TEMP;
|
||||
DISCARD SEQUENCES;",
|
||||
)?;
|
||||
|
||||
// Clean up memory usage.
|
||||
gc_bytesmut(&mut self.inner_mut().buffer);
|
||||
|
||||
Ok(())
|
||||
pub async fn discard_all(&mut self) -> Result<ReadyForQueryStatus, Error> {
|
||||
self.batch_execute("discard all").await
|
||||
}
|
||||
|
||||
/// Begins a new database transaction.
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
use std::io;
|
||||
|
||||
use bytes::BytesMut;
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use fallible_iterator::FallibleIterator;
|
||||
use postgres_protocol2::message::backend;
|
||||
use tokio::sync::mpsc::UnboundedSender;
|
||||
use tokio_util::codec::{Decoder, Encoder};
|
||||
|
||||
pub enum FrontendMessage {
|
||||
Raw(BytesMut),
|
||||
Raw(Bytes),
|
||||
RecordNotices(RecordNotices),
|
||||
}
|
||||
|
||||
@@ -17,10 +17,7 @@ pub struct RecordNotices {
|
||||
}
|
||||
|
||||
pub enum BackendMessage {
|
||||
Normal {
|
||||
messages: BackendMessages,
|
||||
ready: bool,
|
||||
},
|
||||
Normal { messages: BackendMessages },
|
||||
Async(backend::Message),
|
||||
}
|
||||
|
||||
@@ -43,18 +40,11 @@ impl FallibleIterator for BackendMessages {
|
||||
|
||||
pub struct PostgresCodec;
|
||||
|
||||
impl Encoder<BytesMut> for PostgresCodec {
|
||||
impl Encoder<Bytes> for PostgresCodec {
|
||||
type Error = io::Error;
|
||||
|
||||
fn encode(&mut self, item: BytesMut, dst: &mut BytesMut) -> io::Result<()> {
|
||||
// When it comes to request/response workflows, we usually flush the entire write
|
||||
// buffer in order to wait for the response before we send a new request.
|
||||
// Therefore we can avoid the copy and just replace the buffer.
|
||||
if dst.is_empty() {
|
||||
*dst = item;
|
||||
} else {
|
||||
dst.extend_from_slice(&item);
|
||||
}
|
||||
fn encode(&mut self, item: Bytes, dst: &mut BytesMut) -> io::Result<()> {
|
||||
dst.extend_from_slice(&item);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -66,7 +56,6 @@ impl Decoder for PostgresCodec {
|
||||
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<BackendMessage>, io::Error> {
|
||||
let mut idx = 0;
|
||||
|
||||
let mut ready = false;
|
||||
while let Some(header) = backend::Header::parse(&src[idx..])? {
|
||||
let len = header.len() as usize + 1;
|
||||
if src[idx..].len() < len {
|
||||
@@ -90,7 +79,6 @@ impl Decoder for PostgresCodec {
|
||||
idx += len;
|
||||
|
||||
if header.tag() == backend::READY_FOR_QUERY_TAG {
|
||||
ready = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -100,7 +88,6 @@ impl Decoder for PostgresCodec {
|
||||
} else {
|
||||
Ok(Some(BackendMessage::Normal {
|
||||
messages: BackendMessages(src.split_to(idx)),
|
||||
ready,
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,8 +11,9 @@ use tokio::io::{AsyncRead, AsyncWrite};
|
||||
use tokio::net::TcpStream;
|
||||
|
||||
use crate::connect::connect;
|
||||
use crate::connect_raw::{self, StartupStream};
|
||||
use crate::connect_raw::{RawConnection, connect_raw};
|
||||
use crate::connect_tls::connect_tls;
|
||||
use crate::maybe_tls_stream::MaybeTlsStream;
|
||||
use crate::tls::{MakeTlsConnect, TlsConnect, TlsStream};
|
||||
use crate::{Client, Connection, Error};
|
||||
|
||||
@@ -243,27 +244,24 @@ impl Config {
|
||||
&self,
|
||||
stream: S,
|
||||
tls: T,
|
||||
) -> Result<StartupStream<S, T::Stream>, Error>
|
||||
) -> Result<RawConnection<S, T::Stream>, Error>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: TlsConnect<S>,
|
||||
{
|
||||
let stream = connect_tls(stream, self.ssl_mode, tls).await?;
|
||||
let mut stream = StartupStream::new(stream);
|
||||
connect_raw::authenticate(&mut stream, self).await?;
|
||||
|
||||
Ok(stream)
|
||||
connect_raw(stream, self).await
|
||||
}
|
||||
|
||||
pub fn authenticate<S, T>(
|
||||
pub async fn authenticate<S, T>(
|
||||
&self,
|
||||
stream: &mut StartupStream<S, T>,
|
||||
) -> impl Future<Output = Result<(), Error>>
|
||||
stream: MaybeTlsStream<S, T>,
|
||||
) -> Result<RawConnection<S, T>, Error>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: TlsStream + Unpin,
|
||||
{
|
||||
connect_raw::authenticate(stream, self)
|
||||
connect_raw(stream, self).await
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,17 +1,15 @@
|
||||
use std::net::IpAddr;
|
||||
|
||||
use futures_util::TryStreamExt;
|
||||
use postgres_protocol2::message::backend::Message;
|
||||
use tokio::io::{AsyncRead, AsyncWrite};
|
||||
use tokio::net::TcpStream;
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
use crate::client::SocketConfig;
|
||||
use crate::config::{Host, SslMode};
|
||||
use crate::connect_raw::StartupStream;
|
||||
use crate::config::Host;
|
||||
use crate::connect_raw::connect_raw;
|
||||
use crate::connect_socket::connect_socket;
|
||||
use crate::connect_tls::connect_tls;
|
||||
use crate::tls::{MakeTlsConnect, TlsConnect};
|
||||
use crate::{Client, Config, Connection, Error};
|
||||
use crate::{Client, Config, Connection, Error, RawConnection};
|
||||
|
||||
pub async fn connect<T>(
|
||||
tls: &T,
|
||||
@@ -45,36 +43,20 @@ where
|
||||
T: TlsConnect<TcpStream>,
|
||||
{
|
||||
let socket = connect_socket(host_addr, host, port, config.connect_timeout).await?;
|
||||
let stream = config.tls_and_authenticate(socket, tls).await?;
|
||||
managed(
|
||||
let stream = connect_tls(socket, config.ssl_mode, tls).await?;
|
||||
let RawConnection {
|
||||
stream,
|
||||
host_addr,
|
||||
host.clone(),
|
||||
port,
|
||||
config.ssl_mode,
|
||||
config.connect_timeout,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn managed<TlsStream>(
|
||||
mut stream: StartupStream<TcpStream, TlsStream>,
|
||||
host_addr: Option<IpAddr>,
|
||||
host: Host,
|
||||
port: u16,
|
||||
ssl_mode: SslMode,
|
||||
connect_timeout: Option<std::time::Duration>,
|
||||
) -> Result<(Client, Connection<TcpStream, TlsStream>), Error>
|
||||
where
|
||||
TlsStream: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
let (process_id, secret_key) = wait_until_ready(&mut stream).await?;
|
||||
parameters: _,
|
||||
delayed_notice: _,
|
||||
process_id,
|
||||
secret_key,
|
||||
} = connect_raw(stream, config).await?;
|
||||
|
||||
let socket_config = SocketConfig {
|
||||
host_addr,
|
||||
host,
|
||||
host: host.clone(),
|
||||
port,
|
||||
connect_timeout,
|
||||
connect_timeout: config.connect_timeout,
|
||||
};
|
||||
|
||||
let (client_tx, conn_rx) = mpsc::unbounded_channel();
|
||||
@@ -83,37 +65,12 @@ where
|
||||
client_tx,
|
||||
client_rx,
|
||||
socket_config,
|
||||
ssl_mode,
|
||||
config.ssl_mode,
|
||||
process_id,
|
||||
secret_key,
|
||||
);
|
||||
|
||||
let stream = stream.into_framed();
|
||||
let connection = Connection::new(stream, conn_tx, conn_rx);
|
||||
|
||||
Ok((client, connection))
|
||||
}
|
||||
|
||||
async fn wait_until_ready<S, T>(stream: &mut StartupStream<S, T>) -> Result<(i32, i32), Error>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
let mut process_id = 0;
|
||||
let mut secret_key = 0;
|
||||
|
||||
loop {
|
||||
match stream.try_next().await.map_err(Error::io)? {
|
||||
Some(Message::BackendKeyData(body)) => {
|
||||
process_id = body.process_id();
|
||||
secret_key = body.secret_key();
|
||||
}
|
||||
// These values are currently not used by `Client`/`Connection`. Ignore them.
|
||||
Some(Message::ParameterStatus(_)) | Some(Message::NoticeResponse(_)) => {}
|
||||
Some(Message::ReadyForQuery(_)) => return Ok((process_id, secret_key)),
|
||||
Some(Message::ErrorResponse(body)) => return Err(Error::db(body)),
|
||||
Some(_) => return Err(Error::unexpected_message()),
|
||||
None => return Err(Error::closed()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,27 +1,52 @@
|
||||
use std::collections::HashMap;
|
||||
use std::io;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll, ready};
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use bytes::BytesMut;
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use fallible_iterator::FallibleIterator;
|
||||
use futures_util::{SinkExt, Stream, TryStreamExt};
|
||||
use futures_util::{Sink, SinkExt, Stream, TryStreamExt, ready};
|
||||
use postgres_protocol2::authentication::sasl;
|
||||
use postgres_protocol2::authentication::sasl::ScramSha256;
|
||||
use postgres_protocol2::message::backend::{AuthenticationSaslBody, Message};
|
||||
use postgres_protocol2::message::backend::{AuthenticationSaslBody, Message, NoticeResponseBody};
|
||||
use postgres_protocol2::message::frontend;
|
||||
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
|
||||
use tokio_util::codec::{Framed, FramedParts};
|
||||
use tokio::io::{AsyncRead, AsyncWrite};
|
||||
use tokio_util::codec::Framed;
|
||||
|
||||
use crate::Error;
|
||||
use crate::codec::PostgresCodec;
|
||||
use crate::codec::{BackendMessage, BackendMessages, PostgresCodec};
|
||||
use crate::config::{self, AuthKeys, Config};
|
||||
use crate::connection::{GC_THRESHOLD, INITIAL_CAPACITY};
|
||||
use crate::maybe_tls_stream::MaybeTlsStream;
|
||||
use crate::tls::TlsStream;
|
||||
|
||||
pub struct StartupStream<S, T> {
|
||||
inner: Framed<MaybeTlsStream<S, T>, PostgresCodec>,
|
||||
read_buf: BytesMut,
|
||||
buf: BackendMessages,
|
||||
delayed_notice: Vec<NoticeResponseBody>,
|
||||
}
|
||||
|
||||
impl<S, T> Sink<Bytes> for StartupStream<S, T>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
type Error = io::Error;
|
||||
|
||||
fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
Pin::new(&mut self.inner).poll_ready(cx)
|
||||
}
|
||||
|
||||
fn start_send(mut self: Pin<&mut Self>, item: Bytes) -> io::Result<()> {
|
||||
Pin::new(&mut self.inner).start_send(item)
|
||||
}
|
||||
|
||||
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
Pin::new(&mut self.inner).poll_flush(cx)
|
||||
}
|
||||
|
||||
fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
Pin::new(&mut self.inner).poll_close(cx)
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, T> Stream for StartupStream<S, T>
|
||||
@@ -31,109 +56,78 @@ where
|
||||
{
|
||||
type Item = io::Result<Message>;
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
// We don't use `self.inner.poll_next()` as that might over-read into the read buffer.
|
||||
|
||||
// read 1 byte tag, 4 bytes length.
|
||||
let header = ready!(self.as_mut().poll_fill_buf_exact(cx, 5)?);
|
||||
|
||||
let len = u32::from_be_bytes(header[1..5].try_into().unwrap());
|
||||
if len < 4 {
|
||||
return Poll::Ready(Some(Err(std::io::Error::other(
|
||||
"postgres message too small",
|
||||
))));
|
||||
}
|
||||
if len >= 65536 {
|
||||
return Poll::Ready(Some(Err(std::io::Error::other(
|
||||
"postgres message too large",
|
||||
))));
|
||||
}
|
||||
|
||||
// the tag is an additional byte.
|
||||
let _message = ready!(self.as_mut().poll_fill_buf_exact(cx, len as usize + 1)?);
|
||||
|
||||
// Message::parse will remove the all the bytes from the buffer.
|
||||
Poll::Ready(Message::parse(&mut self.read_buf).transpose())
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, T> StartupStream<S, T>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
/// Fill the buffer until it's the exact length provided. No additional data will be read from the socket.
|
||||
///
|
||||
/// If the current buffer length is greater, nothing happens.
|
||||
fn poll_fill_buf_exact(
|
||||
self: Pin<&mut Self>,
|
||||
fn poll_next(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
len: usize,
|
||||
) -> Poll<Result<&[u8], std::io::Error>> {
|
||||
let this = self.get_mut();
|
||||
let mut stream = Pin::new(this.inner.get_mut());
|
||||
|
||||
let mut n = this.read_buf.len();
|
||||
while n < len {
|
||||
this.read_buf.resize(len, 0);
|
||||
|
||||
let mut buf = ReadBuf::new(&mut this.read_buf[..]);
|
||||
buf.set_filled(n);
|
||||
|
||||
if stream.as_mut().poll_read(cx, &mut buf)?.is_pending() {
|
||||
this.read_buf.truncate(n);
|
||||
return Poll::Pending;
|
||||
) -> Poll<Option<io::Result<Message>>> {
|
||||
loop {
|
||||
match self.buf.next() {
|
||||
Ok(Some(message)) => return Poll::Ready(Some(Ok(message))),
|
||||
Ok(None) => {}
|
||||
Err(e) => return Poll::Ready(Some(Err(e))),
|
||||
}
|
||||
|
||||
if buf.filled().len() == n {
|
||||
return Poll::Ready(Err(std::io::Error::new(
|
||||
std::io::ErrorKind::UnexpectedEof,
|
||||
"early eof",
|
||||
)));
|
||||
match ready!(Pin::new(&mut self.inner).poll_next(cx)) {
|
||||
Some(Ok(BackendMessage::Normal { messages, .. })) => self.buf = messages,
|
||||
Some(Ok(BackendMessage::Async(message))) => return Poll::Ready(Some(Ok(message))),
|
||||
Some(Err(e)) => return Poll::Ready(Some(Err(e))),
|
||||
None => return Poll::Ready(None),
|
||||
}
|
||||
n = buf.filled().len();
|
||||
|
||||
this.read_buf.truncate(n);
|
||||
}
|
||||
|
||||
Poll::Ready(Ok(&this.read_buf[..len]))
|
||||
}
|
||||
|
||||
pub fn into_framed(mut self) -> Framed<MaybeTlsStream<S, T>, PostgresCodec> {
|
||||
*self.inner.read_buffer_mut() = self.read_buf;
|
||||
self.inner
|
||||
}
|
||||
|
||||
pub fn new(io: MaybeTlsStream<S, T>) -> Self {
|
||||
let mut parts = FramedParts::new(io, PostgresCodec);
|
||||
parts.write_buf = BytesMut::with_capacity(INITIAL_CAPACITY);
|
||||
|
||||
let mut inner = Framed::from_parts(parts);
|
||||
|
||||
// This is the default already, but nice to be explicit.
|
||||
// We divide by two because writes will overshoot the boundary.
|
||||
// We don't want constant overshoots to cause us to constantly re-shrink the buffer.
|
||||
inner.set_backpressure_boundary(GC_THRESHOLD / 2);
|
||||
|
||||
Self {
|
||||
inner,
|
||||
read_buf: BytesMut::with_capacity(INITIAL_CAPACITY),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn authenticate<S, T>(
|
||||
stream: &mut StartupStream<S, T>,
|
||||
pub struct RawConnection<S, T> {
|
||||
pub stream: Framed<MaybeTlsStream<S, T>, PostgresCodec>,
|
||||
pub parameters: HashMap<String, String>,
|
||||
pub delayed_notice: Vec<NoticeResponseBody>,
|
||||
pub process_id: i32,
|
||||
pub secret_key: i32,
|
||||
}
|
||||
|
||||
pub async fn connect_raw<S, T>(
|
||||
stream: MaybeTlsStream<S, T>,
|
||||
config: &Config,
|
||||
) -> Result<(), Error>
|
||||
) -> Result<RawConnection<S, T>, Error>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: TlsStream + Unpin,
|
||||
{
|
||||
frontend::startup_message(&config.server_params, stream.inner.write_buffer_mut())
|
||||
.map_err(Error::encode)?;
|
||||
let mut stream = StartupStream {
|
||||
inner: Framed::new(stream, PostgresCodec),
|
||||
buf: BackendMessages::empty(),
|
||||
delayed_notice: Vec::new(),
|
||||
};
|
||||
|
||||
stream.inner.flush().await.map_err(Error::io)?;
|
||||
startup(&mut stream, config).await?;
|
||||
authenticate(&mut stream, config).await?;
|
||||
let (process_id, secret_key, parameters) = read_info(&mut stream).await?;
|
||||
|
||||
Ok(RawConnection {
|
||||
stream: stream.inner,
|
||||
parameters,
|
||||
delayed_notice: stream.delayed_notice,
|
||||
process_id,
|
||||
secret_key,
|
||||
})
|
||||
}
|
||||
|
||||
async fn startup<S, T>(stream: &mut StartupStream<S, T>, config: &Config) -> Result<(), Error>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
let mut buf = BytesMut::new();
|
||||
frontend::startup_message(&config.server_params, &mut buf).map_err(Error::encode)?;
|
||||
|
||||
stream.send(buf.freeze()).await.map_err(Error::io)
|
||||
}
|
||||
|
||||
async fn authenticate<S, T>(stream: &mut StartupStream<S, T>, config: &Config) -> Result<(), Error>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: TlsStream + Unpin,
|
||||
{
|
||||
match stream.try_next().await.map_err(Error::io)? {
|
||||
Some(Message::AuthenticationOk) => {
|
||||
can_skip_channel_binding(config)?;
|
||||
@@ -147,8 +141,7 @@ where
|
||||
.as_ref()
|
||||
.ok_or_else(|| Error::config("password missing".into()))?;
|
||||
|
||||
frontend::password_message(pass, stream.inner.write_buffer_mut())
|
||||
.map_err(Error::encode)?;
|
||||
authenticate_password(stream, pass).await?;
|
||||
}
|
||||
Some(Message::AuthenticationSasl(body)) => {
|
||||
authenticate_sasl(stream, body, config).await?;
|
||||
@@ -167,7 +160,6 @@ where
|
||||
None => return Err(Error::closed()),
|
||||
}
|
||||
|
||||
stream.inner.flush().await.map_err(Error::io)?;
|
||||
match stream.try_next().await.map_err(Error::io)? {
|
||||
Some(Message::AuthenticationOk) => Ok(()),
|
||||
Some(Message::ErrorResponse(body)) => Err(Error::db(body)),
|
||||
@@ -185,6 +177,20 @@ fn can_skip_channel_binding(config: &Config) -> Result<(), Error> {
|
||||
}
|
||||
}
|
||||
|
||||
async fn authenticate_password<S, T>(
|
||||
stream: &mut StartupStream<S, T>,
|
||||
password: &[u8],
|
||||
) -> Result<(), Error>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
let mut buf = BytesMut::new();
|
||||
frontend::password_message(password, &mut buf).map_err(Error::encode)?;
|
||||
|
||||
stream.send(buf.freeze()).await.map_err(Error::io)
|
||||
}
|
||||
|
||||
async fn authenticate_sasl<S, T>(
|
||||
stream: &mut StartupStream<S, T>,
|
||||
body: AuthenticationSaslBody,
|
||||
@@ -239,10 +245,10 @@ where
|
||||
return Err(Error::config("password or auth keys missing".into()));
|
||||
};
|
||||
|
||||
frontend::sasl_initial_response(mechanism, scram.message(), stream.inner.write_buffer_mut())
|
||||
.map_err(Error::encode)?;
|
||||
let mut buf = BytesMut::new();
|
||||
frontend::sasl_initial_response(mechanism, scram.message(), &mut buf).map_err(Error::encode)?;
|
||||
stream.send(buf.freeze()).await.map_err(Error::io)?;
|
||||
|
||||
stream.inner.flush().await.map_err(Error::io)?;
|
||||
let body = match stream.try_next().await.map_err(Error::io)? {
|
||||
Some(Message::AuthenticationSaslContinue(body)) => body,
|
||||
Some(Message::ErrorResponse(body)) => return Err(Error::db(body)),
|
||||
@@ -255,10 +261,10 @@ where
|
||||
.await
|
||||
.map_err(|e| Error::authentication(e.into()))?;
|
||||
|
||||
frontend::sasl_response(scram.message(), stream.inner.write_buffer_mut())
|
||||
.map_err(Error::encode)?;
|
||||
let mut buf = BytesMut::new();
|
||||
frontend::sasl_response(scram.message(), &mut buf).map_err(Error::encode)?;
|
||||
stream.send(buf.freeze()).await.map_err(Error::io)?;
|
||||
|
||||
stream.inner.flush().await.map_err(Error::io)?;
|
||||
let body = match stream.try_next().await.map_err(Error::io)? {
|
||||
Some(Message::AuthenticationSaslFinal(body)) => body,
|
||||
Some(Message::ErrorResponse(body)) => return Err(Error::db(body)),
|
||||
@@ -272,3 +278,35 @@ where
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn read_info<S, T>(
|
||||
stream: &mut StartupStream<S, T>,
|
||||
) -> Result<(i32, i32, HashMap<String, String>), Error>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
let mut process_id = 0;
|
||||
let mut secret_key = 0;
|
||||
let mut parameters = HashMap::new();
|
||||
|
||||
loop {
|
||||
match stream.try_next().await.map_err(Error::io)? {
|
||||
Some(Message::BackendKeyData(body)) => {
|
||||
process_id = body.process_id();
|
||||
secret_key = body.secret_key();
|
||||
}
|
||||
Some(Message::ParameterStatus(body)) => {
|
||||
parameters.insert(
|
||||
body.name().map_err(Error::parse)?.to_string(),
|
||||
body.value().map_err(Error::parse)?.to_string(),
|
||||
);
|
||||
}
|
||||
Some(Message::NoticeResponse(body)) => stream.delayed_notice.push(body),
|
||||
Some(Message::ReadyForQuery(_)) => return Ok((process_id, secret_key, parameters)),
|
||||
Some(Message::ErrorResponse(body)) => return Err(Error::db(body)),
|
||||
Some(_) => return Err(Error::unexpected_message()),
|
||||
None => return Err(Error::closed()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -44,27 +44,6 @@ pub struct Connection<S, T> {
|
||||
state: State,
|
||||
}
|
||||
|
||||
pub const INITIAL_CAPACITY: usize = 2 * 1024;
|
||||
pub const GC_THRESHOLD: usize = 16 * 1024;
|
||||
|
||||
/// Gargabe collect the [`BytesMut`] if it has too much spare capacity.
|
||||
pub fn gc_bytesmut(buf: &mut BytesMut) {
|
||||
// We use a different mode to shrink the buf when above the threshold.
|
||||
// When above the threshold, we only re-allocate when the buf has 2x spare capacity.
|
||||
let reclaim = GC_THRESHOLD.checked_sub(buf.len()).unwrap_or(buf.len());
|
||||
|
||||
// `try_reclaim` tries to get the capacity from any shared `BytesMut`s,
|
||||
// before then comparing the length against the capacity.
|
||||
if buf.try_reclaim(reclaim) {
|
||||
let capacity = usize::max(buf.len(), INITIAL_CAPACITY);
|
||||
|
||||
// Allocate a new `BytesMut` so that we deallocate the old version.
|
||||
let mut new = BytesMut::with_capacity(capacity);
|
||||
new.extend_from_slice(buf);
|
||||
*buf = new;
|
||||
}
|
||||
}
|
||||
|
||||
pub enum Never {}
|
||||
|
||||
impl<S, T> Connection<S, T>
|
||||
@@ -107,14 +86,7 @@ where
|
||||
continue;
|
||||
}
|
||||
BackendMessage::Async(_) => continue,
|
||||
BackendMessage::Normal { messages, ready } => {
|
||||
// if we read a ReadyForQuery from postgres, let's try GC the read buffer.
|
||||
if ready {
|
||||
gc_bytesmut(self.stream.read_buffer_mut());
|
||||
}
|
||||
|
||||
messages
|
||||
}
|
||||
BackendMessage::Normal { messages } => messages,
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -205,7 +177,12 @@ where
|
||||
// Send a terminate message to postgres
|
||||
Poll::Ready(None) => {
|
||||
trace!("poll_write: at eof, terminating");
|
||||
frontend::terminate(self.stream.write_buffer_mut());
|
||||
let mut request = BytesMut::new();
|
||||
frontend::terminate(&mut request);
|
||||
|
||||
Pin::new(&mut self.stream)
|
||||
.start_send(request.freeze())
|
||||
.map_err(Error::io)?;
|
||||
|
||||
trace!("poll_write: sent eof, closing");
|
||||
trace!("poll_write: done");
|
||||
@@ -228,10 +205,6 @@ where
|
||||
{
|
||||
Poll::Ready(()) => {
|
||||
trace!("poll_flush: flushed");
|
||||
|
||||
// GC the write buffer if we managed to flush
|
||||
gc_bytesmut(self.stream.write_buffer_mut());
|
||||
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
Poll::Pending => {
|
||||
|
||||
@@ -452,16 +452,16 @@ impl Error {
|
||||
Error(Box::new(ErrorInner { kind, cause }))
|
||||
}
|
||||
|
||||
pub fn closed() -> Error {
|
||||
pub(crate) fn closed() -> Error {
|
||||
Error::new(Kind::Closed, None)
|
||||
}
|
||||
|
||||
pub fn unexpected_message() -> Error {
|
||||
pub(crate) fn unexpected_message() -> Error {
|
||||
Error::new(Kind::UnexpectedMessage, None)
|
||||
}
|
||||
|
||||
#[allow(clippy::needless_pass_by_value)]
|
||||
pub fn db(error: ErrorResponseBody) -> Error {
|
||||
pub(crate) fn db(error: ErrorResponseBody) -> Error {
|
||||
match DbError::parse(&mut error.fields()) {
|
||||
Ok(e) => Error::new(Kind::Db, Some(Box::new(e))),
|
||||
Err(e) => Error::new(Kind::Parse, Some(Box::new(e))),
|
||||
@@ -493,7 +493,7 @@ impl Error {
|
||||
Error::new(Kind::Tls, Some(e))
|
||||
}
|
||||
|
||||
pub fn io(e: io::Error) -> Error {
|
||||
pub(crate) fn io(e: io::Error) -> Error {
|
||||
Error::new(Kind::Io, Some(Box::new(e)))
|
||||
}
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@ use postgres_protocol2::message::backend::ReadyForQueryBody;
|
||||
pub use crate::cancel_token::{CancelToken, RawCancelToken};
|
||||
pub use crate::client::{Client, SocketConfig};
|
||||
pub use crate::config::Config;
|
||||
pub use crate::connect_raw::RawConnection;
|
||||
pub use crate::connection::Connection;
|
||||
pub use crate::error::Error;
|
||||
pub use crate::generic_client::GenericClient;
|
||||
@@ -48,8 +49,8 @@ mod cancel_token;
|
||||
mod client;
|
||||
mod codec;
|
||||
pub mod config;
|
||||
pub mod connect;
|
||||
pub mod connect_raw;
|
||||
mod connect;
|
||||
mod connect_raw;
|
||||
mod connect_socket;
|
||||
mod connect_tls;
|
||||
mod connection;
|
||||
|
||||
@@ -301,12 +301,7 @@ pub struct PullTimelineRequest {
|
||||
pub tenant_id: TenantId,
|
||||
pub timeline_id: TimelineId,
|
||||
pub http_hosts: Vec<String>,
|
||||
/// Membership configuration to switch to after pull.
|
||||
/// It guarantees that if pull_timeline returns successfully, the timeline will
|
||||
/// not be deleted by request with an older generation.
|
||||
/// Storage controller always sets this field.
|
||||
/// None is only allowed for manual pull_timeline requests.
|
||||
pub mconf: Option<Configuration>,
|
||||
pub ignore_tombstone: Option<bool>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
|
||||
@@ -59,6 +59,10 @@ impl ShardCount {
|
||||
pub const MAX: Self = Self(u8::MAX);
|
||||
pub const MIN: Self = Self(0);
|
||||
|
||||
pub fn unsharded() -> Self {
|
||||
ShardCount(0)
|
||||
}
|
||||
|
||||
/// The internal value of a ShardCount may be zero, which means "1 shard, but use
|
||||
/// legacy format for TenantShardId that excludes the shard suffix", also known
|
||||
/// as [`TenantShardId::unsharded`].
|
||||
|
||||
@@ -429,11 +429,9 @@ pub fn empty_shmem() -> crate::bindings::WalproposerShmemState {
|
||||
};
|
||||
|
||||
let empty_wal_rate_limiter = crate::bindings::WalRateLimiter {
|
||||
effective_max_wal_bytes_per_second: crate::bindings::pg_atomic_uint32 { value: 0 },
|
||||
should_limit: crate::bindings::pg_atomic_uint32 { value: 0 },
|
||||
sent_bytes: 0,
|
||||
batch_start_time_us: crate::bindings::pg_atomic_uint64 { value: 0 },
|
||||
batch_end_time_us: crate::bindings::pg_atomic_uint64 { value: 0 },
|
||||
last_recorded_time_us: crate::bindings::pg_atomic_uint64 { value: 0 },
|
||||
};
|
||||
|
||||
crate::bindings::WalproposerShmemState {
|
||||
|
||||
@@ -54,6 +54,7 @@ pageserver_api.workspace = true
|
||||
pageserver_client.workspace = true # for ResponseErrorMessageExt TOOD refactor that
|
||||
pageserver_compaction.workspace = true
|
||||
pageserver_page_api.workspace = true
|
||||
peekable.workspace = true
|
||||
pem.workspace = true
|
||||
pin-project-lite.workspace = true
|
||||
postgres_backend.workspace = true
|
||||
@@ -66,6 +67,7 @@ postgres-types.workspace = true
|
||||
posthog_client_lite.workspace = true
|
||||
pprof.workspace = true
|
||||
pq_proto.workspace = true
|
||||
prost.workspace = true
|
||||
rand.workspace = true
|
||||
range-set-blaze = { version = "0.1.16", features = ["alloc"] }
|
||||
regex.workspace = true
|
||||
|
||||
@@ -14,9 +14,9 @@ use utils::logging::warn_slow;
|
||||
|
||||
use crate::pool::{ChannelPool, ClientGuard, ClientPool, StreamGuard, StreamPool};
|
||||
use crate::retry::Retry;
|
||||
use crate::split::GetPageSplitter;
|
||||
use compute_api::spec::PageserverProtocol;
|
||||
use pageserver_page_api as page_api;
|
||||
use pageserver_page_api::GetPageSplitter;
|
||||
use utils::id::{TenantId, TimelineId};
|
||||
use utils::shard::{ShardCount, ShardIndex, ShardNumber, ShardStripeSize};
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
mod client;
|
||||
mod pool;
|
||||
mod retry;
|
||||
mod split;
|
||||
|
||||
pub use client::{PageserverClient, ShardSpec};
|
||||
pub use pageserver_api::shard::ShardStripeSize; // used in ShardSpec
|
||||
|
||||
@@ -19,7 +19,9 @@ pub mod proto {
|
||||
}
|
||||
|
||||
mod client;
|
||||
pub use client::Client;
|
||||
mod model;
|
||||
mod split;
|
||||
|
||||
pub use client::Client;
|
||||
pub use model::*;
|
||||
pub use split::GetPageSplitter;
|
||||
|
||||
@@ -33,6 +33,8 @@ pub enum ProtocolError {
|
||||
Invalid(&'static str, String),
|
||||
#[error("required field '{0}' is missing")]
|
||||
Missing(&'static str),
|
||||
#[error("invalid combination of not_modified_lsn '{0}' and request_lsn '{1}'")]
|
||||
InvalidLsns(Lsn, Lsn),
|
||||
}
|
||||
|
||||
impl ProtocolError {
|
||||
@@ -85,9 +87,9 @@ impl TryFrom<proto::ReadLsn> for ReadLsn {
|
||||
return Err(ProtocolError::invalid("request_lsn", pb.request_lsn));
|
||||
}
|
||||
if pb.not_modified_since_lsn > pb.request_lsn {
|
||||
return Err(ProtocolError::invalid(
|
||||
"not_modified_since_lsn",
|
||||
pb.not_modified_since_lsn,
|
||||
return Err(ProtocolError::InvalidLsns(
|
||||
Lsn(pb.not_modified_since_lsn),
|
||||
Lsn(pb.request_lsn),
|
||||
));
|
||||
}
|
||||
Ok(Self {
|
||||
|
||||
@@ -3,18 +3,18 @@ use std::collections::HashMap;
|
||||
use anyhow::anyhow;
|
||||
use bytes::Bytes;
|
||||
|
||||
use crate::model::*;
|
||||
use pageserver_api::key::rel_block_to_key;
|
||||
use pageserver_api::shard::key_to_shard_number;
|
||||
use pageserver_page_api as page_api;
|
||||
use utils::shard::{ShardCount, ShardIndex, ShardStripeSize};
|
||||
|
||||
/// Splits GetPageRequests that straddle shard boundaries and assembles the responses.
|
||||
/// TODO: add tests for this.
|
||||
pub struct GetPageSplitter {
|
||||
/// Split requests by shard index.
|
||||
requests: HashMap<ShardIndex, page_api::GetPageRequest>,
|
||||
requests: HashMap<ShardIndex, GetPageRequest>,
|
||||
/// The response being assembled. Preallocated with empty pages, to be filled in.
|
||||
response: page_api::GetPageResponse,
|
||||
response: GetPageResponse,
|
||||
/// Maps the offset in `request.block_numbers` and `response.pages` to the owning shard. Used
|
||||
/// to assemble the response pages in the same order as the original request.
|
||||
block_shards: Vec<ShardIndex>,
|
||||
@@ -24,7 +24,7 @@ impl GetPageSplitter {
|
||||
/// Checks if the given request only touches a single shard, and returns the shard ID. This is
|
||||
/// the common case, so we check first in order to avoid unnecessary allocations and overhead.
|
||||
pub fn for_single_shard(
|
||||
req: &page_api::GetPageRequest,
|
||||
req: &GetPageRequest,
|
||||
count: ShardCount,
|
||||
stripe_size: Option<ShardStripeSize>,
|
||||
) -> anyhow::Result<Option<ShardIndex>> {
|
||||
@@ -57,7 +57,7 @@ impl GetPageSplitter {
|
||||
|
||||
/// Splits the given request.
|
||||
pub fn split(
|
||||
req: page_api::GetPageRequest,
|
||||
req: GetPageRequest,
|
||||
count: ShardCount,
|
||||
stripe_size: Option<ShardStripeSize>,
|
||||
) -> anyhow::Result<Self> {
|
||||
@@ -84,7 +84,7 @@ impl GetPageSplitter {
|
||||
|
||||
requests
|
||||
.entry(shard_id)
|
||||
.or_insert_with(|| page_api::GetPageRequest {
|
||||
.or_insert_with(|| GetPageRequest {
|
||||
request_id: req.request_id,
|
||||
request_class: req.request_class,
|
||||
rel: req.rel,
|
||||
@@ -98,16 +98,16 @@ impl GetPageSplitter {
|
||||
|
||||
// Construct a response to be populated by shard responses. Preallocate empty page slots
|
||||
// with the expected block numbers.
|
||||
let response = page_api::GetPageResponse {
|
||||
let response = GetPageResponse {
|
||||
request_id: req.request_id,
|
||||
status_code: page_api::GetPageStatusCode::Ok,
|
||||
status_code: GetPageStatusCode::Ok,
|
||||
reason: None,
|
||||
rel: req.rel,
|
||||
pages: req
|
||||
.block_numbers
|
||||
.into_iter()
|
||||
.map(|block_number| {
|
||||
page_api::Page {
|
||||
Page {
|
||||
block_number,
|
||||
image: Bytes::new(), // empty page slot to be filled in
|
||||
}
|
||||
@@ -123,9 +123,7 @@ impl GetPageSplitter {
|
||||
}
|
||||
|
||||
/// Drains the per-shard requests, moving them out of the splitter to avoid extra allocations.
|
||||
pub fn drain_requests(
|
||||
&mut self,
|
||||
) -> impl Iterator<Item = (ShardIndex, page_api::GetPageRequest)> {
|
||||
pub fn drain_requests(&mut self) -> impl Iterator<Item = (ShardIndex, GetPageRequest)> {
|
||||
self.requests.drain()
|
||||
}
|
||||
|
||||
@@ -135,10 +133,10 @@ impl GetPageSplitter {
|
||||
pub fn add_response(
|
||||
&mut self,
|
||||
shard_id: ShardIndex,
|
||||
response: page_api::GetPageResponse,
|
||||
response: GetPageResponse,
|
||||
) -> anyhow::Result<()> {
|
||||
// The caller should already have converted status codes into tonic::Status.
|
||||
if response.status_code != page_api::GetPageStatusCode::Ok {
|
||||
if response.status_code != GetPageStatusCode::Ok {
|
||||
return Err(anyhow!(
|
||||
"unexpected non-OK response for shard {shard_id}: {} {}",
|
||||
response.status_code,
|
||||
@@ -209,7 +207,7 @@ impl GetPageSplitter {
|
||||
|
||||
/// Fetches the final, assembled response.
|
||||
#[allow(clippy::result_large_err)]
|
||||
pub fn get_response(self) -> anyhow::Result<page_api::GetPageResponse> {
|
||||
pub fn get_response(self) -> anyhow::Result<GetPageResponse> {
|
||||
// Check that the response is complete.
|
||||
for (i, page) in self.response.pages.iter().enumerate() {
|
||||
if page.image.is_empty() {
|
||||
@@ -25,6 +25,9 @@ tracing.workspace = true
|
||||
tokio.workspace = true
|
||||
tokio-stream.workspace = true
|
||||
tokio-util.workspace = true
|
||||
axum.workspace = true
|
||||
http.workspace = true
|
||||
metrics.workspace = true
|
||||
tonic.workspace = true
|
||||
url.workspace = true
|
||||
|
||||
|
||||
@@ -34,6 +34,10 @@ use crate::util::{request_stats, tokio_thread_local_stats};
|
||||
/// GetPage@LatestLSN, uniformly distributed across the compute-accessible keyspace.
|
||||
#[derive(clap::Parser)]
|
||||
pub(crate) struct Args {
|
||||
#[clap(long, default_value = "false")]
|
||||
grpc: bool,
|
||||
#[clap(long, default_value = "false")]
|
||||
grpc_stream: bool,
|
||||
#[clap(long, default_value = "http://localhost:9898")]
|
||||
mgmt_api_endpoint: String,
|
||||
/// Pageserver connection string. Supports postgresql:// and grpc:// protocols.
|
||||
@@ -78,6 +82,9 @@ pub(crate) struct Args {
|
||||
#[clap(long)]
|
||||
set_io_mode: Option<pageserver_api::models::virtual_file::IoMode>,
|
||||
|
||||
#[clap(long)]
|
||||
only_relnode: Option<u32>,
|
||||
|
||||
/// Queue depth generated in each client.
|
||||
#[clap(long, default_value = "1")]
|
||||
queue_depth: NonZeroUsize,
|
||||
@@ -92,10 +99,31 @@ pub(crate) struct Args {
|
||||
#[clap(long, default_value = "1")]
|
||||
batch_size: NonZeroUsize,
|
||||
|
||||
#[clap(long)]
|
||||
only_relnode: Option<u32>,
|
||||
|
||||
targets: Option<Vec<TenantTimelineId>>,
|
||||
|
||||
#[clap(long, default_value = "100")]
|
||||
pool_max_consumers: NonZeroUsize,
|
||||
|
||||
#[clap(long, default_value = "5")]
|
||||
pool_error_threshold: NonZeroUsize,
|
||||
|
||||
#[clap(long, default_value = "5000")]
|
||||
pool_connect_timeout: NonZeroUsize,
|
||||
|
||||
#[clap(long, default_value = "1000")]
|
||||
pool_connect_backoff: NonZeroUsize,
|
||||
|
||||
#[clap(long, default_value = "60000")]
|
||||
pool_max_idle_duration: NonZeroUsize,
|
||||
|
||||
#[clap(long, default_value = "0")]
|
||||
max_delay_ms: usize,
|
||||
|
||||
#[clap(long, default_value = "0")]
|
||||
percent_drops: usize,
|
||||
|
||||
#[clap(long, default_value = "0")]
|
||||
percent_hangs: usize,
|
||||
}
|
||||
|
||||
/// State shared by all clients
|
||||
@@ -152,7 +180,6 @@ pub(crate) fn main(args: Args) -> anyhow::Result<()> {
|
||||
main_impl(args, thread_local_stats)
|
||||
})
|
||||
}
|
||||
|
||||
async fn main_impl(
|
||||
args: Args,
|
||||
all_thread_local_stats: AllThreadLocalStats<request_stats::Stats>,
|
||||
@@ -317,6 +344,7 @@ async fn main_impl(
|
||||
let rps_period = args
|
||||
.per_client_rate
|
||||
.map(|rps_limit| Duration::from_secs_f64(1.0 / (rps_limit as f64)));
|
||||
|
||||
let make_worker: &dyn Fn(WorkerId) -> Pin<Box<dyn Send + Future<Output = ()>>> = &|worker_id| {
|
||||
let ss = shared_state.clone();
|
||||
let cancel = cancel.clone();
|
||||
|
||||
@@ -11,7 +11,6 @@
|
||||
//! from data stored in object storage.
|
||||
//!
|
||||
use std::fmt::Write as FmtWrite;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Instant, SystemTime};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
@@ -421,16 +420,12 @@ where
|
||||
}
|
||||
|
||||
let mut min_restart_lsn: Lsn = Lsn::MAX;
|
||||
|
||||
let mut dbdir_cnt = 0;
|
||||
let mut rel_cnt = 0;
|
||||
|
||||
// Create tablespace directories
|
||||
for ((spcnode, dbnode), has_relmap_file) in
|
||||
self.timeline.list_dbdirs(self.lsn, self.ctx).await?
|
||||
{
|
||||
self.add_dbdir(spcnode, dbnode, has_relmap_file).await?;
|
||||
dbdir_cnt += 1;
|
||||
|
||||
// If full backup is requested, include all relation files.
|
||||
// Otherwise only include init forks of unlogged relations.
|
||||
let rels = self
|
||||
@@ -438,7 +433,6 @@ where
|
||||
.list_rels(spcnode, dbnode, Version::at(self.lsn), self.ctx)
|
||||
.await?;
|
||||
for &rel in rels.iter() {
|
||||
rel_cnt += 1;
|
||||
// Send init fork as main fork to provide well formed empty
|
||||
// contents of UNLOGGED relations. Postgres copies it in
|
||||
// `reinit.c` during recovery.
|
||||
@@ -461,10 +455,6 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
self.timeline
|
||||
.db_rel_count
|
||||
.store(Some(Arc::new((dbdir_cnt, rel_cnt))));
|
||||
|
||||
let start_time = Instant::now();
|
||||
let aux_files = self
|
||||
.timeline
|
||||
|
||||
@@ -156,8 +156,6 @@ impl FeatureResolver {
|
||||
|
||||
let tenant_properties = PerTenantProperties {
|
||||
remote_size_mb: Some(rand::rng().random_range(100.0..1000000.00)),
|
||||
db_count_max: Some(rand::rng().random_range(1..1000)),
|
||||
rel_count_max: Some(rand::rng().random_range(1..1000)),
|
||||
}
|
||||
.into_posthog_properties();
|
||||
|
||||
@@ -346,8 +344,6 @@ impl FeatureResolver {
|
||||
|
||||
struct PerTenantProperties {
|
||||
pub remote_size_mb: Option<f64>,
|
||||
pub db_count_max: Option<usize>,
|
||||
pub rel_count_max: Option<usize>,
|
||||
}
|
||||
|
||||
impl PerTenantProperties {
|
||||
@@ -359,18 +355,6 @@ impl PerTenantProperties {
|
||||
PostHogFlagFilterPropertyValue::Number(remote_size_mb),
|
||||
);
|
||||
}
|
||||
if let Some(db_count) = self.db_count_max {
|
||||
properties.insert(
|
||||
"tenant_db_count_max".to_string(),
|
||||
PostHogFlagFilterPropertyValue::Number(db_count as f64),
|
||||
);
|
||||
}
|
||||
if let Some(rel_count) = self.rel_count_max {
|
||||
properties.insert(
|
||||
"tenant_rel_count_max".to_string(),
|
||||
PostHogFlagFilterPropertyValue::Number(rel_count as f64),
|
||||
);
|
||||
}
|
||||
properties
|
||||
}
|
||||
}
|
||||
@@ -425,11 +409,7 @@ impl TenantFeatureResolver {
|
||||
|
||||
/// Refresh the cached properties and flags on the critical path.
|
||||
pub fn refresh_properties_and_flags(&self, tenant_shard: &TenantShard) {
|
||||
// Any of the remote size is none => this property is none.
|
||||
let mut remote_size_mb = Some(0.0);
|
||||
// Any of the db or rel count is available => this property is available.
|
||||
let mut db_count_max = None;
|
||||
let mut rel_count_max = None;
|
||||
for timeline in tenant_shard.list_timelines() {
|
||||
let size = timeline.metrics.resident_physical_size_get();
|
||||
if size == 0 {
|
||||
@@ -439,25 +419,9 @@ impl TenantFeatureResolver {
|
||||
if let Some(ref mut remote_size_mb) = remote_size_mb {
|
||||
*remote_size_mb += size as f64 / 1024.0 / 1024.0;
|
||||
}
|
||||
if let Some(data) = timeline.db_rel_count.load_full() {
|
||||
let (db_count, rel_count) = *data.as_ref();
|
||||
if db_count_max.is_none() {
|
||||
db_count_max = Some(db_count);
|
||||
}
|
||||
if rel_count_max.is_none() {
|
||||
rel_count_max = Some(rel_count);
|
||||
}
|
||||
db_count_max = db_count_max.map(|max| max.max(db_count));
|
||||
rel_count_max = rel_count_max.map(|max| max.max(rel_count));
|
||||
}
|
||||
}
|
||||
self.cached_tenant_properties.store(Arc::new(
|
||||
PerTenantProperties {
|
||||
remote_size_mb,
|
||||
db_count_max,
|
||||
rel_count_max,
|
||||
}
|
||||
.into_posthog_properties(),
|
||||
PerTenantProperties { remote_size_mb }.into_posthog_properties(),
|
||||
));
|
||||
|
||||
// BEGIN: Update the feature flag on the critical path.
|
||||
|
||||
@@ -484,8 +484,6 @@ async fn build_timeline_info_common(
|
||||
*timeline.get_applied_gc_cutoff_lsn(),
|
||||
);
|
||||
|
||||
let (rel_size_migration, rel_size_migrated_at) = timeline.get_rel_size_v2_status();
|
||||
|
||||
let info = TimelineInfo {
|
||||
tenant_id: timeline.tenant_shard_id,
|
||||
timeline_id: timeline.timeline_id,
|
||||
@@ -517,8 +515,7 @@ async fn build_timeline_info_common(
|
||||
|
||||
state,
|
||||
is_archived: Some(is_archived),
|
||||
rel_size_migration: Some(rel_size_migration),
|
||||
rel_size_migrated_at,
|
||||
rel_size_migration: Some(timeline.get_rel_size_v2_status()),
|
||||
is_invisible: Some(is_invisible),
|
||||
|
||||
walreceiver_status,
|
||||
@@ -933,16 +930,9 @@ async fn timeline_patch_index_part_handler(
|
||||
active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
|
||||
.await?;
|
||||
|
||||
if request_data.rel_size_migration.is_none() && request_data.rel_size_migrated_at.is_some()
|
||||
{
|
||||
return Err(ApiError::BadRequest(anyhow!(
|
||||
"updating rel_size_migrated_at without rel_size_migration is not allowed"
|
||||
)));
|
||||
}
|
||||
|
||||
if let Some(rel_size_migration) = request_data.rel_size_migration {
|
||||
timeline
|
||||
.update_rel_size_v2_status(rel_size_migration, request_data.rel_size_migrated_at)
|
||||
.update_rel_size_v2_status(rel_size_migration)
|
||||
.map_err(ApiError::InternalServerError)?;
|
||||
}
|
||||
|
||||
|
||||
@@ -57,7 +57,7 @@ pub async fn import_timeline_from_postgres_datadir(
|
||||
|
||||
// TODO this shoud be start_lsn, which is not necessarily equal to end_lsn (aka lsn)
|
||||
// Then fishing out pg_control would be unnecessary
|
||||
let mut modification = tline.begin_modification_for_import(pgdata_lsn);
|
||||
let mut modification = tline.begin_modification(pgdata_lsn);
|
||||
modification.init_empty()?;
|
||||
|
||||
// Import all but pg_wal
|
||||
@@ -309,7 +309,7 @@ async fn import_wal(
|
||||
waldecoder.feed_bytes(&buf);
|
||||
|
||||
let mut nrecords = 0;
|
||||
let mut modification = tline.begin_modification_for_import(last_lsn);
|
||||
let mut modification = tline.begin_modification(last_lsn);
|
||||
while last_lsn <= endpoint {
|
||||
if let Some((lsn, recdata)) = waldecoder.poll_decode()? {
|
||||
let interpreted = InterpretedWalRecord::from_bytes_filtered(
|
||||
@@ -357,7 +357,7 @@ pub async fn import_basebackup_from_tar(
|
||||
ctx: &RequestContext,
|
||||
) -> Result<()> {
|
||||
info!("importing base at {base_lsn}");
|
||||
let mut modification = tline.begin_modification_for_import(base_lsn);
|
||||
let mut modification = tline.begin_modification(base_lsn);
|
||||
modification.init_empty()?;
|
||||
|
||||
let mut pg_control: Option<ControlFileData> = None;
|
||||
@@ -457,7 +457,7 @@ pub async fn import_wal_from_tar(
|
||||
|
||||
waldecoder.feed_bytes(&bytes[offset..]);
|
||||
|
||||
let mut modification = tline.begin_modification_for_import(last_lsn);
|
||||
let mut modification = tline.begin_modification(last_lsn);
|
||||
while last_lsn <= end_lsn {
|
||||
if let Some((lsn, recdata)) = waldecoder.poll_decode()? {
|
||||
let interpreted = InterpretedWalRecord::from_bytes_filtered(
|
||||
|
||||
@@ -16,7 +16,8 @@ use anyhow::{Context as _, bail};
|
||||
use bytes::{Buf as _, BufMut as _, BytesMut};
|
||||
use chrono::Utc;
|
||||
use futures::future::BoxFuture;
|
||||
use futures::{FutureExt, Stream};
|
||||
use futures::stream::FuturesUnordered;
|
||||
use futures::{FutureExt, Stream, StreamExt as _};
|
||||
use itertools::Itertools;
|
||||
use jsonwebtoken::TokenData;
|
||||
use once_cell::sync::OnceCell;
|
||||
@@ -35,8 +36,8 @@ use pageserver_api::pagestream_api::{
|
||||
};
|
||||
use pageserver_api::reltag::SlruKind;
|
||||
use pageserver_api::shard::TenantShardId;
|
||||
use pageserver_page_api as page_api;
|
||||
use pageserver_page_api::proto;
|
||||
use pageserver_page_api::{self as page_api, GetPageSplitter};
|
||||
use postgres_backend::{
|
||||
AuthType, PostgresBackend, PostgresBackendReader, QueryError, is_expected_io_error,
|
||||
};
|
||||
@@ -443,6 +444,7 @@ impl TimelineHandles {
|
||||
handles: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
async fn get(
|
||||
&mut self,
|
||||
tenant_id: TenantId,
|
||||
@@ -469,6 +471,13 @@ impl TimelineHandles {
|
||||
fn tenant_id(&self) -> Option<TenantId> {
|
||||
self.wrapper.tenant_id.get().copied()
|
||||
}
|
||||
|
||||
/// Returns whether a child shard exists locally for the given shard.
|
||||
fn has_child_shard(&self, tenant_id: TenantId, shard_index: ShardIndex) -> bool {
|
||||
self.wrapper
|
||||
.tenant_manager
|
||||
.has_child_shard(tenant_id, shard_index)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct TenantManagerWrapper {
|
||||
@@ -535,7 +544,6 @@ impl timeline::handle::TenantManager<TenantManagerTypes> for TenantManagerWrappe
|
||||
match resolved {
|
||||
ShardResolveResult::Found(tenant_shard) => break tenant_shard,
|
||||
ShardResolveResult::NotFound => {
|
||||
MISROUTED_PAGESTREAM_REQUESTS.inc();
|
||||
return Err(GetActiveTimelineError::Tenant(
|
||||
GetActiveTenantError::NotFound(GetTenantError::NotFound(*tenant_id)),
|
||||
));
|
||||
@@ -3379,17 +3387,9 @@ impl GrpcPageServiceHandler {
|
||||
}
|
||||
}
|
||||
|
||||
/// Acquires a timeline handle for the given request.
|
||||
/// Acquires a timeline handle for the given request. The shard index must match a local shard.
|
||||
///
|
||||
/// TODO: during shard splits, the compute may still be sending requests to the parent shard
|
||||
/// until the entire split is committed and the compute is notified. Consider installing a
|
||||
/// temporary shard router from the parent to the children while the split is in progress.
|
||||
///
|
||||
/// TODO: consider moving this to a middleware layer; all requests need it. Needs to manage
|
||||
/// the TimelineHandles lifecycle.
|
||||
///
|
||||
/// TODO: untangle acquisition from TenantManagerWrapper::resolve() and Cache::get(), to avoid
|
||||
/// the unnecessary overhead.
|
||||
/// NB: this will fail during shard splits, see comment on [`Self::maybe_split_get_page`].
|
||||
async fn get_request_timeline(
|
||||
&self,
|
||||
req: &tonic::Request<impl Any>,
|
||||
@@ -3398,11 +3398,62 @@ impl GrpcPageServiceHandler {
|
||||
let shard_index = *extract::<ShardIndex>(req);
|
||||
let shard_selector = ShardSelector::Known(shard_index);
|
||||
|
||||
// TODO: untangle acquisition from TenantManagerWrapper::resolve() and Cache::get(), to
|
||||
// avoid the unnecessary overhead.
|
||||
TimelineHandles::new(self.tenant_manager.clone())
|
||||
.get(ttid.tenant_id, ttid.timeline_id, shard_selector)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Acquires a timeline handle for the given request, which must be for shard zero.
|
||||
///
|
||||
/// NB: during an ongoing shard split, the compute will keep talking to the parent shard until
|
||||
/// the split is committed, but the parent shard may have been removed in the meanwhile. In that
|
||||
/// case, we reroute the request to the new child shard. See [`Self::maybe_split_get_page`].
|
||||
///
|
||||
/// TODO: revamp the split protocol to avoid this child routing.
|
||||
async fn get_shard_zero_request_timeline(
|
||||
&self,
|
||||
req: &tonic::Request<impl Any>,
|
||||
) -> Result<Handle<TenantManagerTypes>, tonic::Status> {
|
||||
let ttid = *extract::<TenantTimelineId>(req);
|
||||
let shard_index = *extract::<ShardIndex>(req);
|
||||
|
||||
if shard_index.shard_number.0 != 0 {
|
||||
return Err(tonic::Status::invalid_argument(format!(
|
||||
"request must use shard zero (requested shard {shard_index})",
|
||||
)));
|
||||
}
|
||||
|
||||
// TODO: untangle acquisition from TenantManagerWrapper::resolve() and Cache::get(), to
|
||||
// avoid the unnecessary overhead.
|
||||
//
|
||||
// TODO: this does internal retries, which will delay requests during shard splits (we won't
|
||||
// look for the child until the parent's retries are exhausted). Don't do that.
|
||||
let mut handles = TimelineHandles::new(self.tenant_manager.clone());
|
||||
match handles
|
||||
.get(
|
||||
ttid.tenant_id,
|
||||
ttid.timeline_id,
|
||||
ShardSelector::Known(shard_index),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(timeline) => Ok(timeline),
|
||||
Err(err) => {
|
||||
// We may be in the middle of a shard split. Try to find a child shard 0.
|
||||
if let Ok(timeline) = handles
|
||||
.get(ttid.tenant_id, ttid.timeline_id, ShardSelector::Zero)
|
||||
.await
|
||||
&& timeline.get_shard_index().shard_count > shard_index.shard_count
|
||||
{
|
||||
return Ok(timeline);
|
||||
}
|
||||
Err(err.into())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Starts a SmgrOpTimer at received_at, throttles the request, and records execution start.
|
||||
/// Only errors if the timeline is shutting down.
|
||||
///
|
||||
@@ -3434,28 +3485,22 @@ impl GrpcPageServiceHandler {
|
||||
/// TODO: get_vectored() currently enforces a batch limit of 32. Postgres will typically send
|
||||
/// batches up to effective_io_concurrency = 100. Either we have to accept large batches, or
|
||||
/// split them up in the client or server.
|
||||
#[instrument(skip_all, fields(req_id, rel, blkno, blks, req_lsn, mod_lsn))]
|
||||
#[instrument(skip_all, fields(
|
||||
req_id = %req.request_id,
|
||||
rel = %req.rel,
|
||||
blkno = %req.block_numbers[0],
|
||||
blks = %req.block_numbers.len(),
|
||||
lsn = %req.read_lsn,
|
||||
))]
|
||||
async fn get_page(
|
||||
ctx: &RequestContext,
|
||||
timeline: &WeakHandle<TenantManagerTypes>,
|
||||
req: proto::GetPageRequest,
|
||||
timeline: Handle<TenantManagerTypes>,
|
||||
req: page_api::GetPageRequest,
|
||||
io_concurrency: IoConcurrency,
|
||||
) -> Result<proto::GetPageResponse, tonic::Status> {
|
||||
let received_at = Instant::now();
|
||||
let timeline = timeline.upgrade()?;
|
||||
received_at: Instant,
|
||||
) -> Result<page_api::GetPageResponse, tonic::Status> {
|
||||
let ctx = ctx.with_scope_page_service_pagestream(&timeline);
|
||||
|
||||
// Validate the request, decorate the span, and convert it to a Pagestream request.
|
||||
let req = page_api::GetPageRequest::try_from(req)?;
|
||||
|
||||
span_record!(
|
||||
req_id = %req.request_id,
|
||||
rel = %req.rel,
|
||||
blkno = %req.block_numbers[0],
|
||||
blks = %req.block_numbers.len(),
|
||||
lsn = %req.read_lsn,
|
||||
);
|
||||
|
||||
let latest_gc_cutoff_lsn = timeline.get_applied_gc_cutoff_lsn(); // hold guard
|
||||
let effective_lsn = PageServerHandler::effective_request_lsn(
|
||||
&timeline,
|
||||
@@ -3530,7 +3575,96 @@ impl GrpcPageServiceHandler {
|
||||
};
|
||||
}
|
||||
|
||||
Ok(resp.into())
|
||||
Ok(resp)
|
||||
}
|
||||
|
||||
/// Processes a GetPage request when there is a potential shard split in progress. We have to
|
||||
/// reroute the request any local child shards, and split batch requests that straddle multiple
|
||||
/// child shards.
|
||||
///
|
||||
/// Parent shards are split and removed incrementally, but the compute is only notified once the
|
||||
/// entire split commits, which can take several minutes. In the meanwhile, the compute will be
|
||||
/// sending requests to the parent shard.
|
||||
///
|
||||
/// TODO: add test infrastructure to provoke this situation frequently and for long periods of
|
||||
/// time, to properly exercise it.
|
||||
///
|
||||
/// TODO: revamp the split protocol to avoid this, e.g.:
|
||||
/// * Keep the parent shard until the split commits and the compute is notified.
|
||||
/// * Notify the compute about each subsplit.
|
||||
/// * Return an error that updates the compute's shard map.
|
||||
#[instrument(skip_all)]
|
||||
async fn maybe_split_get_page(
|
||||
ctx: &RequestContext,
|
||||
handles: &mut TimelineHandles,
|
||||
ttid: TenantTimelineId,
|
||||
parent: ShardIndex,
|
||||
req: page_api::GetPageRequest,
|
||||
io_concurrency: IoConcurrency,
|
||||
received_at: Instant,
|
||||
) -> Result<page_api::GetPageResponse, tonic::Status> {
|
||||
// Check the first page to see if we have any child shards at all. Otherwise, the compute is
|
||||
// just talking to the wrong Pageserver. If the parent has been split, the shard now owning
|
||||
// the page must have a higher shard count.
|
||||
let timeline = handles
|
||||
.get(
|
||||
ttid.tenant_id,
|
||||
ttid.timeline_id,
|
||||
ShardSelector::Page(rel_block_to_key(req.rel, req.block_numbers[0])),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let shard_id = timeline.get_shard_identity();
|
||||
if shard_id.count <= parent.shard_count {
|
||||
return Err(HandleUpgradeError::ShutDown.into()); // emulate original error
|
||||
}
|
||||
|
||||
// Fast path: the request fits in a single shard.
|
||||
if let Some(shard_index) =
|
||||
GetPageSplitter::for_single_shard(&req, shard_id.count, Some(shard_id.stripe_size))
|
||||
.map_err(|err| tonic::Status::internal(err.to_string()))?
|
||||
{
|
||||
// We got the shard ID from the first page, so these must be equal.
|
||||
assert_eq!(shard_index.shard_number, shard_id.number);
|
||||
assert_eq!(shard_index.shard_count, shard_id.count);
|
||||
return Self::get_page(ctx, timeline, req, io_concurrency, received_at).await;
|
||||
}
|
||||
|
||||
// The request spans multiple shards; split it and dispatch parallel requests. All pages
|
||||
// were originally in the parent shard, and during a split all children are local, so we
|
||||
// expect to find local shards for all pages.
|
||||
let mut splitter = GetPageSplitter::split(req, shard_id.count, Some(shard_id.stripe_size))
|
||||
.map_err(|err| tonic::Status::internal(err.to_string()))?;
|
||||
|
||||
let mut shard_requests = FuturesUnordered::new();
|
||||
for (shard_index, shard_req) in splitter.drain_requests() {
|
||||
let timeline = handles
|
||||
.get(
|
||||
ttid.tenant_id,
|
||||
ttid.timeline_id,
|
||||
ShardSelector::Known(shard_index),
|
||||
)
|
||||
.await?;
|
||||
let future = Self::get_page(
|
||||
ctx,
|
||||
timeline,
|
||||
shard_req,
|
||||
io_concurrency.clone(),
|
||||
received_at,
|
||||
)
|
||||
.map(move |result| result.map(|resp| (shard_index, resp)));
|
||||
shard_requests.push(future);
|
||||
}
|
||||
|
||||
while let Some((shard_index, shard_response)) = shard_requests.next().await.transpose()? {
|
||||
splitter
|
||||
.add_response(shard_index, shard_response)
|
||||
.map_err(|err| tonic::Status::internal(err.to_string()))?;
|
||||
}
|
||||
|
||||
splitter
|
||||
.get_response()
|
||||
.map_err(|err| tonic::Status::internal(err.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3559,11 +3693,10 @@ impl proto::PageService for GrpcPageServiceHandler {
|
||||
// to be the sweet spot where throughput is saturated.
|
||||
const CHUNK_SIZE: usize = 256 * 1024;
|
||||
|
||||
let timeline = self.get_request_timeline(&req).await?;
|
||||
let timeline = self.get_shard_zero_request_timeline(&req).await?;
|
||||
let ctx = self.ctx.with_scope_timeline(&timeline);
|
||||
|
||||
// Validate the request and decorate the span.
|
||||
Self::ensure_shard_zero(&timeline)?;
|
||||
if timeline.is_archived() == Some(true) {
|
||||
return Err(tonic::Status::failed_precondition("timeline is archived"));
|
||||
}
|
||||
@@ -3679,11 +3812,10 @@ impl proto::PageService for GrpcPageServiceHandler {
|
||||
req: tonic::Request<proto::GetDbSizeRequest>,
|
||||
) -> Result<tonic::Response<proto::GetDbSizeResponse>, tonic::Status> {
|
||||
let received_at = extract::<ReceivedAt>(&req).0;
|
||||
let timeline = self.get_request_timeline(&req).await?;
|
||||
let timeline = self.get_shard_zero_request_timeline(&req).await?;
|
||||
let ctx = self.ctx.with_scope_page_service_pagestream(&timeline);
|
||||
|
||||
// Validate the request, decorate the span, and convert it to a Pagestream request.
|
||||
Self::ensure_shard_zero(&timeline)?;
|
||||
let req: page_api::GetDbSizeRequest = req.into_inner().try_into()?;
|
||||
|
||||
span_record!(db_oid=%req.db_oid, lsn=%req.read_lsn);
|
||||
@@ -3712,14 +3844,33 @@ impl proto::PageService for GrpcPageServiceHandler {
|
||||
req: tonic::Request<tonic::Streaming<proto::GetPageRequest>>,
|
||||
) -> Result<tonic::Response<Self::GetPagesStream>, tonic::Status> {
|
||||
// Extract the timeline from the request and check that it exists.
|
||||
//
|
||||
// NB: during shard splits, the compute may still send requests to the parent shard. We'll
|
||||
// reroute requests to the child shards below, but we also detect the common cases here
|
||||
// where either the shard exists or no shards exist at all. If we have a child shard, we
|
||||
// can't acquire a weak handle because we don't know which child shard to use yet.
|
||||
//
|
||||
// TODO: TimelineHandles.get() does internal retries, which will delay requests during shard
|
||||
// splits. It shouldn't.
|
||||
let ttid = *extract::<TenantTimelineId>(&req);
|
||||
let shard_index = *extract::<ShardIndex>(&req);
|
||||
let shard_selector = ShardSelector::Known(shard_index);
|
||||
|
||||
let mut handles = TimelineHandles::new(self.tenant_manager.clone());
|
||||
handles
|
||||
.get(ttid.tenant_id, ttid.timeline_id, shard_selector)
|
||||
.await?;
|
||||
let timeline = match handles
|
||||
.get(
|
||||
ttid.tenant_id,
|
||||
ttid.timeline_id,
|
||||
ShardSelector::Known(shard_index),
|
||||
)
|
||||
.await
|
||||
{
|
||||
// The timeline shard exists. Keep a weak handle to reuse for each request.
|
||||
Ok(timeline) => Some(timeline.downgrade()),
|
||||
// The shard doesn't exist, but a child shard does. We'll reroute requests later.
|
||||
Err(_) if handles.has_child_shard(ttid.tenant_id, shard_index) => None,
|
||||
// Failed to fetch the timeline, and no child shard exists. Error out.
|
||||
Err(err) => return Err(err.into()),
|
||||
};
|
||||
|
||||
// Spawn an IoConcurrency sidecar, if enabled.
|
||||
let gate_guard = self
|
||||
@@ -3736,11 +3887,9 @@ impl proto::PageService for GrpcPageServiceHandler {
|
||||
let mut reqs = req.into_inner();
|
||||
|
||||
let resps = async_stream::try_stream! {
|
||||
let timeline = handles
|
||||
.get(ttid.tenant_id, ttid.timeline_id, shard_selector)
|
||||
.await?
|
||||
.downgrade();
|
||||
loop {
|
||||
// Wait for the next client request.
|
||||
//
|
||||
// NB: Tonic considers the entire stream to be an in-flight request and will wait
|
||||
// for it to complete before shutting down. React to cancellation between requests.
|
||||
let req = tokio::select! {
|
||||
@@ -3753,16 +3902,43 @@ impl proto::PageService for GrpcPageServiceHandler {
|
||||
Err(err) => Err(err),
|
||||
},
|
||||
}?;
|
||||
|
||||
let received_at = Instant::now();
|
||||
let req_id = req.request_id.map(page_api::RequestID::from).unwrap_or_default();
|
||||
let result = Self::get_page(&ctx, &timeline, req, io_concurrency.clone())
|
||||
|
||||
// Process the request, using a closure to capture errors.
|
||||
let process_request = async || {
|
||||
let req = page_api::GetPageRequest::try_from(req)?;
|
||||
|
||||
// Fast path: use the pre-acquired timeline handle.
|
||||
if let Some(Ok(timeline)) = timeline.as_ref().map(|t| t.upgrade()) {
|
||||
return Self::get_page(&ctx, timeline, req, io_concurrency.clone(), received_at)
|
||||
.instrument(span.clone()) // propagate request span
|
||||
.await
|
||||
}
|
||||
|
||||
// The timeline handle is stale. During shard splits, the compute may still be
|
||||
// sending requests to the parent shard. Try to re-route requests to the child
|
||||
// shards, and split any batch requests that straddle multiple child shards.
|
||||
Self::maybe_split_get_page(
|
||||
&ctx,
|
||||
&mut handles,
|
||||
ttid,
|
||||
shard_index,
|
||||
req,
|
||||
io_concurrency.clone(),
|
||||
received_at,
|
||||
)
|
||||
.instrument(span.clone()) // propagate request span
|
||||
.await;
|
||||
yield match result {
|
||||
Ok(resp) => resp,
|
||||
// Convert per-request errors to GetPageResponses as appropriate, or terminate
|
||||
// the stream with a tonic::Status. Log the error regardless, since
|
||||
// ObservabilityLayer can't automatically log stream errors.
|
||||
.await
|
||||
};
|
||||
|
||||
// Return the response. Convert per-request errors to GetPageResponses if
|
||||
// appropriate, or terminate the stream with a tonic::Status.
|
||||
yield match process_request().await {
|
||||
Ok(resp) => resp.into(),
|
||||
Err(status) => {
|
||||
// Log the error, since ObservabilityLayer won't see stream errors.
|
||||
// TODO: it would be nice if we could propagate the get_page() fields here.
|
||||
span.in_scope(|| {
|
||||
warn!("request failed with {:?}: {}", status.code(), status.message());
|
||||
@@ -3782,11 +3958,10 @@ impl proto::PageService for GrpcPageServiceHandler {
|
||||
req: tonic::Request<proto::GetRelSizeRequest>,
|
||||
) -> Result<tonic::Response<proto::GetRelSizeResponse>, tonic::Status> {
|
||||
let received_at = extract::<ReceivedAt>(&req).0;
|
||||
let timeline = self.get_request_timeline(&req).await?;
|
||||
let timeline = self.get_shard_zero_request_timeline(&req).await?;
|
||||
let ctx = self.ctx.with_scope_page_service_pagestream(&timeline);
|
||||
|
||||
// Validate the request, decorate the span, and convert it to a Pagestream request.
|
||||
Self::ensure_shard_zero(&timeline)?;
|
||||
let req: page_api::GetRelSizeRequest = req.into_inner().try_into()?;
|
||||
let allow_missing = req.allow_missing;
|
||||
|
||||
@@ -3819,7 +3994,7 @@ impl proto::PageService for GrpcPageServiceHandler {
|
||||
req: tonic::Request<proto::GetSlruSegmentRequest>,
|
||||
) -> Result<tonic::Response<proto::GetSlruSegmentResponse>, tonic::Status> {
|
||||
let received_at = extract::<ReceivedAt>(&req).0;
|
||||
let timeline = self.get_request_timeline(&req).await?;
|
||||
let timeline = self.get_shard_zero_request_timeline(&req).await?;
|
||||
let ctx = self.ctx.with_scope_page_service_pagestream(&timeline);
|
||||
|
||||
// Validate the request, decorate the span, and convert it to a Pagestream request.
|
||||
@@ -3853,6 +4028,10 @@ impl proto::PageService for GrpcPageServiceHandler {
|
||||
&self,
|
||||
req: tonic::Request<proto::LeaseLsnRequest>,
|
||||
) -> Result<tonic::Response<proto::LeaseLsnResponse>, tonic::Status> {
|
||||
// TODO: this won't work during shard splits, as the request is directed at a specific shard
|
||||
// but the parent shard is removed before the split commits and the compute is notified
|
||||
// (which can take several minutes for large tenants). That's also the case for the libpq
|
||||
// implementation, so we keep the behavior for now.
|
||||
let timeline = self.get_request_timeline(&req).await?;
|
||||
let ctx = self.ctx.with_scope_timeline(&timeline);
|
||||
|
||||
|
||||
@@ -6,9 +6,8 @@
|
||||
//! walingest.rs handles a few things like implicit relation creation and extension.
|
||||
//! Clarify that)
|
||||
//!
|
||||
use std::collections::{BTreeSet, HashMap, HashSet, hash_map};
|
||||
use std::collections::{HashMap, HashSet, hash_map};
|
||||
use std::ops::{ControlFlow, Range};
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::walingest::{WalIngestError, WalIngestErrorKind};
|
||||
use crate::{PERF_TRACE_TARGET, ensure_walingest};
|
||||
@@ -227,25 +226,6 @@ impl Timeline {
|
||||
pending_nblocks: 0,
|
||||
pending_directory_entries: Vec::new(),
|
||||
pending_metadata_bytes: 0,
|
||||
is_importing_pgdata: false,
|
||||
lsn,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn begin_modification_for_import(&self, lsn: Lsn) -> DatadirModification
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
DatadirModification {
|
||||
tline: self,
|
||||
pending_lsns: Vec::new(),
|
||||
pending_metadata_pages: HashMap::new(),
|
||||
pending_data_batch: None,
|
||||
pending_deletions: Vec::new(),
|
||||
pending_nblocks: 0,
|
||||
pending_directory_entries: Vec::new(),
|
||||
pending_metadata_bytes: 0,
|
||||
is_importing_pgdata: true,
|
||||
lsn,
|
||||
}
|
||||
}
|
||||
@@ -615,50 +595,6 @@ impl Timeline {
|
||||
self.get_rel_exists_in_reldir(tag, version, None, ctx).await
|
||||
}
|
||||
|
||||
async fn get_rel_exists_in_reldir_v1(
|
||||
&self,
|
||||
tag: RelTag,
|
||||
version: Version<'_>,
|
||||
deserialized_reldir_v1: Option<(Key, &RelDirectory)>,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<bool, PageReconstructError> {
|
||||
let key = rel_dir_to_key(tag.spcnode, tag.dbnode);
|
||||
if let Some((cached_key, dir)) = deserialized_reldir_v1 {
|
||||
if cached_key == key {
|
||||
return Ok(dir.rels.contains(&(tag.relnode, tag.forknum)));
|
||||
} else if cfg!(test) || cfg!(feature = "testing") {
|
||||
panic!("cached reldir key mismatch: {cached_key} != {key}");
|
||||
} else {
|
||||
warn!("cached reldir key mismatch: {cached_key} != {key}");
|
||||
}
|
||||
// Fallback to reading the directory from the datadir.
|
||||
}
|
||||
|
||||
let buf = version.get(self, key, ctx).await?;
|
||||
|
||||
let dir = RelDirectory::des(&buf)?;
|
||||
Ok(dir.rels.contains(&(tag.relnode, tag.forknum)))
|
||||
}
|
||||
|
||||
async fn get_rel_exists_in_reldir_v2(
|
||||
&self,
|
||||
tag: RelTag,
|
||||
version: Version<'_>,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<bool, PageReconstructError> {
|
||||
let key = rel_tag_sparse_key(tag.spcnode, tag.dbnode, tag.relnode, tag.forknum);
|
||||
let buf = RelDirExists::decode_option(version.sparse_get(self, key, ctx).await?).map_err(
|
||||
|_| {
|
||||
PageReconstructError::Other(anyhow::anyhow!(
|
||||
"invalid reldir key: decode failed, {}",
|
||||
key
|
||||
))
|
||||
},
|
||||
)?;
|
||||
let exists_v2 = buf == RelDirExists::Exists;
|
||||
Ok(exists_v2)
|
||||
}
|
||||
|
||||
/// Does the relation exist? With a cached deserialized `RelDirectory`.
|
||||
///
|
||||
/// There are some cases where the caller loops across all relations. In that specific case,
|
||||
@@ -690,134 +626,45 @@ impl Timeline {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
let (v2_status, migrated_lsn) = self.get_rel_size_v2_status();
|
||||
// Read path: first read the new reldir keyspace. Early return if the relation exists.
|
||||
// Otherwise, read the old reldir keyspace.
|
||||
// TODO: if IndexPart::rel_size_migration is `Migrated`, we only need to read from v2.
|
||||
|
||||
match v2_status {
|
||||
RelSizeMigration::Legacy => {
|
||||
let v1_exists = self
|
||||
.get_rel_exists_in_reldir_v1(tag, version, deserialized_reldir_v1, ctx)
|
||||
.await?;
|
||||
Ok(v1_exists)
|
||||
}
|
||||
RelSizeMigration::Migrating | RelSizeMigration::Migrated
|
||||
if version.get_lsn() < migrated_lsn.unwrap_or(Lsn(0)) =>
|
||||
{
|
||||
// For requests below the migrated LSN, we still use the v1 read path.
|
||||
let v1_exists = self
|
||||
.get_rel_exists_in_reldir_v1(tag, version, deserialized_reldir_v1, ctx)
|
||||
.await?;
|
||||
Ok(v1_exists)
|
||||
}
|
||||
RelSizeMigration::Migrating => {
|
||||
let v1_exists = self
|
||||
.get_rel_exists_in_reldir_v1(tag, version, deserialized_reldir_v1, ctx)
|
||||
.await?;
|
||||
let v2_exists_res = self.get_rel_exists_in_reldir_v2(tag, version, ctx).await;
|
||||
match v2_exists_res {
|
||||
Ok(v2_exists) if v1_exists == v2_exists => {}
|
||||
Ok(v2_exists) => {
|
||||
tracing::warn!(
|
||||
"inconsistent v1/v2 reldir keyspace for rel {}: v1_exists={}, v2_exists={}",
|
||||
tag,
|
||||
v1_exists,
|
||||
v2_exists
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::warn!("failed to get rel exists in v2: {e}");
|
||||
}
|
||||
}
|
||||
Ok(v1_exists)
|
||||
}
|
||||
RelSizeMigration::Migrated => {
|
||||
let v2_exists = self.get_rel_exists_in_reldir_v2(tag, version, ctx).await?;
|
||||
Ok(v2_exists)
|
||||
if let RelSizeMigration::Migrated | RelSizeMigration::Migrating =
|
||||
self.get_rel_size_v2_status()
|
||||
{
|
||||
// fetch directory listing (new)
|
||||
let key = rel_tag_sparse_key(tag.spcnode, tag.dbnode, tag.relnode, tag.forknum);
|
||||
let buf = RelDirExists::decode_option(version.sparse_get(self, key, ctx).await?)
|
||||
.map_err(|_| PageReconstructError::Other(anyhow::anyhow!("invalid reldir key")))?;
|
||||
let exists_v2 = buf == RelDirExists::Exists;
|
||||
// Fast path: if the relation exists in the new format, return true.
|
||||
// TODO: we should have a verification mode that checks both keyspaces
|
||||
// to ensure the relation only exists in one of them.
|
||||
if exists_v2 {
|
||||
return Ok(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn list_rels_v1(
|
||||
&self,
|
||||
spcnode: Oid,
|
||||
dbnode: Oid,
|
||||
version: Version<'_>,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<HashSet<RelTag>, PageReconstructError> {
|
||||
let key = rel_dir_to_key(spcnode, dbnode);
|
||||
// fetch directory listing (old)
|
||||
|
||||
let key = rel_dir_to_key(tag.spcnode, tag.dbnode);
|
||||
|
||||
if let Some((cached_key, dir)) = deserialized_reldir_v1 {
|
||||
if cached_key == key {
|
||||
return Ok(dir.rels.contains(&(tag.relnode, tag.forknum)));
|
||||
} else if cfg!(test) || cfg!(feature = "testing") {
|
||||
panic!("cached reldir key mismatch: {cached_key} != {key}");
|
||||
} else {
|
||||
warn!("cached reldir key mismatch: {cached_key} != {key}");
|
||||
}
|
||||
// Fallback to reading the directory from the datadir.
|
||||
}
|
||||
let buf = version.get(self, key, ctx).await?;
|
||||
let dir = RelDirectory::des(&buf)?;
|
||||
let rels_v1: HashSet<RelTag> =
|
||||
HashSet::from_iter(dir.rels.iter().map(|(relnode, forknum)| RelTag {
|
||||
spcnode,
|
||||
dbnode,
|
||||
relnode: *relnode,
|
||||
forknum: *forknum,
|
||||
}));
|
||||
Ok(rels_v1)
|
||||
}
|
||||
|
||||
async fn list_rels_v2(
|
||||
&self,
|
||||
spcnode: Oid,
|
||||
dbnode: Oid,
|
||||
version: Version<'_>,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<HashSet<RelTag>, PageReconstructError> {
|
||||
let key_range = rel_tag_sparse_key_range(spcnode, dbnode);
|
||||
let io_concurrency = IoConcurrency::spawn_from_conf(
|
||||
self.conf.get_vectored_concurrent_io,
|
||||
self.gate
|
||||
.enter()
|
||||
.map_err(|_| PageReconstructError::Cancelled)?,
|
||||
);
|
||||
let results = self
|
||||
.scan(
|
||||
KeySpace::single(key_range),
|
||||
version.get_lsn(),
|
||||
ctx,
|
||||
io_concurrency,
|
||||
)
|
||||
.await?;
|
||||
let mut rels = HashSet::new();
|
||||
for (key, val) in results {
|
||||
let val = RelDirExists::decode(&val?).map_err(|_| {
|
||||
PageReconstructError::Other(anyhow::anyhow!(
|
||||
"invalid reldir key: decode failed, {}",
|
||||
key
|
||||
))
|
||||
})?;
|
||||
if key.field6 != 1 {
|
||||
return Err(PageReconstructError::Other(anyhow::anyhow!(
|
||||
"invalid reldir key: field6 != 1, {}",
|
||||
key
|
||||
)));
|
||||
}
|
||||
if key.field2 != spcnode {
|
||||
return Err(PageReconstructError::Other(anyhow::anyhow!(
|
||||
"invalid reldir key: field2 != spcnode, {}",
|
||||
key
|
||||
)));
|
||||
}
|
||||
if key.field3 != dbnode {
|
||||
return Err(PageReconstructError::Other(anyhow::anyhow!(
|
||||
"invalid reldir key: field3 != dbnode, {}",
|
||||
key
|
||||
)));
|
||||
}
|
||||
let tag = RelTag {
|
||||
spcnode,
|
||||
dbnode,
|
||||
relnode: key.field4,
|
||||
forknum: key.field5,
|
||||
};
|
||||
if val == RelDirExists::Removed {
|
||||
debug_assert!(!rels.contains(&tag), "removed reltag in v2");
|
||||
continue;
|
||||
}
|
||||
let did_not_contain = rels.insert(tag);
|
||||
debug_assert!(did_not_contain, "duplicate reltag in v2");
|
||||
}
|
||||
Ok(rels)
|
||||
let dir = RelDirectory::des(&buf)?;
|
||||
let exists_v1 = dir.rels.contains(&(tag.relnode, tag.forknum));
|
||||
Ok(exists_v1)
|
||||
}
|
||||
|
||||
/// Get a list of all existing relations in given tablespace and database.
|
||||
@@ -835,45 +682,60 @@ impl Timeline {
|
||||
version: Version<'_>,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<HashSet<RelTag>, PageReconstructError> {
|
||||
let (v2_status, migrated_lsn) = self.get_rel_size_v2_status();
|
||||
// fetch directory listing (old)
|
||||
let key = rel_dir_to_key(spcnode, dbnode);
|
||||
let buf = version.get(self, key, ctx).await?;
|
||||
|
||||
match v2_status {
|
||||
RelSizeMigration::Legacy => {
|
||||
let rels_v1 = self.list_rels_v1(spcnode, dbnode, version, ctx).await?;
|
||||
Ok(rels_v1)
|
||||
}
|
||||
RelSizeMigration::Migrating | RelSizeMigration::Migrated
|
||||
if version.get_lsn() < migrated_lsn.unwrap_or(Lsn(0)) =>
|
||||
{
|
||||
// For requests below the migrated LSN, we still use the v1 read path.
|
||||
let rels_v1 = self.list_rels_v1(spcnode, dbnode, version, ctx).await?;
|
||||
Ok(rels_v1)
|
||||
}
|
||||
RelSizeMigration::Migrating => {
|
||||
let rels_v1 = self.list_rels_v1(spcnode, dbnode, version, ctx).await?;
|
||||
let rels_v2_res = self.list_rels_v2(spcnode, dbnode, version, ctx).await;
|
||||
match rels_v2_res {
|
||||
Ok(rels_v2) if rels_v1 == rels_v2 => {}
|
||||
Ok(rels_v2) => {
|
||||
tracing::warn!(
|
||||
"inconsistent v1/v2 reldir keyspace for db {} {}: v1_rels.len()={}, v2_rels.len()={}",
|
||||
spcnode,
|
||||
dbnode,
|
||||
rels_v1.len(),
|
||||
rels_v2.len()
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::warn!("failed to list rels in v2: {e}");
|
||||
}
|
||||
}
|
||||
Ok(rels_v1)
|
||||
}
|
||||
RelSizeMigration::Migrated => {
|
||||
let rels_v2 = self.list_rels_v2(spcnode, dbnode, version, ctx).await?;
|
||||
Ok(rels_v2)
|
||||
}
|
||||
let dir = RelDirectory::des(&buf)?;
|
||||
let rels_v1: HashSet<RelTag> =
|
||||
HashSet::from_iter(dir.rels.iter().map(|(relnode, forknum)| RelTag {
|
||||
spcnode,
|
||||
dbnode,
|
||||
relnode: *relnode,
|
||||
forknum: *forknum,
|
||||
}));
|
||||
|
||||
if let RelSizeMigration::Legacy = self.get_rel_size_v2_status() {
|
||||
return Ok(rels_v1);
|
||||
}
|
||||
|
||||
// scan directory listing (new), merge with the old results
|
||||
let key_range = rel_tag_sparse_key_range(spcnode, dbnode);
|
||||
let io_concurrency = IoConcurrency::spawn_from_conf(
|
||||
self.conf.get_vectored_concurrent_io,
|
||||
self.gate
|
||||
.enter()
|
||||
.map_err(|_| PageReconstructError::Cancelled)?,
|
||||
);
|
||||
let results = self
|
||||
.scan(
|
||||
KeySpace::single(key_range),
|
||||
version.get_lsn(),
|
||||
ctx,
|
||||
io_concurrency,
|
||||
)
|
||||
.await?;
|
||||
let mut rels = rels_v1;
|
||||
for (key, val) in results {
|
||||
let val = RelDirExists::decode(&val?)
|
||||
.map_err(|_| PageReconstructError::Other(anyhow::anyhow!("invalid reldir key")))?;
|
||||
assert_eq!(key.field6, 1);
|
||||
assert_eq!(key.field2, spcnode);
|
||||
assert_eq!(key.field3, dbnode);
|
||||
let tag = RelTag {
|
||||
spcnode,
|
||||
dbnode,
|
||||
relnode: key.field4,
|
||||
forknum: key.field5,
|
||||
};
|
||||
if val == RelDirExists::Removed {
|
||||
debug_assert!(!rels.contains(&tag), "removed reltag in v2");
|
||||
continue;
|
||||
}
|
||||
let did_not_contain = rels.insert(tag);
|
||||
debug_assert!(did_not_contain, "duplicate reltag in v2");
|
||||
}
|
||||
Ok(rels)
|
||||
}
|
||||
|
||||
/// Get the whole SLRU segment
|
||||
@@ -1392,16 +1254,11 @@ impl Timeline {
|
||||
let dbdir = DbDirectory::des(&buf)?;
|
||||
|
||||
let mut total_size: u64 = 0;
|
||||
let mut dbdir_cnt = 0;
|
||||
let mut rel_cnt = 0;
|
||||
|
||||
for &(spcnode, dbnode) in dbdir.dbdirs.keys() {
|
||||
dbdir_cnt += 1;
|
||||
for (spcnode, dbnode) in dbdir.dbdirs.keys() {
|
||||
for rel in self
|
||||
.list_rels(spcnode, dbnode, Version::at(lsn), ctx)
|
||||
.list_rels(*spcnode, *dbnode, Version::at(lsn), ctx)
|
||||
.await?
|
||||
{
|
||||
rel_cnt += 1;
|
||||
if self.cancel.is_cancelled() {
|
||||
return Err(CalculateLogicalSizeError::Cancelled);
|
||||
}
|
||||
@@ -1412,10 +1269,6 @@ impl Timeline {
|
||||
total_size += relsize as u64;
|
||||
}
|
||||
}
|
||||
|
||||
self.db_rel_count
|
||||
.store(Some(Arc::new((dbdir_cnt, rel_cnt))));
|
||||
|
||||
Ok(total_size * BLCKSZ as u64)
|
||||
}
|
||||
|
||||
@@ -1703,9 +1556,6 @@ pub struct DatadirModification<'a> {
|
||||
|
||||
/// An **approximation** of how many metadata bytes will be written to the EphemeralFile.
|
||||
pending_metadata_bytes: usize,
|
||||
|
||||
/// Whether we are importing a pgdata directory.
|
||||
is_importing_pgdata: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
@@ -1718,14 +1568,6 @@ pub enum MetricsUpdate {
|
||||
Sub(u64),
|
||||
}
|
||||
|
||||
/// Controls the behavior of the reldir keyspace.
|
||||
pub struct RelDirMode {
|
||||
// Whether we can read the v2 keyspace or not.
|
||||
current_status: RelSizeMigration,
|
||||
// Whether we should initialize the v2 keyspace or not.
|
||||
initialize: bool,
|
||||
}
|
||||
|
||||
impl DatadirModification<'_> {
|
||||
// When a DatadirModification is committed, we do a monolithic serialization of all its contents. WAL records can
|
||||
// contain multiple pages, so the pageserver's record-based batch size isn't sufficient to bound this allocation: we
|
||||
@@ -2081,49 +1923,30 @@ impl DatadirModification<'_> {
|
||||
}
|
||||
|
||||
/// Returns `true` if the rel_size_v2 write path is enabled. If it is the first time that
|
||||
/// we enable it, we also need to persist it in `index_part.json` (initialize is true).
|
||||
///
|
||||
/// As this function is only used on the write path, we do not need to read the migrated_at
|
||||
/// field.
|
||||
pub fn maybe_enable_rel_size_v2(&mut self, is_create: bool) -> anyhow::Result<RelDirMode> {
|
||||
// TODO: define the behavior of the tenant-level config flag and use feature flag to enable this feature
|
||||
|
||||
let (status, _) = self.tline.get_rel_size_v2_status();
|
||||
/// we enable it, we also need to persist it in `index_part.json`.
|
||||
pub fn maybe_enable_rel_size_v2(&mut self) -> anyhow::Result<bool> {
|
||||
let status = self.tline.get_rel_size_v2_status();
|
||||
let config = self.tline.get_rel_size_v2_enabled();
|
||||
match (config, status) {
|
||||
(false, RelSizeMigration::Legacy) => {
|
||||
// tenant config didn't enable it and we didn't write any reldir_v2 key yet
|
||||
Ok(RelDirMode {
|
||||
current_status: RelSizeMigration::Legacy,
|
||||
initialize: false,
|
||||
})
|
||||
Ok(false)
|
||||
}
|
||||
(false, status @ RelSizeMigration::Migrating | status @ RelSizeMigration::Migrated) => {
|
||||
(false, RelSizeMigration::Migrating | RelSizeMigration::Migrated) => {
|
||||
// index_part already persisted that the timeline has enabled rel_size_v2
|
||||
Ok(RelDirMode {
|
||||
current_status: status,
|
||||
initialize: false,
|
||||
})
|
||||
Ok(true)
|
||||
}
|
||||
(true, RelSizeMigration::Legacy) => {
|
||||
// The first time we enable it, we need to persist it in `index_part.json`
|
||||
// The caller should update the reldir status once the initialization is done.
|
||||
//
|
||||
// Only initialize the v2 keyspace on new relation creation. No initialization
|
||||
// during `timeline_create` (TODO: fix this, we should allow, but currently it
|
||||
// hits consistency issues).
|
||||
Ok(RelDirMode {
|
||||
current_status: RelSizeMigration::Legacy,
|
||||
initialize: is_create && !self.is_importing_pgdata,
|
||||
})
|
||||
self.tline
|
||||
.update_rel_size_v2_status(RelSizeMigration::Migrating)?;
|
||||
tracing::info!("enabled rel_size_v2");
|
||||
Ok(true)
|
||||
}
|
||||
(true, status @ RelSizeMigration::Migrating | status @ RelSizeMigration::Migrated) => {
|
||||
(true, RelSizeMigration::Migrating | RelSizeMigration::Migrated) => {
|
||||
// index_part already persisted that the timeline has enabled rel_size_v2
|
||||
// and we don't need to do anything
|
||||
Ok(RelDirMode {
|
||||
current_status: status,
|
||||
initialize: false,
|
||||
})
|
||||
Ok(true)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2136,8 +1959,8 @@ impl DatadirModification<'_> {
|
||||
img: Bytes,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<(), WalIngestError> {
|
||||
let v2_mode = self
|
||||
.maybe_enable_rel_size_v2(false)
|
||||
let v2_enabled = self
|
||||
.maybe_enable_rel_size_v2()
|
||||
.map_err(WalIngestErrorKind::MaybeRelSizeV2Error)?;
|
||||
|
||||
// Add it to the directory (if it doesn't exist already)
|
||||
@@ -2153,19 +1976,17 @@ impl DatadirModification<'_> {
|
||||
self.put(DBDIR_KEY, Value::Image(buf.into()));
|
||||
}
|
||||
if r.is_none() {
|
||||
if v2_mode.current_status != RelSizeMigration::Legacy {
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::RelV2, MetricsUpdate::Set(0)));
|
||||
}
|
||||
|
||||
// Create RelDirectory in v1 keyspace. TODO: if we have fully migrated to v2, no need to create this directory.
|
||||
// Some code path relies on this directory to be present. We should remove it once we starts to set tenants to
|
||||
// `RelSizeMigration::Migrated` state (currently we don't, all tenants will have `RelSizeMigration::Migrating`).
|
||||
// Create RelDirectory
|
||||
// TODO: if we have fully migrated to v2, no need to create this directory
|
||||
let buf = RelDirectory::ser(&RelDirectory {
|
||||
rels: HashSet::new(),
|
||||
})?;
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::Rel, MetricsUpdate::Set(0)));
|
||||
if v2_enabled {
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::RelV2, MetricsUpdate::Set(0)));
|
||||
}
|
||||
self.put(
|
||||
rel_dir_to_key(spcnode, dbnode),
|
||||
Value::Image(Bytes::from(buf)),
|
||||
@@ -2272,109 +2093,6 @@ impl DatadirModification<'_> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn initialize_rel_size_v2_keyspace(
|
||||
&mut self,
|
||||
ctx: &RequestContext,
|
||||
dbdir: &DbDirectory,
|
||||
) -> Result<(), WalIngestError> {
|
||||
// Copy everything from relv1 to relv2; TODO: check if there's any key in the v2 keyspace, if so, abort.
|
||||
tracing::info!("initializing rel_size_v2 keyspace");
|
||||
let mut rel_cnt = 0;
|
||||
// relmap_exists (the value of dbdirs hashmap) does not affect the migration: we need to copy things over anyways
|
||||
for &(spcnode, dbnode) in dbdir.dbdirs.keys() {
|
||||
let rel_dir_key = rel_dir_to_key(spcnode, dbnode);
|
||||
let rel_dir = RelDirectory::des(&self.get(rel_dir_key, ctx).await?)?;
|
||||
for (relnode, forknum) in rel_dir.rels {
|
||||
let sparse_rel_dir_key = rel_tag_sparse_key(spcnode, dbnode, relnode, forknum);
|
||||
self.put(
|
||||
sparse_rel_dir_key,
|
||||
Value::Image(RelDirExists::Exists.encode()),
|
||||
);
|
||||
tracing::info!(
|
||||
"migrated rel_size_v2: {}",
|
||||
RelTag {
|
||||
spcnode,
|
||||
dbnode,
|
||||
relnode,
|
||||
forknum
|
||||
}
|
||||
);
|
||||
rel_cnt += 1;
|
||||
}
|
||||
}
|
||||
tracing::info!(
|
||||
"initialized rel_size_v2 keyspace at lsn {}: migrated {} relations",
|
||||
self.lsn,
|
||||
rel_cnt
|
||||
);
|
||||
self.tline
|
||||
.update_rel_size_v2_status(RelSizeMigration::Migrating, Some(self.lsn))
|
||||
.map_err(WalIngestErrorKind::MaybeRelSizeV2Error)?;
|
||||
Ok::<_, WalIngestError>(())
|
||||
}
|
||||
|
||||
async fn put_rel_creation_v1(
|
||||
&mut self,
|
||||
rel: RelTag,
|
||||
dbdir_exists: bool,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<(), WalIngestError> {
|
||||
// Reldir v1 write path
|
||||
let rel_dir_key = rel_dir_to_key(rel.spcnode, rel.dbnode);
|
||||
let mut rel_dir = if !dbdir_exists {
|
||||
// Create the RelDirectory
|
||||
RelDirectory::default()
|
||||
} else {
|
||||
// reldir already exists, fetch it
|
||||
RelDirectory::des(&self.get(rel_dir_key, ctx).await?)?
|
||||
};
|
||||
|
||||
// Add the new relation to the rel directory entry, and write it back
|
||||
if !rel_dir.rels.insert((rel.relnode, rel.forknum)) {
|
||||
Err(WalIngestErrorKind::RelationAlreadyExists(rel))?;
|
||||
}
|
||||
if !dbdir_exists {
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::Rel, MetricsUpdate::Set(0)))
|
||||
}
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::Rel, MetricsUpdate::Add(1)));
|
||||
self.put(
|
||||
rel_dir_key,
|
||||
Value::Image(Bytes::from(RelDirectory::ser(&rel_dir)?)),
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn put_rel_creation_v2(
|
||||
&mut self,
|
||||
rel: RelTag,
|
||||
dbdir_exists: bool,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<(), WalIngestError> {
|
||||
// Reldir v2 write path
|
||||
let sparse_rel_dir_key =
|
||||
rel_tag_sparse_key(rel.spcnode, rel.dbnode, rel.relnode, rel.forknum);
|
||||
// check if the rel_dir_key exists in v2
|
||||
let val = self.sparse_get(sparse_rel_dir_key, ctx).await?;
|
||||
let val = RelDirExists::decode_option(val)
|
||||
.map_err(|_| WalIngestErrorKind::InvalidRelDirKey(sparse_rel_dir_key))?;
|
||||
if val == RelDirExists::Exists {
|
||||
Err(WalIngestErrorKind::RelationAlreadyExists(rel))?;
|
||||
}
|
||||
self.put(
|
||||
sparse_rel_dir_key,
|
||||
Value::Image(RelDirExists::Exists.encode()),
|
||||
);
|
||||
if !dbdir_exists {
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::RelV2, MetricsUpdate::Set(0)));
|
||||
}
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::RelV2, MetricsUpdate::Add(1)));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Create a relation fork.
|
||||
///
|
||||
/// 'nblocks' is the initial size.
|
||||
@@ -2408,31 +2126,66 @@ impl DatadirModification<'_> {
|
||||
true
|
||||
};
|
||||
|
||||
let mut v2_mode = self
|
||||
.maybe_enable_rel_size_v2(true)
|
||||
let rel_dir_key = rel_dir_to_key(rel.spcnode, rel.dbnode);
|
||||
let mut rel_dir = if !dbdir_exists {
|
||||
// Create the RelDirectory
|
||||
RelDirectory::default()
|
||||
} else {
|
||||
// reldir already exists, fetch it
|
||||
RelDirectory::des(&self.get(rel_dir_key, ctx).await?)?
|
||||
};
|
||||
|
||||
let v2_enabled = self
|
||||
.maybe_enable_rel_size_v2()
|
||||
.map_err(WalIngestErrorKind::MaybeRelSizeV2Error)?;
|
||||
|
||||
if v2_mode.initialize {
|
||||
if let Err(e) = self.initialize_rel_size_v2_keyspace(ctx, &dbdir).await {
|
||||
tracing::warn!("error initializing rel_size_v2 keyspace: {}", e);
|
||||
// TODO: circuit breaker so that it won't retry forever
|
||||
} else {
|
||||
v2_mode.current_status = RelSizeMigration::Migrating;
|
||||
if v2_enabled {
|
||||
if rel_dir.rels.contains(&(rel.relnode, rel.forknum)) {
|
||||
Err(WalIngestErrorKind::RelationAlreadyExists(rel))?;
|
||||
}
|
||||
}
|
||||
|
||||
if v2_mode.current_status != RelSizeMigration::Migrated {
|
||||
self.put_rel_creation_v1(rel, dbdir_exists, ctx).await?;
|
||||
}
|
||||
|
||||
if v2_mode.current_status != RelSizeMigration::Legacy {
|
||||
let write_v2_res = self.put_rel_creation_v2(rel, dbdir_exists, ctx).await;
|
||||
if let Err(e) = write_v2_res {
|
||||
if v2_mode.current_status == RelSizeMigration::Migrated {
|
||||
return Err(e);
|
||||
}
|
||||
tracing::warn!("error writing rel_size_v2 keyspace: {}", e);
|
||||
let sparse_rel_dir_key =
|
||||
rel_tag_sparse_key(rel.spcnode, rel.dbnode, rel.relnode, rel.forknum);
|
||||
// check if the rel_dir_key exists in v2
|
||||
let val = self.sparse_get(sparse_rel_dir_key, ctx).await?;
|
||||
let val = RelDirExists::decode_option(val)
|
||||
.map_err(|_| WalIngestErrorKind::InvalidRelDirKey(sparse_rel_dir_key))?;
|
||||
if val == RelDirExists::Exists {
|
||||
Err(WalIngestErrorKind::RelationAlreadyExists(rel))?;
|
||||
}
|
||||
self.put(
|
||||
sparse_rel_dir_key,
|
||||
Value::Image(RelDirExists::Exists.encode()),
|
||||
);
|
||||
if !dbdir_exists {
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::Rel, MetricsUpdate::Set(0)));
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::RelV2, MetricsUpdate::Set(0)));
|
||||
// We don't write `rel_dir_key -> rel_dir.rels` back to the storage in the v2 path unless it's the initial creation.
|
||||
// TODO: if we have fully migrated to v2, no need to create this directory. Otherwise, there
|
||||
// will be key not found errors if we don't create an empty one for rel_size_v2.
|
||||
self.put(
|
||||
rel_dir_key,
|
||||
Value::Image(Bytes::from(RelDirectory::ser(&RelDirectory::default())?)),
|
||||
);
|
||||
}
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::RelV2, MetricsUpdate::Add(1)));
|
||||
} else {
|
||||
// Add the new relation to the rel directory entry, and write it back
|
||||
if !rel_dir.rels.insert((rel.relnode, rel.forknum)) {
|
||||
Err(WalIngestErrorKind::RelationAlreadyExists(rel))?;
|
||||
}
|
||||
if !dbdir_exists {
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::Rel, MetricsUpdate::Set(0)))
|
||||
}
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::Rel, MetricsUpdate::Add(1)));
|
||||
self.put(
|
||||
rel_dir_key,
|
||||
Value::Image(Bytes::from(RelDirectory::ser(&rel_dir)?)),
|
||||
);
|
||||
}
|
||||
|
||||
// Put size
|
||||
@@ -2507,12 +2260,15 @@ impl DatadirModification<'_> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn put_rel_drop_v1(
|
||||
/// Drop some relations
|
||||
pub(crate) async fn put_rel_drops(
|
||||
&mut self,
|
||||
drop_relations: HashMap<(u32, u32), Vec<RelTag>>,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<BTreeSet<RelTag>, WalIngestError> {
|
||||
let mut dropped_rels = BTreeSet::new();
|
||||
) -> Result<(), WalIngestError> {
|
||||
let v2_enabled = self
|
||||
.maybe_enable_rel_size_v2()
|
||||
.map_err(WalIngestErrorKind::MaybeRelSizeV2Error)?;
|
||||
for ((spc_node, db_node), rel_tags) in drop_relations {
|
||||
let dir_key = rel_dir_to_key(spc_node, db_node);
|
||||
let buf = self.get(dir_key, ctx).await?;
|
||||
@@ -2524,8 +2280,25 @@ impl DatadirModification<'_> {
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::Rel, MetricsUpdate::Sub(1)));
|
||||
dirty = true;
|
||||
dropped_rels.insert(rel_tag);
|
||||
true
|
||||
} else if v2_enabled {
|
||||
// The rel is not found in the old reldir key, so we need to check the new sparse keyspace.
|
||||
// Note that a relation can only exist in one of the two keyspaces (guaranteed by the ingestion
|
||||
// logic).
|
||||
let key =
|
||||
rel_tag_sparse_key(spc_node, db_node, rel_tag.relnode, rel_tag.forknum);
|
||||
let val = RelDirExists::decode_option(self.sparse_get(key, ctx).await?)
|
||||
.map_err(|_| WalIngestErrorKind::InvalidKey(key, self.lsn))?;
|
||||
if val == RelDirExists::Exists {
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::RelV2, MetricsUpdate::Sub(1)));
|
||||
// put tombstone
|
||||
self.put(key, Value::Image(RelDirExists::Removed.encode()));
|
||||
// no need to set dirty to true
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
} else {
|
||||
false
|
||||
};
|
||||
@@ -2548,67 +2321,7 @@ impl DatadirModification<'_> {
|
||||
self.put(dir_key, Value::Image(Bytes::from(RelDirectory::ser(&dir)?)));
|
||||
}
|
||||
}
|
||||
Ok(dropped_rels)
|
||||
}
|
||||
|
||||
async fn put_rel_drop_v2(
|
||||
&mut self,
|
||||
drop_relations: HashMap<(u32, u32), Vec<RelTag>>,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<BTreeSet<RelTag>, WalIngestError> {
|
||||
let mut dropped_rels = BTreeSet::new();
|
||||
for ((spc_node, db_node), rel_tags) in drop_relations {
|
||||
for rel_tag in rel_tags {
|
||||
let key = rel_tag_sparse_key(spc_node, db_node, rel_tag.relnode, rel_tag.forknum);
|
||||
let val = RelDirExists::decode_option(self.sparse_get(key, ctx).await?)
|
||||
.map_err(|_| WalIngestErrorKind::InvalidKey(key, self.lsn))?;
|
||||
if val == RelDirExists::Exists {
|
||||
dropped_rels.insert(rel_tag);
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::RelV2, MetricsUpdate::Sub(1)));
|
||||
// put tombstone
|
||||
self.put(key, Value::Image(RelDirExists::Removed.encode()));
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(dropped_rels)
|
||||
}
|
||||
|
||||
/// Drop some relations
|
||||
pub(crate) async fn put_rel_drops(
|
||||
&mut self,
|
||||
drop_relations: HashMap<(u32, u32), Vec<RelTag>>,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<(), WalIngestError> {
|
||||
let v2_mode = self
|
||||
.maybe_enable_rel_size_v2(false)
|
||||
.map_err(WalIngestErrorKind::MaybeRelSizeV2Error)?;
|
||||
match v2_mode.current_status {
|
||||
RelSizeMigration::Legacy => {
|
||||
self.put_rel_drop_v1(drop_relations, ctx).await?;
|
||||
}
|
||||
RelSizeMigration::Migrating => {
|
||||
let dropped_rels_v1 = self.put_rel_drop_v1(drop_relations.clone(), ctx).await?;
|
||||
let dropped_rels_v2_res = self.put_rel_drop_v2(drop_relations, ctx).await;
|
||||
match dropped_rels_v2_res {
|
||||
Ok(dropped_rels_v2) => {
|
||||
if dropped_rels_v1 != dropped_rels_v2 {
|
||||
tracing::warn!(
|
||||
"inconsistent v1/v2 rel drop: dropped_rels_v1.len()={}, dropped_rels_v2.len()={}",
|
||||
dropped_rels_v1.len(),
|
||||
dropped_rels_v2.len()
|
||||
);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::warn!("error dropping rels: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
RelSizeMigration::Migrated => {
|
||||
self.put_rel_drop_v2(drop_relations, ctx).await?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
@@ -1205,7 +1205,6 @@ impl TenantShard {
|
||||
idempotency.clone(),
|
||||
index_part.gc_compaction.clone(),
|
||||
index_part.rel_size_migration.clone(),
|
||||
index_part.rel_size_migrated_at,
|
||||
ctx,
|
||||
)?;
|
||||
let disk_consistent_lsn = timeline.get_disk_consistent_lsn();
|
||||
@@ -2585,7 +2584,6 @@ impl TenantShard {
|
||||
initdb_lsn,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
ctx,
|
||||
)
|
||||
.await
|
||||
@@ -2915,7 +2913,6 @@ impl TenantShard {
|
||||
initdb_lsn,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
ctx,
|
||||
)
|
||||
.await
|
||||
@@ -4345,7 +4342,6 @@ impl TenantShard {
|
||||
create_idempotency: CreateTimelineIdempotency,
|
||||
gc_compaction_state: Option<GcCompactionState>,
|
||||
rel_size_v2_status: Option<RelSizeMigration>,
|
||||
rel_size_migrated_at: Option<Lsn>,
|
||||
ctx: &RequestContext,
|
||||
) -> anyhow::Result<(Arc<Timeline>, RequestContext)> {
|
||||
let state = match cause {
|
||||
@@ -4380,7 +4376,6 @@ impl TenantShard {
|
||||
create_idempotency,
|
||||
gc_compaction_state,
|
||||
rel_size_v2_status,
|
||||
rel_size_migrated_at,
|
||||
self.cancel.child_token(),
|
||||
);
|
||||
|
||||
@@ -5090,7 +5085,6 @@ impl TenantShard {
|
||||
src_timeline.pg_version,
|
||||
);
|
||||
|
||||
let (rel_size_v2_status, rel_size_migrated_at) = src_timeline.get_rel_size_v2_status();
|
||||
let (uninitialized_timeline, _timeline_ctx) = self
|
||||
.prepare_new_timeline(
|
||||
dst_id,
|
||||
@@ -5098,8 +5092,7 @@ impl TenantShard {
|
||||
timeline_create_guard,
|
||||
start_lsn + 1,
|
||||
Some(Arc::clone(src_timeline)),
|
||||
Some(rel_size_v2_status),
|
||||
rel_size_migrated_at,
|
||||
Some(src_timeline.get_rel_size_v2_status()),
|
||||
ctx,
|
||||
)
|
||||
.await?;
|
||||
@@ -5386,7 +5379,6 @@ impl TenantShard {
|
||||
pgdata_lsn,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
ctx,
|
||||
)
|
||||
.await?;
|
||||
@@ -5470,17 +5462,14 @@ impl TenantShard {
|
||||
start_lsn: Lsn,
|
||||
ancestor: Option<Arc<Timeline>>,
|
||||
rel_size_v2_status: Option<RelSizeMigration>,
|
||||
rel_size_migrated_at: Option<Lsn>,
|
||||
ctx: &RequestContext,
|
||||
) -> anyhow::Result<(UninitializedTimeline<'a>, RequestContext)> {
|
||||
let tenant_shard_id = self.tenant_shard_id;
|
||||
|
||||
let resources = self.build_timeline_resources(new_timeline_id);
|
||||
resources.remote_client.init_upload_queue_for_empty_remote(
|
||||
new_metadata,
|
||||
rel_size_v2_status.clone(),
|
||||
rel_size_migrated_at,
|
||||
)?;
|
||||
resources
|
||||
.remote_client
|
||||
.init_upload_queue_for_empty_remote(new_metadata, rel_size_v2_status.clone())?;
|
||||
|
||||
let (timeline_struct, timeline_ctx) = self
|
||||
.create_timeline_struct(
|
||||
@@ -5493,7 +5482,6 @@ impl TenantShard {
|
||||
create_guard.idempotency.clone(),
|
||||
None,
|
||||
rel_size_v2_status,
|
||||
rel_size_migrated_at,
|
||||
ctx,
|
||||
)
|
||||
.context("Failed to create timeline data structure")?;
|
||||
|
||||
@@ -826,6 +826,18 @@ impl TenantManager {
|
||||
peek_slot.is_some()
|
||||
}
|
||||
|
||||
/// Returns whether a local slot exists for a child shard of the given tenant and shard count.
|
||||
/// Note that this just checks for a shard with a larger shard count, and it may not be a
|
||||
/// direct child of the given shard.
|
||||
pub(crate) fn has_child_shard(&self, tenant_id: TenantId, shard_index: ShardIndex) -> bool {
|
||||
match &*self.tenants.read().unwrap() {
|
||||
TenantsMap::Initializing => false,
|
||||
TenantsMap::Open(slots) | TenantsMap::ShuttingDown(slots) => slots
|
||||
.range(TenantShardId::tenant_range(tenant_id))
|
||||
.any(|(tsid, _)| tsid.shard_count > shard_index.shard_count),
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
|
||||
pub(crate) async fn upsert_location(
|
||||
&self,
|
||||
|
||||
@@ -443,8 +443,7 @@ impl RemoteTimelineClient {
|
||||
pub fn init_upload_queue_for_empty_remote(
|
||||
&self,
|
||||
local_metadata: &TimelineMetadata,
|
||||
rel_size_v2_migration: Option<RelSizeMigration>,
|
||||
rel_size_migrated_at: Option<Lsn>,
|
||||
rel_size_v2_status: Option<RelSizeMigration>,
|
||||
) -> anyhow::Result<()> {
|
||||
// Set the maximum number of inprogress tasks to the remote storage concurrency. There's
|
||||
// certainly no point in starting more upload tasks than this.
|
||||
@@ -456,8 +455,7 @@ impl RemoteTimelineClient {
|
||||
let mut upload_queue = self.upload_queue.lock().unwrap();
|
||||
let initialized_queue =
|
||||
upload_queue.initialize_empty_remote(local_metadata, inprogress_limit)?;
|
||||
initialized_queue.dirty.rel_size_migration = rel_size_v2_migration;
|
||||
initialized_queue.dirty.rel_size_migrated_at = rel_size_migrated_at;
|
||||
initialized_queue.dirty.rel_size_migration = rel_size_v2_status;
|
||||
self.update_remote_physical_size_gauge(None);
|
||||
info!("initialized upload queue as empty");
|
||||
Ok(())
|
||||
@@ -996,12 +994,10 @@ impl RemoteTimelineClient {
|
||||
pub(crate) fn schedule_index_upload_for_rel_size_v2_status_update(
|
||||
self: &Arc<Self>,
|
||||
rel_size_v2_status: RelSizeMigration,
|
||||
rel_size_migrated_at: Option<Lsn>,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut guard = self.upload_queue.lock().unwrap();
|
||||
let upload_queue = guard.initialized_mut()?;
|
||||
upload_queue.dirty.rel_size_migration = Some(rel_size_v2_status);
|
||||
upload_queue.dirty.rel_size_migrated_at = rel_size_migrated_at;
|
||||
// TODO: allow this operation to bypass the validation check because we might upload the index part
|
||||
// with no layers but the flag updated. For now, we just modify the index part in memory and the next
|
||||
// upload will include the flag.
|
||||
|
||||
@@ -114,11 +114,6 @@ pub struct IndexPart {
|
||||
/// The timestamp when the timeline was marked invisible in synthetic size calculations.
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub(crate) marked_invisible_at: Option<NaiveDateTime>,
|
||||
|
||||
/// The LSN at which we started the rel size migration. Accesses below this LSN should be
|
||||
/// processed with the v1 read path. Usually this LSN should be set together with `rel_size_migration`.
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub(crate) rel_size_migrated_at: Option<Lsn>,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
|
||||
@@ -147,12 +142,10 @@ impl IndexPart {
|
||||
/// - 12: +l2_lsn
|
||||
/// - 13: +gc_compaction
|
||||
/// - 14: +marked_invisible_at
|
||||
/// - 15: +rel_size_migrated_at
|
||||
const LATEST_VERSION: usize = 15;
|
||||
const LATEST_VERSION: usize = 14;
|
||||
|
||||
// Versions we may see when reading from a bucket.
|
||||
pub const KNOWN_VERSIONS: &'static [usize] =
|
||||
&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
|
||||
pub const KNOWN_VERSIONS: &'static [usize] = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14];
|
||||
|
||||
pub const FILE_NAME: &'static str = "index_part.json";
|
||||
|
||||
@@ -172,7 +165,6 @@ impl IndexPart {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -483,7 +475,6 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -533,7 +524,6 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -584,7 +574,6 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -638,7 +627,6 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let empty_layers_parsed = IndexPart::from_json_bytes(empty_layers_json.as_bytes()).unwrap();
|
||||
@@ -687,7 +675,6 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -739,7 +726,6 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -796,7 +782,6 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -858,7 +843,6 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -921,7 +905,6 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -989,7 +972,6 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -1070,7 +1052,6 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -1152,7 +1133,6 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -1240,7 +1220,6 @@ mod tests {
|
||||
last_completed_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
|
||||
}),
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -1329,97 +1308,6 @@ mod tests {
|
||||
last_completed_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
|
||||
}),
|
||||
marked_invisible_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")),
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
assert_eq!(part, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn v15_rel_size_migrated_at_is_parsed() {
|
||||
let example = r#"{
|
||||
"version": 15,
|
||||
"layer_metadata":{
|
||||
"000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9": { "file_size": 25600000 },
|
||||
"000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51": { "file_size": 9007199254741001 }
|
||||
},
|
||||
"disk_consistent_lsn":"0/16960E8",
|
||||
"metadata": {
|
||||
"disk_consistent_lsn": "0/16960E8",
|
||||
"prev_record_lsn": "0/1696070",
|
||||
"ancestor_timeline": "e45a7f37d3ee2ff17dc14bf4f4e3f52e",
|
||||
"ancestor_lsn": "0/0",
|
||||
"latest_gc_cutoff_lsn": "0/1696070",
|
||||
"initdb_lsn": "0/1696070",
|
||||
"pg_version": 14
|
||||
},
|
||||
"gc_blocking": {
|
||||
"started_at": "2024-07-19T09:00:00.123",
|
||||
"reasons": ["DetachAncestor"]
|
||||
},
|
||||
"import_pgdata": {
|
||||
"V1": {
|
||||
"Done": {
|
||||
"idempotency_key": "specified-by-client-218a5213-5044-4562-a28d-d024c5f057f5",
|
||||
"started_at": "2024-11-13T09:23:42.123",
|
||||
"finished_at": "2024-11-13T09:42:23.123"
|
||||
}
|
||||
}
|
||||
},
|
||||
"rel_size_migration": "legacy",
|
||||
"l2_lsn": "0/16960E8",
|
||||
"gc_compaction": {
|
||||
"last_completed_lsn": "0/16960E8"
|
||||
},
|
||||
"marked_invisible_at": "2023-07-31T09:00:00.123",
|
||||
"rel_size_migrated_at": "0/16960E8"
|
||||
}"#;
|
||||
|
||||
let expected = IndexPart {
|
||||
version: 15,
|
||||
layer_metadata: HashMap::from([
|
||||
("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), LayerFileMetadata {
|
||||
file_size: 25600000,
|
||||
generation: Generation::none(),
|
||||
shard: ShardIndex::unsharded()
|
||||
}),
|
||||
("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), LayerFileMetadata {
|
||||
file_size: 9007199254741001,
|
||||
generation: Generation::none(),
|
||||
shard: ShardIndex::unsharded()
|
||||
})
|
||||
]),
|
||||
disk_consistent_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
|
||||
metadata: TimelineMetadata::new(
|
||||
Lsn::from_str("0/16960E8").unwrap(),
|
||||
Some(Lsn::from_str("0/1696070").unwrap()),
|
||||
Some(TimelineId::from_str("e45a7f37d3ee2ff17dc14bf4f4e3f52e").unwrap()),
|
||||
Lsn::INVALID,
|
||||
Lsn::from_str("0/1696070").unwrap(),
|
||||
Lsn::from_str("0/1696070").unwrap(),
|
||||
PgMajorVersion::PG14,
|
||||
).with_recalculated_checksum().unwrap(),
|
||||
deleted_at: None,
|
||||
lineage: Default::default(),
|
||||
gc_blocking: Some(GcBlocking {
|
||||
started_at: parse_naive_datetime("2024-07-19T09:00:00.123000000"),
|
||||
reasons: enumset::EnumSet::from_iter([GcBlockingReason::DetachAncestor]),
|
||||
}),
|
||||
last_aux_file_policy: Default::default(),
|
||||
archived_at: None,
|
||||
import_pgdata: Some(import_pgdata::index_part_format::Root::V1(import_pgdata::index_part_format::V1::Done(import_pgdata::index_part_format::Done{
|
||||
started_at: parse_naive_datetime("2024-11-13T09:23:42.123000000"),
|
||||
finished_at: parse_naive_datetime("2024-11-13T09:42:23.123000000"),
|
||||
idempotency_key: import_pgdata::index_part_format::IdempotencyKey::new("specified-by-client-218a5213-5044-4562-a28d-d024c5f057f5".to_string()),
|
||||
}))),
|
||||
rel_size_migration: Some(RelSizeMigration::Legacy),
|
||||
l2_lsn: Some("0/16960E8".parse::<Lsn>().unwrap()),
|
||||
gc_compaction: Some(GcCompactionState {
|
||||
last_completed_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
|
||||
}),
|
||||
marked_invisible_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")),
|
||||
rel_size_migrated_at: Some("0/16960E8".parse::<Lsn>().unwrap()),
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
|
||||
@@ -287,7 +287,7 @@ pub struct Timeline {
|
||||
ancestor_lsn: Lsn,
|
||||
|
||||
// The LSN of gc-compaction that was last applied to this timeline.
|
||||
gc_compaction_state: ArcSwapOption<GcCompactionState>,
|
||||
gc_compaction_state: ArcSwap<Option<GcCompactionState>>,
|
||||
|
||||
pub(crate) metrics: Arc<TimelineMetrics>,
|
||||
|
||||
@@ -441,7 +441,7 @@ pub struct Timeline {
|
||||
/// heatmap on demand.
|
||||
heatmap_layers_downloader: Mutex<Option<heatmap_layers_downloader::HeatmapLayersDownloader>>,
|
||||
|
||||
pub(crate) rel_size_v2_status: ArcSwap<(Option<RelSizeMigration>, Option<Lsn>)>,
|
||||
pub(crate) rel_size_v2_status: ArcSwapOption<RelSizeMigration>,
|
||||
|
||||
wait_lsn_log_slow: tokio::sync::Semaphore,
|
||||
|
||||
@@ -450,9 +450,6 @@ pub struct Timeline {
|
||||
|
||||
#[expect(dead_code)]
|
||||
feature_resolver: Arc<TenantFeatureResolver>,
|
||||
|
||||
/// Basebackup will collect the count and store it here. Used for reldirv2 rollout.
|
||||
pub(crate) db_rel_count: ArcSwapOption<(usize, usize)>,
|
||||
}
|
||||
|
||||
pub(crate) enum PreviousHeatmap {
|
||||
@@ -2894,9 +2891,12 @@ impl Timeline {
|
||||
.unwrap_or(self.conf.default_tenant_conf.rel_size_v2_enabled)
|
||||
}
|
||||
|
||||
pub(crate) fn get_rel_size_v2_status(&self) -> (RelSizeMigration, Option<Lsn>) {
|
||||
let (status, migrated_at) = self.rel_size_v2_status.load().as_ref().clone();
|
||||
(status.unwrap_or(RelSizeMigration::Legacy), migrated_at)
|
||||
pub(crate) fn get_rel_size_v2_status(&self) -> RelSizeMigration {
|
||||
self.rel_size_v2_status
|
||||
.load()
|
||||
.as_ref()
|
||||
.map(|s| s.as_ref().clone())
|
||||
.unwrap_or(RelSizeMigration::Legacy)
|
||||
}
|
||||
|
||||
fn get_compaction_upper_limit(&self) -> usize {
|
||||
@@ -3171,7 +3171,6 @@ impl Timeline {
|
||||
create_idempotency: crate::tenant::CreateTimelineIdempotency,
|
||||
gc_compaction_state: Option<GcCompactionState>,
|
||||
rel_size_v2_status: Option<RelSizeMigration>,
|
||||
rel_size_migrated_at: Option<Lsn>,
|
||||
cancel: CancellationToken,
|
||||
) -> Arc<Self> {
|
||||
let disk_consistent_lsn = metadata.disk_consistent_lsn();
|
||||
@@ -3238,7 +3237,7 @@ impl Timeline {
|
||||
}),
|
||||
disk_consistent_lsn: AtomicLsn::new(disk_consistent_lsn.0),
|
||||
|
||||
gc_compaction_state: ArcSwapOption::from_pointee(gc_compaction_state),
|
||||
gc_compaction_state: ArcSwap::new(Arc::new(gc_compaction_state)),
|
||||
|
||||
last_freeze_at: AtomicLsn::new(disk_consistent_lsn.0),
|
||||
last_freeze_ts: RwLock::new(Instant::now()),
|
||||
@@ -3336,18 +3335,13 @@ impl Timeline {
|
||||
|
||||
heatmap_layers_downloader: Mutex::new(None),
|
||||
|
||||
rel_size_v2_status: ArcSwap::from_pointee((
|
||||
rel_size_v2_status,
|
||||
rel_size_migrated_at,
|
||||
)),
|
||||
rel_size_v2_status: ArcSwapOption::from_pointee(rel_size_v2_status),
|
||||
|
||||
wait_lsn_log_slow: tokio::sync::Semaphore::new(1),
|
||||
|
||||
basebackup_cache: resources.basebackup_cache,
|
||||
|
||||
feature_resolver: resources.feature_resolver.clone(),
|
||||
|
||||
db_rel_count: ArcSwapOption::from_pointee(None),
|
||||
};
|
||||
|
||||
result.repartition_threshold =
|
||||
@@ -3419,7 +3413,7 @@ impl Timeline {
|
||||
gc_compaction_state: GcCompactionState,
|
||||
) -> anyhow::Result<()> {
|
||||
self.gc_compaction_state
|
||||
.store(Some(Arc::new(gc_compaction_state.clone())));
|
||||
.store(Arc::new(Some(gc_compaction_state.clone())));
|
||||
self.remote_client
|
||||
.schedule_index_upload_for_gc_compaction_state_update(gc_compaction_state)
|
||||
}
|
||||
@@ -3427,24 +3421,15 @@ impl Timeline {
|
||||
pub(crate) fn update_rel_size_v2_status(
|
||||
&self,
|
||||
rel_size_v2_status: RelSizeMigration,
|
||||
rel_size_migrated_at: Option<Lsn>,
|
||||
) -> anyhow::Result<()> {
|
||||
self.rel_size_v2_status.store(Arc::new((
|
||||
Some(rel_size_v2_status.clone()),
|
||||
rel_size_migrated_at,
|
||||
)));
|
||||
self.rel_size_v2_status
|
||||
.store(Some(Arc::new(rel_size_v2_status.clone())));
|
||||
self.remote_client
|
||||
.schedule_index_upload_for_rel_size_v2_status_update(
|
||||
rel_size_v2_status,
|
||||
rel_size_migrated_at,
|
||||
)
|
||||
.schedule_index_upload_for_rel_size_v2_status_update(rel_size_v2_status)
|
||||
}
|
||||
|
||||
pub(crate) fn get_gc_compaction_state(&self) -> Option<GcCompactionState> {
|
||||
self.gc_compaction_state
|
||||
.load()
|
||||
.as_ref()
|
||||
.map(|x| x.as_ref().clone())
|
||||
self.gc_compaction_state.load_full().as_ref().clone()
|
||||
}
|
||||
|
||||
/// Creates and starts the wal receiver.
|
||||
|
||||
@@ -332,7 +332,6 @@ impl DeleteTimelineFlow {
|
||||
crate::tenant::CreateTimelineIdempotency::FailWithConflict, // doesn't matter what we put here
|
||||
None, // doesn't matter what we put here
|
||||
None, // doesn't matter what we put here
|
||||
None, // doesn't matter what we put here
|
||||
ctx,
|
||||
)
|
||||
.context("create_timeline_struct")?;
|
||||
|
||||
@@ -362,7 +362,7 @@ impl<T: Types> Cache<T> {
|
||||
tokio::time::sleep(RETRY_BACKOFF).await;
|
||||
continue;
|
||||
} else {
|
||||
tracing::info!(
|
||||
tracing::warn!(
|
||||
"Failed to resolve tenant shard after {} attempts: {:?}",
|
||||
GET_MAX_RETRIES,
|
||||
e
|
||||
|
||||
@@ -5,10 +5,12 @@ MODULE_big = neon
|
||||
OBJS = \
|
||||
$(WIN32RES) \
|
||||
communicator.o \
|
||||
communicator_new.o \
|
||||
communicator_process.o \
|
||||
extension_server.o \
|
||||
file_cache.o \
|
||||
hll.o \
|
||||
lfc_prewarm.o \
|
||||
libpagestore.o \
|
||||
logical_replication_monitor.o \
|
||||
neon.o \
|
||||
@@ -33,10 +35,6 @@ SHLIB_LINK = -lcurl
|
||||
UNAME_S := $(shell uname -s)
|
||||
ifeq ($(UNAME_S), Darwin)
|
||||
SHLIB_LINK += -framework Security -framework CoreFoundation -framework SystemConfiguration
|
||||
|
||||
# Link against object files for the current macOS version, to avoid spurious linker warnings.
|
||||
MACOSX_DEPLOYMENT_TARGET := $(shell xcrun --sdk macosx --show-sdk-version)
|
||||
export MACOSX_DEPLOYMENT_TARGET
|
||||
endif
|
||||
|
||||
EXTENSION = neon
|
||||
@@ -67,6 +65,7 @@ WALPROP_OBJS = \
|
||||
|
||||
# libcommunicator.a is built by cargo from the Rust sources under communicator/
|
||||
# subdirectory. `cargo build` also generates communicator_bindings.h.
|
||||
communicator_new.o: communicator/communicator_bindings.h
|
||||
communicator_process.o: communicator/communicator_bindings.h
|
||||
file_cache.o: communicator/communicator_bindings.h
|
||||
|
||||
|
||||
372
pgxn/neon/communicator/Cargo.lock
generated
Normal file
372
pgxn/neon/communicator/Cargo.lock
generated
Normal file
@@ -0,0 +1,372 @@
|
||||
# This file is automatically @generated by Cargo.
|
||||
# It is not intended for manual editing.
|
||||
version = 4
|
||||
|
||||
[[package]]
|
||||
name = "addr2line"
|
||||
version = "0.24.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
|
||||
dependencies = [
|
||||
"gimli",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "adler2"
|
||||
version = "2.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
|
||||
|
||||
[[package]]
|
||||
name = "backtrace"
|
||||
version = "0.3.74"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a"
|
||||
dependencies = [
|
||||
"addr2line",
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"miniz_oxide",
|
||||
"object",
|
||||
"rustc-demangle",
|
||||
"windows-targets",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "base64"
|
||||
version = "0.22.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
|
||||
|
||||
[[package]]
|
||||
name = "bytes"
|
||||
version = "1.10.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"
|
||||
|
||||
[[package]]
|
||||
name = "cfg-if"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
|
||||
|
||||
[[package]]
|
||||
name = "communicator"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"tonic",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fnv"
|
||||
version = "1.0.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
|
||||
|
||||
[[package]]
|
||||
name = "futures-core"
|
||||
version = "0.3.31"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
|
||||
|
||||
[[package]]
|
||||
name = "gimli"
|
||||
version = "0.31.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
|
||||
|
||||
[[package]]
|
||||
name = "http"
|
||||
version = "1.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"fnv",
|
||||
"itoa",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "http-body"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"http",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "http-body-util"
|
||||
version = "0.1.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"futures-core",
|
||||
"http",
|
||||
"http-body",
|
||||
"pin-project-lite",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "itoa"
|
||||
version = "1.0.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.171"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6"
|
||||
|
||||
[[package]]
|
||||
name = "memchr"
|
||||
version = "2.7.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
|
||||
|
||||
[[package]]
|
||||
name = "miniz_oxide"
|
||||
version = "0.8.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ff70ce3e48ae43fa075863cef62e8b43b71a4f2382229920e0df362592919430"
|
||||
dependencies = [
|
||||
"adler2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "object"
|
||||
version = "0.36.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87"
|
||||
dependencies = [
|
||||
"memchr",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "once_cell"
|
||||
version = "1.21.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
|
||||
|
||||
[[package]]
|
||||
name = "percent-encoding"
|
||||
version = "2.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
|
||||
|
||||
[[package]]
|
||||
name = "pin-project"
|
||||
version = "1.1.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a"
|
||||
dependencies = [
|
||||
"pin-project-internal",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pin-project-internal"
|
||||
version = "1.1.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pin-project-lite"
|
||||
version = "0.2.16"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b"
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro2"
|
||||
version = "1.0.94"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84"
|
||||
dependencies = [
|
||||
"unicode-ident",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "quote"
|
||||
version = "1.0.40"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rustc-demangle"
|
||||
version = "0.1.24"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "2.0.100"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"unicode-ident",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio"
|
||||
version = "1.44.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48"
|
||||
dependencies = [
|
||||
"backtrace",
|
||||
"pin-project-lite",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-stream"
|
||||
version = "0.1.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047"
|
||||
dependencies = [
|
||||
"futures-core",
|
||||
"pin-project-lite",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tonic"
|
||||
version = "0.13.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "85839f0b32fd242bb3209262371d07feda6d780d16ee9d2bc88581b89da1549b"
|
||||
dependencies = [
|
||||
"base64",
|
||||
"bytes",
|
||||
"http",
|
||||
"http-body",
|
||||
"http-body-util",
|
||||
"percent-encoding",
|
||||
"pin-project",
|
||||
"tokio-stream",
|
||||
"tower-layer",
|
||||
"tower-service",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tower-layer"
|
||||
version = "0.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e"
|
||||
|
||||
[[package]]
|
||||
name = "tower-service"
|
||||
version = "0.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3"
|
||||
|
||||
[[package]]
|
||||
name = "tracing"
|
||||
version = "0.1.41"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
|
||||
dependencies = [
|
||||
"pin-project-lite",
|
||||
"tracing-attributes",
|
||||
"tracing-core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-attributes"
|
||||
version = "0.1.28"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-core"
|
||||
version = "0.1.33"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c"
|
||||
dependencies = [
|
||||
"once_cell",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "unicode-ident"
|
||||
version = "1.0.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
|
||||
|
||||
[[package]]
|
||||
name = "windows-targets"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
|
||||
dependencies = [
|
||||
"windows_aarch64_gnullvm",
|
||||
"windows_aarch64_msvc",
|
||||
"windows_i686_gnu",
|
||||
"windows_i686_gnullvm",
|
||||
"windows_i686_msvc",
|
||||
"windows_x86_64_gnu",
|
||||
"windows_x86_64_gnullvm",
|
||||
"windows_x86_64_msvc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_gnullvm"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_msvc"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnu"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnullvm"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_msvc"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnu"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnullvm"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_msvc"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
|
||||
@@ -1,12 +1,8 @@
|
||||
[package]
|
||||
name = "communicator"
|
||||
version = "0.1.0"
|
||||
license.workspace = true
|
||||
edition.workspace = true
|
||||
|
||||
[lib]
|
||||
crate-type = ["staticlib"]
|
||||
|
||||
[features]
|
||||
# 'testing' feature is currently unused in the communicator, but we accept it for convenience of
|
||||
# calling build scripts, so that you can pass the same feature to all packages.
|
||||
@@ -15,14 +11,36 @@ testing = []
|
||||
# calling build scripts, so that you can pass the same feature to all packages.
|
||||
rest_broker = []
|
||||
|
||||
[lib]
|
||||
crate-type = ["staticlib"]
|
||||
|
||||
[dependencies]
|
||||
axum.workspace = true
|
||||
bytes.workspace = true
|
||||
clashmap.workspace = true
|
||||
http.workspace = true
|
||||
libc.workspace = true
|
||||
nix.workspace = true
|
||||
atomic_enum = "0.3.0"
|
||||
measured.workspace = true
|
||||
prometheus.workspace = true
|
||||
prost.workspace = true
|
||||
strum_macros.workspace = true
|
||||
thiserror.workspace = true
|
||||
tonic = { workspace = true, default-features = false, features=["codegen", "prost", "transport"] }
|
||||
tokio = { workspace = true, features = ["macros", "net", "io-util", "rt", "rt-multi-thread"] }
|
||||
tokio-pipe = { version = "0.2.12" }
|
||||
tracing.workspace = true
|
||||
tracing-subscriber.workspace = true
|
||||
|
||||
measured.workspace = true
|
||||
metrics.workspace = true
|
||||
uring-common = { workspace = true, features = ["bytes"] }
|
||||
|
||||
pageserver_client_grpc.workspace = true
|
||||
pageserver_api.workspace = true
|
||||
pageserver_page_api.workspace = true
|
||||
|
||||
neon-shmem.workspace = true
|
||||
utils.workspace = true
|
||||
workspace_hack = { version = "0.1", path = "../../../workspace_hack" }
|
||||
|
||||
|
||||
@@ -3,9 +3,18 @@
|
||||
This package provides the so-called "compute-pageserver communicator",
|
||||
or just "communicator" in short. The communicator is a separate
|
||||
background worker process that runs in the PostgreSQL server. It's
|
||||
part of the neon extension. Currently, it only provides an HTTP
|
||||
endpoint for metrics, but in the future it will evolve to handle all
|
||||
communications with the pageservers.
|
||||
part of the neon extension.
|
||||
|
||||
The commuicator handles the communication with the pageservers, and
|
||||
also provides an HTTP endpoint for metrics over a local Unix Domain
|
||||
socket (aka. the "communicator control socket"). On the PostgreSQL
|
||||
side, the glue code in pgxn/neon/ uses the communicator to implement
|
||||
the PostgreSQL Storage Manager (SMGR) interface.
|
||||
|
||||
## Design criteria
|
||||
|
||||
- Low latency
|
||||
- Saturate a 10 Gbit / s network interface without becoming a bottleneck
|
||||
|
||||
## Source code view
|
||||
|
||||
@@ -14,10 +23,122 @@ pgxn/neon/communicator_process.c
|
||||
the glue that interacts with PostgreSQL code and the Rust
|
||||
code in the communicator process.
|
||||
|
||||
pgxn/neon/communicator_new.c
|
||||
Contains the backend code that interacts with the communicator
|
||||
process.
|
||||
|
||||
pgxn/neon/communicator/src/worker_process/
|
||||
Worker process main loop and glue code
|
||||
pgxn/neon/communicator/src/backend_interface.rs
|
||||
The entry point for calls from each backend.
|
||||
|
||||
pgxn/neon/communicator/src/init.rs
|
||||
Initialization at server startup
|
||||
|
||||
At compilation time, pgxn/neon/communicator/ produces a static
|
||||
library, libcommunicator.a. It is linked to the neon.so extension
|
||||
library.
|
||||
|
||||
The real networking code, which is independent of PostgreSQL, is in
|
||||
the pageserver/client_grpc crate.
|
||||
|
||||
## Process view
|
||||
|
||||
The communicator runs in a dedicated background worker process, the
|
||||
"communicator process". The communicator uses a multi-threaded Tokio
|
||||
runtime to execute the IO requests. So the communicator process has
|
||||
multiple threads running. That's unusual for Postgres processes and
|
||||
care must be taken to make that work.
|
||||
|
||||
### Backend <-> worker communication
|
||||
|
||||
Each backend has a number of I/O request slots in shared memory. The
|
||||
slots are statically allocated for each backend, and must not be
|
||||
accessed by other backends. The worker process reads requests from the
|
||||
shared memory slots, and writes responses back to the slots.
|
||||
|
||||
Here's an example snapshot of the system, when two requests from two
|
||||
different backends are in progress:
|
||||
|
||||
```
|
||||
Backends Request slots Communicator process
|
||||
--------- ------------- --------------------
|
||||
|
||||
Backend 1 1: Idle
|
||||
2: Idle
|
||||
3: Processing tokio task handling request 3
|
||||
|
||||
Backend 2 4: Completed
|
||||
5: Processing tokio task handling request 5
|
||||
6: Idle
|
||||
|
||||
... ...
|
||||
```
|
||||
|
||||
To submit an IO request, the backend first picks one of its Idle
|
||||
slots, writes the IO request in the slot, and updates it to
|
||||
'Submitted' state. That transfers the ownership of the slot to the
|
||||
worker process, until the worker process marks the request as
|
||||
Completed. The worker process spawns a separate Tokio task for each
|
||||
request.
|
||||
|
||||
To inform the worker process that a request slot has a pending IO
|
||||
request, there's a pipe shared by the worker process and all backend
|
||||
processes. The backend writes the index of the request slot to the
|
||||
pipe after changing the slot's state to Submitted. This wakes up the
|
||||
worker process.
|
||||
|
||||
(Note that the pipe is just used for wakeups, but the worker process
|
||||
is free to pick up Submitted IO requests even without receiving the
|
||||
wakeup. As of this writing, it doesn't do that, but it might be useful
|
||||
in the future to reduce latency even further, for example.)
|
||||
|
||||
When the worker process has completed processing the request, it
|
||||
writes the result back in the request slot. A GetPage request can also
|
||||
contain a pointer to buffer in the shared buffer cache. In that case,
|
||||
the worker process writes the resulting page contents directly to the
|
||||
buffer, and just a result code in the request slot. It then updates
|
||||
the 'state' field to Completed, which passes the owner ship back to
|
||||
the originating backend. Finally, it signals the process Latch of the
|
||||
originating backend, waking it up.
|
||||
|
||||
### Differences between PostgreSQL v16, v17 and v18
|
||||
|
||||
PostgreSQL v18 introduced the new AIO mechanism. The PostgreSQL AIO
|
||||
mechanism uses a very similar mechanism as described in the previous
|
||||
section, for the communication between AIO worker processes and
|
||||
backends. With our communicator, the AIO worker processes are not
|
||||
used, but we use the same PgAioHandle request slots as in upstream.
|
||||
For Neon-specific IO requests like GetDbSize, a neon request slot is
|
||||
used. But for the actual IO requests, the request slot merely contains
|
||||
a pointer to the PgAioHandle slot. The worker process updates the
|
||||
status of that, calls the IO callbacks upon completionetc, just like
|
||||
the upstream AIO worker processes do.
|
||||
|
||||
## Sequence diagram
|
||||
|
||||
neon
|
||||
PostgreSQL extension backend_interface.rs worker_process.rs processor tonic
|
||||
| . . . .
|
||||
| smgr_read() . . . .
|
||||
+-------------> + . . .
|
||||
. | . . .
|
||||
. | rcommunicator_ . . .
|
||||
. | get_page_at_lsn . . .
|
||||
. +------------------> + . .
|
||||
| . .
|
||||
| write request to . . .
|
||||
| slot . .
|
||||
| . .
|
||||
| . .
|
||||
| submit_request() . .
|
||||
+-----------------> + .
|
||||
| | .
|
||||
| | db_size_request . .
|
||||
+---------------->.
|
||||
. TODO
|
||||
|
||||
|
||||
|
||||
### Compute <-> pageserver protocol
|
||||
|
||||
The protocol between Compute and the pageserver is based on gRPC. See `protos/`.
|
||||
|
||||
|
||||
224
pgxn/neon/communicator/src/backend_comms.rs
Normal file
224
pgxn/neon/communicator/src/backend_comms.rs
Normal file
@@ -0,0 +1,224 @@
|
||||
//! This module implements a request/response "slot" for submitting
|
||||
//! requests from backends to the communicator process.
|
||||
//!
|
||||
//! NB: The "backend" side of this code runs in Postgres backend processes,
|
||||
//! which means that it is not safe to use the 'tracing' crate for logging, nor
|
||||
//! to launch threads or use tokio tasks!
|
||||
|
||||
use std::cell::UnsafeCell;
|
||||
use std::sync::atomic::{AtomicI32, Ordering};
|
||||
|
||||
use crate::neon_request::{NeonIORequest, NeonIOResult};
|
||||
|
||||
use atomic_enum::atomic_enum;
|
||||
|
||||
/// One request/response slot. Each backend has its own set of slots that it
|
||||
/// uses.
|
||||
///
|
||||
/// This is the moral equivalent of PgAioHandle for Postgres AIO requests
|
||||
/// Like PgAioHandle, try to keep this small.
|
||||
///
|
||||
/// There is an array of these in shared memory. Therefore, this must be Sized.
|
||||
///
|
||||
/// ## Lifecycle of a request
|
||||
///
|
||||
/// A slot is always owned by either the backend process or the communicator
|
||||
/// process, depending on the 'state'. Only the owning process is allowed to
|
||||
/// read or modify the slot, except for reading the 'state' itself to check who
|
||||
/// owns it.
|
||||
///
|
||||
/// A slot begins in the Idle state, where it is owned by the backend process.
|
||||
/// To submit a request, the backend process fills the slot with the request
|
||||
/// data, and changes it to the Submitted state. After changing the state, the
|
||||
/// slot is owned by the communicator process, and the backend is not allowed
|
||||
/// to access it until the communicator process marks it as Completed.
|
||||
///
|
||||
/// When the communicator process sees that the slot is in Submitted state, it
|
||||
/// starts to process the request. After processing the request, it stores the
|
||||
/// result in the slot, and changes the state to Completed. It is now owned by
|
||||
/// the backend process again, which may now read the result, and reuse the
|
||||
/// slot for a new request.
|
||||
///
|
||||
/// For correctness of the above protocol, we really only need two states:
|
||||
/// "owned by backend" and "owned by communicator process". But to help with
|
||||
/// debugging and better assertions, there are a few more states. When the
|
||||
/// backend starts to fill in the request details in the slot, it first sets the
|
||||
/// state from Idle to Filling, and when it's done with that, from Filling to
|
||||
/// Submitted. In the Filling state, the slot is still owned by the
|
||||
/// backend. Similarly, when the communicator process starts to process a
|
||||
/// request, it sets it to Processing state first, but the slot is still owned
|
||||
/// by the communicator process.
|
||||
///
|
||||
/// This struct doesn't handle waking up the communicator process when a request
|
||||
/// has been submitted or when a response is ready. The 'owner_procno' is used
|
||||
/// for waking up the backend on completion, but that happens elsewhere.
|
||||
pub struct NeonIORequestSlot {
|
||||
/// similar to PgAioHandleState
|
||||
state: AtomicNeonIORequestSlotState,
|
||||
|
||||
/// The owning process's ProcNumber. The worker process uses this to set the
|
||||
/// process's latch on completion.
|
||||
///
|
||||
/// (This could be calculated from num_neon_request_slots_per_backend and
|
||||
/// the index of this slot in the overall 'neon_requst_slots array'. But we
|
||||
/// prefer the communicator process to not know how the request slots are
|
||||
/// divided between the backends.)
|
||||
owner_procno: AtomicI32,
|
||||
|
||||
/// SAFETY: This is modified by submit_request(), after it has established
|
||||
/// ownership of the slot by setting state from Idle to Filling
|
||||
request: UnsafeCell<NeonIORequest>,
|
||||
|
||||
/// Valid when state is Completed
|
||||
///
|
||||
/// SAFETY: This is modified by RequestProcessingGuard::complete(). There
|
||||
/// can be only one RequestProcessingGuard outstanding for a slot at a time,
|
||||
/// because it is returned by start_processing_request() which checks the
|
||||
/// state, so RequestProcessingGuard has exclusive access to the slot.
|
||||
result: UnsafeCell<NeonIOResult>,
|
||||
}
|
||||
|
||||
// The protocol described in the "Lifecycle of a request" section above ensures
|
||||
// the safe access to the fields
|
||||
unsafe impl Send for NeonIORequestSlot {}
|
||||
unsafe impl Sync for NeonIORequestSlot {}
|
||||
|
||||
impl Default for NeonIORequestSlot {
|
||||
fn default() -> NeonIORequestSlot {
|
||||
NeonIORequestSlot {
|
||||
owner_procno: AtomicI32::new(-1),
|
||||
request: UnsafeCell::new(NeonIORequest::Empty),
|
||||
result: UnsafeCell::new(NeonIOResult::Empty),
|
||||
state: AtomicNeonIORequestSlotState::new(NeonIORequestSlotState::Idle),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[atomic_enum]
|
||||
#[derive(Eq, PartialEq)]
|
||||
pub enum NeonIORequestSlotState {
|
||||
Idle,
|
||||
|
||||
/// Backend is filling in the request
|
||||
Filling,
|
||||
|
||||
/// Backend has submitted the request to the communicator, but the
|
||||
/// communicator process has not yet started processing it.
|
||||
Submitted,
|
||||
|
||||
/// Communicator is processing the request
|
||||
Processing,
|
||||
|
||||
/// Communicator has completed the request, and the 'result' field is now
|
||||
/// valid, but the backend has not read the result yet.
|
||||
Completed,
|
||||
}
|
||||
|
||||
impl NeonIORequestSlot {
|
||||
/// Write a request to the slot, and mark it as Submitted.
|
||||
///
|
||||
/// Note: This does not wake up the worker process to actually process
|
||||
/// the request. It's the caller's responsibility to do that.
|
||||
pub fn submit_request(&self, request: &NeonIORequest, proc_number: i32) {
|
||||
// Verify that the slot is in Idle state previously, and put it in
|
||||
// Filling state.
|
||||
//
|
||||
// XXX: This step isn't strictly necessary. Assuming the caller didn't
|
||||
// screw up and try to use a slot that's already in use, we could fill
|
||||
// the slot and switch it directly from Idle to Submitted state.
|
||||
if let Err(s) = self.state.compare_exchange(
|
||||
NeonIORequestSlotState::Idle,
|
||||
NeonIORequestSlotState::Filling,
|
||||
Ordering::Relaxed,
|
||||
Ordering::Relaxed,
|
||||
) {
|
||||
panic!("unexpected state in request slot: {s:?}");
|
||||
}
|
||||
|
||||
// Fill in the request details
|
||||
self.owner_procno.store(proc_number, Ordering::Relaxed);
|
||||
unsafe { *self.request.get() = *request }
|
||||
|
||||
// This synchronizes-with store/swap in [`start_processing_request`].
|
||||
// Note that this ensures that the previous non-atomic writes visible
|
||||
// to other threads too.
|
||||
self.state
|
||||
.store(NeonIORequestSlotState::Submitted, Ordering::Release);
|
||||
}
|
||||
|
||||
pub fn get_state(&self) -> NeonIORequestSlotState {
|
||||
self.state.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
pub fn try_get_result(&self) -> Option<NeonIOResult> {
|
||||
// This synchronizes-with the store/swap in [`RequestProcessingGuard::completed`]
|
||||
let state = self.state.load(Ordering::Acquire);
|
||||
if state == NeonIORequestSlotState::Completed {
|
||||
let result = unsafe { *self.result.get() };
|
||||
self.state
|
||||
.store(NeonIORequestSlotState::Idle, Ordering::Relaxed);
|
||||
Some(result)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Read the IO request from the slot indicated in the wakeup
|
||||
pub fn start_processing_request<'a>(&'a self) -> Option<RequestProcessingGuard<'a>> {
|
||||
// XXX: using atomic load rather than compare_exchange would be
|
||||
// sufficient here, as long as the communicator process has _some_ means
|
||||
// of tracking which requests it's already processing. That could be a
|
||||
// flag somewhere in communicator's private memory, for example.
|
||||
//
|
||||
// This synchronizes-with the store in [`submit_request`].
|
||||
if let Err(s) = self.state.compare_exchange(
|
||||
NeonIORequestSlotState::Submitted,
|
||||
NeonIORequestSlotState::Processing,
|
||||
Ordering::Acquire,
|
||||
Ordering::Relaxed,
|
||||
) {
|
||||
// FIXME surprising state. This is unexpected at the moment, but if we
|
||||
// started to process requests more aggressively, without waiting for the
|
||||
// read from the pipe, then this could happen
|
||||
panic!("unexpected state in request slot: {s:?}");
|
||||
}
|
||||
|
||||
Some(RequestProcessingGuard(self))
|
||||
}
|
||||
}
|
||||
|
||||
/// [`NeonIORequestSlot::start_processing_request`] returns this guard object to
|
||||
/// indicate that the the caller now "owns" the slot, until it calls
|
||||
/// [`RequestProcessingGuard::completed`].
|
||||
///
|
||||
/// TODO: implement Drop on this, to mark the request as Aborted or Errored
|
||||
/// if [`RequestProcessingGuard::completed`] is not called.
|
||||
pub struct RequestProcessingGuard<'a>(&'a NeonIORequestSlot);
|
||||
|
||||
unsafe impl<'a> Send for RequestProcessingGuard<'a> {}
|
||||
unsafe impl<'a> Sync for RequestProcessingGuard<'a> {}
|
||||
|
||||
impl<'a> RequestProcessingGuard<'a> {
|
||||
pub fn get_request(&self) -> &NeonIORequest {
|
||||
unsafe { &*self.0.request.get() }
|
||||
}
|
||||
|
||||
pub fn get_owner_procno(&self) -> i32 {
|
||||
self.0.owner_procno.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
pub fn completed(self, result: NeonIOResult) {
|
||||
// Store the result to the slot.
|
||||
unsafe {
|
||||
*self.0.result.get() = result;
|
||||
};
|
||||
|
||||
// Mark the request as completed. After that, we no longer have
|
||||
// ownership of the slot, and must not modify it.
|
||||
let old_state = self
|
||||
.0
|
||||
.state
|
||||
.swap(NeonIORequestSlotState::Completed, Ordering::Release);
|
||||
assert!(old_state == NeonIORequestSlotState::Processing);
|
||||
}
|
||||
}
|
||||
283
pgxn/neon/communicator/src/backend_interface.rs
Normal file
283
pgxn/neon/communicator/src/backend_interface.rs
Normal file
@@ -0,0 +1,283 @@
|
||||
//! This code runs in each backend process. That means that launching Rust threads, panicking
|
||||
//! etc. is forbidden!
|
||||
|
||||
use std::os::fd::OwnedFd;
|
||||
|
||||
use crate::backend_comms::NeonIORequestSlot;
|
||||
use crate::init::CommunicatorInitStruct;
|
||||
use crate::integrated_cache::{BackendCacheReadOp, IntegratedCacheReadAccess};
|
||||
use crate::neon_request::{CCachedGetPageVResult, COid};
|
||||
use crate::neon_request::{NeonIORequest, NeonIOResult};
|
||||
|
||||
pub struct CommunicatorBackendStruct<'t> {
|
||||
my_proc_number: i32,
|
||||
|
||||
neon_request_slots: &'t [NeonIORequestSlot],
|
||||
|
||||
submission_pipe_write_fd: OwnedFd,
|
||||
|
||||
pending_cache_read_op: Option<BackendCacheReadOp<'t>>,
|
||||
|
||||
integrated_cache: &'t IntegratedCacheReadAccess<'t>,
|
||||
}
|
||||
|
||||
/// Per-backend initialization; called once from C when a backend process
/// starts up.
///
/// Consumes the `CommunicatorInitStruct` inherited from the postmaster via
/// fork() and returns a leaked, `'static` struct that the C code keeps as an
/// opaque handle for the lifetime of the process.
///
/// Panics if `my_proc_number` is negative.
#[unsafe(no_mangle)]
pub extern "C" fn rcommunicator_backend_init(
    cis: Box<CommunicatorInitStruct>,
    my_proc_number: i32,
) -> &'static mut CommunicatorBackendStruct<'static> {
    if my_proc_number < 0 {
        panic!("cannot attach to communicator shared memory with procnumber {my_proc_number}");
    }

    // Attach to the shared cache read-only. Leaked so the reference lives
    // for the rest of the process ('static).
    let integrated_cache = Box::leak(Box::new(cis.integrated_cache_init_struct.backend_init()));

    let bs: &'static mut CommunicatorBackendStruct =
        Box::leak(Box::new(CommunicatorBackendStruct {
            my_proc_number,
            neon_request_slots: cis.neon_request_slots,

            submission_pipe_write_fd: cis.submission_pipe_write_fd,
            pending_cache_read_op: None,

            integrated_cache,
        }));
    bs
}
|
||||
|
||||
/// Start a request. You can poll for its completion and get the result by
/// calling bcomm_poll_dbsize_request_completion(). The communicator will wake
/// us up by setting our process latch, so to wait for the completion, wait on
/// the latch and call bcomm_poll_dbsize_request_completion() every time the
/// latch is set.
///
/// Returns -1 if the request was satisfied immediately from the cache (the
/// answer is stored in `immediate_result_ptr`); otherwise returns `slot_idx`
/// and the slot must be polled for completion.
///
/// Safety: The C caller must ensure that the references are valid.
/// The requested slot must be free, or this panics.
#[unsafe(no_mangle)]
pub extern "C" fn bcomm_start_io_request(
    bs: &'_ mut CommunicatorBackendStruct,
    slot_idx: i32,
    request: &NeonIORequest,
    immediate_result_ptr: &mut NeonIOResult,
) -> i32 {
    // A pending cached read would be orphaned by starting a new request.
    assert!(bs.pending_cache_read_op.is_none());

    // Check if the request can be satisfied from the cache first
    if let NeonIORequest::RelSize(req) = request {
        if let Some(nblocks) = bs.integrated_cache.get_rel_size(&req.reltag()) {
            *immediate_result_ptr = NeonIOResult::RelSize(nblocks);
            return -1;
        }
    }

    // Create neon request and submit it
    bs.start_neon_io_request(slot_idx, request);

    slot_idx
}
|
||||
|
||||
#[unsafe(no_mangle)]
|
||||
pub extern "C" fn bcomm_start_get_page_v_request(
|
||||
bs: &mut CommunicatorBackendStruct,
|
||||
slot_idx: i32,
|
||||
request: &NeonIORequest,
|
||||
immediate_result_ptr: &mut CCachedGetPageVResult,
|
||||
) -> i32 {
|
||||
let NeonIORequest::GetPageV(get_pagev_request) = request else {
|
||||
panic!("invalid request passed to bcomm_start_get_page_v_request()");
|
||||
};
|
||||
assert!(matches!(request, NeonIORequest::GetPageV(_)));
|
||||
assert!(bs.pending_cache_read_op.is_none());
|
||||
|
||||
// Check if the request can be satisfied from the cache first
|
||||
let mut all_cached = true;
|
||||
let mut read_op = bs.integrated_cache.start_read_op();
|
||||
for i in 0..get_pagev_request.nblocks {
|
||||
if let Some(cache_block) = read_op.get_page(
|
||||
&get_pagev_request.reltag(),
|
||||
get_pagev_request.block_number + i as u32,
|
||||
) {
|
||||
immediate_result_ptr.cache_block_numbers[i as usize] = cache_block;
|
||||
} else {
|
||||
// not found in cache
|
||||
all_cached = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if all_cached {
|
||||
bs.pending_cache_read_op = Some(read_op);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Create neon request and submit it
|
||||
bs.start_neon_io_request(slot_idx, request);
|
||||
|
||||
slot_idx
|
||||
}
|
||||
|
||||
/// Check if a request has completed. Returns:
|
||||
///
|
||||
/// -1 if the request is still being processed
|
||||
/// 0 on success
|
||||
#[unsafe(no_mangle)]
|
||||
pub extern "C" fn bcomm_poll_request_completion(
|
||||
bs: &mut CommunicatorBackendStruct,
|
||||
request_slot_idx: u32,
|
||||
result_p: &mut NeonIOResult,
|
||||
) -> i32 {
|
||||
match bs.neon_request_slots[request_slot_idx as usize].try_get_result() {
|
||||
None => -1, // still processing
|
||||
Some(result) => {
|
||||
*result_p = result;
|
||||
0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if a request has completed. Returns:
|
||||
///
|
||||
/// 'false' if the slot is Idle. The backend process has ownership.
|
||||
/// 'true' if the slot is busy, and should be polled for result.
|
||||
#[unsafe(no_mangle)]
|
||||
pub extern "C" fn bcomm_get_request_slot_status(
|
||||
bs: &mut CommunicatorBackendStruct,
|
||||
request_slot_idx: u32,
|
||||
) -> bool {
|
||||
use crate::backend_comms::NeonIORequestSlotState;
|
||||
match bs.neon_request_slots[request_slot_idx as usize].get_state() {
|
||||
NeonIORequestSlotState::Idle => false,
|
||||
NeonIORequestSlotState::Filling => {
|
||||
// 'false' would be the right result here. However, this
|
||||
// is a very transient state. The C code should never
|
||||
// leave a slot in this state, so if it sees that,
|
||||
// something's gone wrong and it's not clear what to do
|
||||
// with it.
|
||||
panic!("unexpected Filling state in request slot {request_slot_idx}");
|
||||
}
|
||||
NeonIORequestSlotState::Submitted => true,
|
||||
NeonIORequestSlotState::Processing => true,
|
||||
NeonIORequestSlotState::Completed => true,
|
||||
}
|
||||
}
|
||||
|
||||
// LFC functions
|
||||
|
||||
/// Finish a local file cache read
|
||||
///
|
||||
//
|
||||
#[unsafe(no_mangle)]
|
||||
pub extern "C" fn bcomm_finish_cache_read(bs: &mut CommunicatorBackendStruct) -> bool {
|
||||
if let Some(op) = bs.pending_cache_read_op.take() {
|
||||
op.finish()
|
||||
} else {
|
||||
panic!("bcomm_finish_cache_read() called with no cached read pending");
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if the local file cache contians the given block
|
||||
#[unsafe(no_mangle)]
|
||||
pub extern "C" fn bcomm_cache_contains(
|
||||
bs: &mut CommunicatorBackendStruct,
|
||||
spc_oid: COid,
|
||||
db_oid: COid,
|
||||
rel_number: u32,
|
||||
fork_number: u8,
|
||||
block_number: u32,
|
||||
) -> bool {
|
||||
bs.integrated_cache.cache_contains_page(
|
||||
&pageserver_page_api::RelTag {
|
||||
spcnode: spc_oid,
|
||||
dbnode: db_oid,
|
||||
relnode: rel_number,
|
||||
forknum: fork_number,
|
||||
},
|
||||
block_number,
|
||||
)
|
||||
}
|
||||
|
||||
/// Cursor state for iterating over the Local File Cache contents from C.
///
/// repr(C): the layout is shared with a matching C declaration — keep them
/// in sync.
#[repr(C)]
#[derive(Clone, Debug)]
pub struct FileCacheIterator {
    // Next bucket of the cache map to examine; advanced by
    // bcomm_cache_iterate_next().
    next_bucket: u64,

    // Identity of the block found by the most recent iteration step.
    pub spc_oid: COid,
    pub db_oid: COid,
    pub rel_number: u32,
    pub fork_number: u8,
    pub block_number: u32,
}
|
||||
|
||||
/// Iterate over LFC contents
///
/// Resets the cursor to the first bucket; the other fields of `iter` are
/// only meaningful after a successful bcomm_cache_iterate_next() call.
#[allow(clippy::missing_safety_doc)]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn bcomm_cache_iterate_begin(
    _bs: &mut CommunicatorBackendStruct,
    iter: *mut FileCacheIterator,
) {
    unsafe { (*iter).next_bucket = 0 };
}
|
||||
|
||||
/// Advance the iterator to the next occupied bucket of the cache map.
///
/// On success, fills the relation/block fields of `iter` and returns true.
/// Returns false when the end of the map has been reached.
#[allow(clippy::missing_safety_doc)]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn bcomm_cache_iterate_next(
    bs: &mut CommunicatorBackendStruct,
    iter: *mut FileCacheIterator,
) -> bool {
    use crate::integrated_cache::GetBucketResult;
    loop {
        let next_bucket = unsafe { (*iter).next_bucket } as usize;
        match bs.integrated_cache.get_bucket(next_bucket) {
            GetBucketResult::Occupied(rel, blk) => {
                // Copy the bucket's identity out to the C-visible struct
                // and advance the cursor past it for the next call.
                unsafe {
                    (*iter).spc_oid = rel.spcnode;
                    (*iter).db_oid = rel.dbnode;
                    (*iter).rel_number = rel.relnode;
                    (*iter).fork_number = rel.forknum;
                    (*iter).block_number = blk;

                    (*iter).next_bucket += 1;
                }
                break true;
            }
            GetBucketResult::Vacant => {
                // Empty bucket: skip it.
                unsafe {
                    (*iter).next_bucket += 1;
                }
                continue;
            }
            GetBucketResult::OutOfBounds => {
                // Past the end of the map: iteration is complete.
                break false;
            }
        }
    }
}
|
||||
|
||||
impl<'t> CommunicatorBackendStruct<'t> {
    /// Fill a request slot and notify the communicator process.
    ///
    /// The slot must be free, or this panics.
    pub(crate) fn start_neon_io_request(&mut self, request_slot_idx: i32, request: &NeonIORequest) {
        let my_proc_number = self.my_proc_number;

        self.neon_request_slots[request_slot_idx as usize].submit_request(request, my_proc_number);

        // Tell the communicator about it
        self.notify_about_request(request_slot_idx);
    }

    /// Send a wakeup to the communicator process
    fn notify_about_request(self: &CommunicatorBackendStruct<'t>, request_slot_idx: i32) {
        // wake up communicator by writing the idx to the submission pipe
        //
        // This can block, if the pipe is full. That should be very rare,
        // because the communicator tries hard to drain the pipe to prevent
        // that. Also, there's a natural upper bound on how many wakeups can be
        // queued up: there is only a limited number of request slots for each
        // backend.
        //
        // If it does block very briefly, that's not too serious.
        let idxbuf = request_slot_idx.to_ne_bytes();

        let _res = nix::unistd::write(&self.submission_pipe_write_fd, &idxbuf);
        // FIXME: check result, return any errors
    }
}
|
||||
162
pgxn/neon/communicator/src/file_cache.rs
Normal file
162
pgxn/neon/communicator/src/file_cache.rs
Normal file
@@ -0,0 +1,162 @@
|
||||
//! Implement the "low-level" parts of the file cache.
|
||||
//!
|
||||
//! This module just deals with reading and writing the file, and keeping track
|
||||
//! which blocks in the cache file are in use and which are free. The "high
|
||||
//! level" parts of tracking which block in the cache file corresponds to which
|
||||
//! relation block is handled in 'integrated_cache' instead.
|
||||
//!
|
||||
//! This module is only used to access the file from the communicator
|
||||
//! process. The backend processes *also* read the file (and sometimes also
|
||||
//! write it? ), but the backends use direct C library calls for that.
|
||||
use std::fs::File;
|
||||
use std::os::unix::fs::FileExt;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::sync::Mutex;
|
||||
|
||||
use crate::BLCKSZ;
|
||||
|
||||
use tokio::task::spawn_blocking;
|
||||
|
||||
pub type CacheBlock = u64;
|
||||
|
||||
pub const INVALID_CACHE_BLOCK: CacheBlock = u64::MAX;
|
||||
|
||||
/// Low-level handle to the Local File Cache file.
///
/// Tracks which blocks of the file are free; the mapping from relation
/// blocks to cache blocks lives in the 'integrated_cache' module.
#[derive(Debug)]
pub struct FileCache {
    // Shared with the blocking I/O tasks spawned by read_block()/write_block().
    file: Arc<File>,

    free_list: Mutex<FreeList>,

    // metrics
    max_blocks_gauge: metrics::IntGauge,
    num_free_blocks_gauge: metrics::IntGauge,
}
|
||||
|
||||
// TODO: We keep track of all free blocks in this vec. That doesn't really scale.
// Idea: when free_blocks fills up with more than 1024 entries, write them all to
// one block on disk.
#[derive(Debug)]
struct FreeList {
    // Blocks in [0, next_free_block) have been handed out at some point;
    // blocks from next_free_block up to max_blocks are still untouched.
    next_free_block: CacheBlock,
    // Current size limit of the cache file, in blocks.
    max_blocks: u64,

    // Previously-used blocks returned via dealloc_block(), available for reuse.
    free_blocks: Vec<CacheBlock>,
}
|
||||
|
||||
impl FileCache {
    /// Create (or truncate) the cache file at `file_cache_path`, sized to
    /// `initial_size` blocks. Sizes below 100 blocks are clamped up to 100.
    pub fn new(file_cache_path: &Path, mut initial_size: u64) -> Result<FileCache, std::io::Error> {
        if initial_size < 100 {
            tracing::warn!(
                "min size for file cache is 100 blocks, {} requested",
                initial_size
            );
            initial_size = 100;
        }

        // truncate(true): any previous cache contents are discarded at startup.
        let file = std::fs::OpenOptions::new()
            .read(true)
            .write(true)
            .truncate(true)
            .create(true)
            .open(file_cache_path)?;

        let max_blocks_gauge = metrics::IntGauge::new(
            "file_cache_max_blocks",
            "Local File Cache size in 8KiB blocks",
        )
        .unwrap();
        let num_free_blocks_gauge = metrics::IntGauge::new(
            "file_cache_num_free_blocks",
            "Number of free 8KiB blocks in Local File Cache",
        )
        .unwrap();

        tracing::info!("initialized file cache with {} blocks", initial_size);

        Ok(FileCache {
            file: Arc::new(file),
            free_list: Mutex::new(FreeList {
                next_free_block: 0,
                max_blocks: initial_size,
                free_blocks: Vec::new(),
            }),
            max_blocks_gauge,
            num_free_blocks_gauge,
        })
    }

    // File cache management

    /// Read one BLCKSZ-sized block from the cache file into `dst`.
    ///
    /// The read runs on the blocking thread pool via spawn_blocking.
    pub async fn read_block(
        &self,
        cache_block: CacheBlock,
        mut dst: impl uring_common::buf::IoBufMut + Send + Sync,
    ) -> Result<(), std::io::Error> {
        assert!(dst.bytes_total() == BLCKSZ);
        let file = self.file.clone();

        // SAFETY: the raw slice aliases 'dst', which this future owns and
        // keeps alive across the await below while the spawned task writes
        // through it.
        // NOTE(review): this presumes the future is not dropped before the
        // spawn_blocking task finishes — confirm cancellation safety.
        let dst_ref = unsafe { std::slice::from_raw_parts_mut(dst.stable_mut_ptr(), BLCKSZ) };

        spawn_blocking(move || file.read_exact_at(dst_ref, cache_block * BLCKSZ as u64)).await??;
        Ok(())
    }

    /// Write one BLCKSZ-sized block from `src` to the cache file.
    pub async fn write_block(
        &self,
        cache_block: CacheBlock,
        src: impl uring_common::buf::IoBuf + Send + Sync,
    ) -> Result<(), std::io::Error> {
        assert!(src.bytes_init() == BLCKSZ);
        let file = self.file.clone();

        // SAFETY: see read_block() regarding buffer lifetime.
        let src_ref = unsafe { std::slice::from_raw_parts(src.stable_ptr(), BLCKSZ) };

        spawn_blocking(move || file.write_all_at(src_ref, cache_block * BLCKSZ as u64)).await??;

        Ok(())
    }

    /// Allocate a block from the cache file. Returns None if the file is full.
    pub fn alloc_block(&self) -> Option<CacheBlock> {
        let mut free_list = self.free_list.lock().unwrap();
        // Prefer recycling a previously-freed block.
        if let Some(x) = free_list.free_blocks.pop() {
            return Some(x);
        }
        // Otherwise extend into the untouched tail of the file.
        if free_list.next_free_block < free_list.max_blocks {
            let result = free_list.next_free_block;
            free_list.next_free_block += 1;
            return Some(result);
        }
        None
    }

    /// Return a block to the free list for reuse.
    pub fn dealloc_block(&self, cache_block: CacheBlock) {
        let mut free_list = self.free_list.lock().unwrap();
        free_list.free_blocks.push(cache_block);
    }
}
|
||||
|
||||
impl metrics::core::Collector for FileCache {
    fn desc(&self) -> Vec<&metrics::core::Desc> {
        let mut all_descs = Vec::new();
        all_descs.append(&mut self.max_blocks_gauge.desc());
        all_descs.append(&mut self.num_free_blocks_gauge.desc());
        all_descs
    }

    fn collect(&self) -> Vec<metrics::proto::MetricFamily> {
        // Refresh the gauges from the free list before reporting.
        {
            let guard = self.free_list.lock().unwrap();
            self.max_blocks_gauge.set(guard.max_blocks as i64);

            // Free = explicitly freed blocks + the never-allocated tail.
            let never_allocated = guard.max_blocks as i64 - guard.next_free_block as i64;
            let total_free = guard.free_blocks.len() as i64 + never_allocated;
            self.num_free_blocks_gauge.set(total_free);
        }

        let mut families = Vec::new();
        families.append(&mut self.max_blocks_gauge.collect());
        families.append(&mut self.num_free_blocks_gauge.collect());
        families
    }
}
|
||||
109
pgxn/neon/communicator/src/global_allocator.rs
Normal file
109
pgxn/neon/communicator/src/global_allocator.rs
Normal file
@@ -0,0 +1,109 @@
|
||||
//! Global allocator, for tracking memory usage of the Rust parts
|
||||
//!
|
||||
//! Postgres is designed to handle allocation failure (ie. malloc() returning NULL) gracefully. It
|
||||
//! rolls backs the transaction and gives the user an "ERROR: out of memory" error. Rust code
|
||||
//! however panics if an allocation fails. We don't want that to ever happen, because an unhandled
|
||||
//! panic leads to Postgres crash and restart. Our strategy is to pre-allocate a large enough chunk
|
||||
//! of memory for use by the Rust code, so that the allocations never fail.
|
||||
//!
|
||||
//! To pick the size for the pre-allocated chunk, we have a metric to track the high watermark
|
||||
//! memory usage of all the Rust allocations in total.
|
||||
//!
|
||||
//! TODO:
|
||||
//!
|
||||
//! - Currently we just export the metrics. Actual allocations are still just passed through to
|
||||
//! the system allocator.
|
||||
//! - Take padding etc. overhead into account
|
||||
|
||||
use std::alloc::{GlobalAlloc, Layout, System};
|
||||
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
|
||||
|
||||
use metrics::IntGauge;
|
||||
|
||||
/// Wrapper around the system allocator that counts allocations and tracks
/// the current and high-watermark number of bytes allocated.
struct MyAllocator {
    // Total number of alloc()/dealloc() calls since process start.
    allocations: AtomicU64,
    deallocations: AtomicU64,

    // Bytes currently allocated, and the highest value 'allocated' has
    // reached (padding overhead not accounted for; see module TODO).
    allocated: AtomicUsize,
    high: AtomicUsize,
}
|
||||
|
||||
unsafe impl GlobalAlloc for MyAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        self.allocations.fetch_add(1, Ordering::Relaxed);
        // fetch_add returns the *previous* value, so add layout.size()
        // again to get the new total before updating the high watermark.
        let mut allocated = self.allocated.fetch_add(layout.size(), Ordering::Relaxed);
        allocated += layout.size();
        self.high.fetch_max(allocated, Ordering::Relaxed);
        // Accounting only; the actual allocation is delegated to the
        // system allocator.
        unsafe { System.alloc(layout) }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        self.deallocations.fetch_add(1, Ordering::Relaxed);
        self.allocated.fetch_sub(layout.size(), Ordering::Relaxed);
        unsafe { System.dealloc(ptr, layout) }
    }
}
|
||||
|
||||
/// The process-wide allocator instance; every Rust allocation in this
/// library is routed through it.
#[global_allocator]
static GLOBAL: MyAllocator = MyAllocator {
    allocations: AtomicU64::new(0),
    deallocations: AtomicU64::new(0),
    allocated: AtomicUsize::new(0),
    high: AtomicUsize::new(0),
};
|
||||
|
||||
/// Metrics collector exposing the GLOBAL allocator's counters as gauges.
pub struct MyAllocatorCollector {
    allocations: IntGauge,
    deallocations: IntGauge,
    allocated: IntGauge,
    high: IntGauge,
}
|
||||
|
||||
impl MyAllocatorCollector {
    /// Create the gauges that expose the allocator counters. Their values
    /// are refreshed on every collect() call.
    pub fn new() -> MyAllocatorCollector {
        MyAllocatorCollector {
            allocations: IntGauge::new("allocations_total", "Number of allocations in Rust code")
                .unwrap(),
            deallocations: IntGauge::new(
                "deallocations_total",
                "Number of deallocations in Rust code",
            )
            .unwrap(),
            allocated: IntGauge::new("allocated_total", "Bytes currently allocated").unwrap(),
            high: IntGauge::new("allocated_high", "High watermark of allocated bytes").unwrap(),
        }
    }
}
|
||||
|
||||
impl metrics::core::Collector for MyAllocatorCollector {
|
||||
fn desc(&self) -> Vec<&metrics::core::Desc> {
|
||||
let mut descs = Vec::new();
|
||||
|
||||
descs.append(&mut self.allocations.desc());
|
||||
descs.append(&mut self.deallocations.desc());
|
||||
descs.append(&mut self.allocated.desc());
|
||||
descs.append(&mut self.high.desc());
|
||||
|
||||
descs
|
||||
}
|
||||
|
||||
fn collect(&self) -> Vec<metrics::proto::MetricFamily> {
|
||||
let mut values = Vec::new();
|
||||
|
||||
// update the gauges
|
||||
self.allocations
|
||||
.set(GLOBAL.allocations.load(Ordering::Relaxed) as i64);
|
||||
self.deallocations
|
||||
.set(GLOBAL.allocations.load(Ordering::Relaxed) as i64);
|
||||
self.allocated
|
||||
.set(GLOBAL.allocated.load(Ordering::Relaxed) as i64);
|
||||
self.high.set(GLOBAL.high.load(Ordering::Relaxed) as i64);
|
||||
|
||||
values.append(&mut self.allocations.collect());
|
||||
values.append(&mut self.deallocations.collect());
|
||||
values.append(&mut self.allocated.collect());
|
||||
values.append(&mut self.high.collect());
|
||||
|
||||
values
|
||||
}
|
||||
}
|
||||
168
pgxn/neon/communicator/src/init.rs
Normal file
168
pgxn/neon/communicator/src/init.rs
Normal file
@@ -0,0 +1,168 @@
|
||||
//! Initialization functions. These are executed in the postmaster process,
|
||||
//! at different stages of server startup.
|
||||
//!
|
||||
//!
|
||||
//! Communicator initialization steps:
|
||||
//!
|
||||
//! 1. At postmaster startup, before shared memory is allocated,
|
||||
//! rcommunicator_shmem_size() is called to get the amount of
|
||||
//! shared memory that this module needs.
|
||||
//!
|
||||
//! 2. Later, after the shared memory has been allocated,
|
||||
//! rcommunicator_shmem_init() is called to initialize the shmem
|
||||
//! area.
|
||||
//!
|
||||
//! Per process initialization:
|
||||
//!
|
||||
//! When a backend process starts up, it calls rcommunicator_backend_init().
|
||||
//! In the communicator worker process, other functions are called, see
|
||||
//! `worker_process` module.
|
||||
|
||||
use std::ffi::c_int;
|
||||
use std::mem;
|
||||
use std::mem::MaybeUninit;
|
||||
use std::os::fd::OwnedFd;
|
||||
|
||||
use crate::backend_comms::NeonIORequestSlot;
|
||||
use crate::integrated_cache::IntegratedCacheInitStruct;
|
||||
|
||||
/// This struct is created in the postmaster process, and inherited to
/// the communicator process and all backend processes through fork()
#[repr(C)]
pub struct CommunicatorInitStruct {
    // Both ends of the pipe used by backends to wake up the communicator.
    pub submission_pipe_read_fd: OwnedFd,
    pub submission_pipe_write_fd: OwnedFd,

    // Shared memory data structures
    pub num_neon_request_slots: u32,

    pub neon_request_slots: &'static [NeonIORequestSlot],

    pub integrated_cache_init_struct: IntegratedCacheInitStruct<'static>,
}
|
||||
|
||||
impl std::fmt::Debug for CommunicatorInitStruct {
    // Hand-written so the slot array is summarized by its length rather
    // than printed in full (and because not every field implements Debug).
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
        fmt.debug_struct("CommunicatorInitStruct")
            .field("submission_pipe_read_fd", &self.submission_pipe_read_fd)
            .field("submission_pipe_write_fd", &self.submission_pipe_write_fd)
            .field("num_neon_request_slots", &self.num_neon_request_slots)
            .field("neon_request_slots length", &self.neon_request_slots.len())
            .finish()
    }
}
|
||||
|
||||
/// Return the number of bytes of shared memory needed for the given number
/// of request slots. Called at postmaster startup, before shared memory is
/// allocated.
#[unsafe(no_mangle)]
pub extern "C" fn rcommunicator_shmem_size(num_neon_request_slots: u32) -> u64 {
    let mut size = 0;

    // NOTE(review): no extra space is reserved for the alignment padding
    // that alloc_array_from_slice() may consume — presumably the area is
    // already suitably aligned; confirm.
    size += mem::size_of::<NeonIORequestSlot>() * num_neon_request_slots as usize;

    // For integrated_cache's Allocator. TODO: make this adjustable
    size += IntegratedCacheInitStruct::shmem_size();

    size as u64
}
|
||||
|
||||
/// Initialize the shared memory segment. Returns a backend-private
/// struct, which will be inherited by backend processes through fork
#[unsafe(no_mangle)]
pub extern "C" fn rcommunicator_shmem_init(
    submission_pipe_read_fd: c_int,
    submission_pipe_write_fd: c_int,
    num_neon_request_slots: u32,
    shmem_area_ptr: *mut MaybeUninit<u8>,
    shmem_area_len: u64,
    initial_file_cache_size: u64,
    max_file_cache_size: u64,
) -> &'static mut CommunicatorInitStruct {
    // SAFETY: the C caller hands us a pointer to the shared memory area of
    // exactly 'shmem_area_len' bytes; it must stay mapped for the rest of
    // the process.
    let shmem_area: &'static mut [MaybeUninit<u8>] =
        unsafe { std::slice::from_raw_parts_mut(shmem_area_ptr, shmem_area_len as usize) };

    // Carve the request-slot array off the front of the area.
    let (neon_request_slots, remaining_area) =
        alloc_array_from_slice::<NeonIORequestSlot>(shmem_area, num_neon_request_slots as usize);

    for slot in neon_request_slots.iter_mut() {
        slot.write(NeonIORequestSlot::default());
    }

    // 'neon_request_slots' is initialized now. (MaybeUninit::slice_assume_init_mut() is nightly-only
    // as of this writing.)
    let neon_request_slots = unsafe {
        std::mem::transmute::<&mut [MaybeUninit<NeonIORequestSlot>], &mut [NeonIORequestSlot]>(
            neon_request_slots,
        )
    };

    // Give the rest of the area to the integrated cache
    let integrated_cache_init_struct = IntegratedCacheInitStruct::shmem_init(
        remaining_area,
        initial_file_cache_size,
        max_file_cache_size,
    );

    // SAFETY: takes ownership of the pipe fds handed over by the C caller;
    // C must not close them afterwards.
    let (submission_pipe_read_fd, submission_pipe_write_fd) = unsafe {
        use std::os::fd::FromRawFd;
        (
            OwnedFd::from_raw_fd(submission_pipe_read_fd),
            OwnedFd::from_raw_fd(submission_pipe_write_fd),
        )
    };

    // Leaked so the reference is 'static; the struct is inherited by child
    // processes through fork().
    let cis: &'static mut CommunicatorInitStruct = Box::leak(Box::new(CommunicatorInitStruct {
        submission_pipe_read_fd,
        submission_pipe_write_fd,

        num_neon_request_slots,
        neon_request_slots,

        integrated_cache_init_struct,
    }));

    cis
}
|
||||
|
||||
// fixme: currently unused
/// Carve space for a single `T` out of the front of `area`, honoring the
/// alignment of `T`. Returns the (uninitialized) slot and the remaining
/// tail of the area. Panics ("out of memory") if the area is too small.
#[allow(dead_code)]
pub fn alloc_from_slice<T>(
    area: &mut [MaybeUninit<u8>],
) -> (&mut MaybeUninit<T>, &mut [MaybeUninit<u8>]) {
    let layout = std::alloc::Layout::new::<T>();

    // Skip bytes until the start of the area is aligned for T.
    let pad = area.as_mut_ptr().align_offset(layout.align());
    if area.len() < pad + layout.size() {
        panic!("out of memory");
    }
    let (result_area, remain) = area[pad..].split_at_mut(layout.size());

    let result = unsafe { &mut *result_area.as_mut_ptr().cast::<MaybeUninit<T>>() };

    (result, remain)
}
|
||||
|
||||
/// Carve space for an array of `len` elements of type `T` out of the front
/// of `area`, honoring T's alignment requirement.
///
/// Returns the (uninitialized) array and the remaining tail of the area.
/// Panics ("out of memory") if the area is too small, or if the requested
/// size overflows usize.
pub fn alloc_array_from_slice<T>(
    area: &mut [MaybeUninit<u8>],
    len: usize,
) -> (&mut [MaybeUninit<T>], &mut [MaybeUninit<u8>]) {
    let layout = std::alloc::Layout::new::<T>();

    let area_start = area.as_mut_ptr();

    // pad to satisfy alignment requirements
    let padding = area_start.align_offset(layout.align());
    // Use checked arithmetic so a huge 'len' cannot wrap the size
    // computation and silently pass the bounds check below.
    let array_bytes = layout
        .size()
        .checked_mul(len)
        .expect("array size overflows usize");
    let total = padding
        .checked_add(array_bytes)
        .expect("array size overflows usize");
    if total > area.len() {
        panic!("out of memory");
    }
    let area = &mut area[padding..];
    let (result_area, remain) = area.split_at_mut(array_bytes);

    let result_ptr: *mut MaybeUninit<T> = result_area.as_mut_ptr().cast();
    // SAFETY: result_area is properly aligned (padding above) and spans
    // exactly len * size_of::<T>() bytes.
    let result = unsafe { std::slice::from_raw_parts_mut(result_ptr, len) };

    (result, remain)
}
|
||||
824
pgxn/neon/communicator/src/integrated_cache.rs
Normal file
824
pgxn/neon/communicator/src/integrated_cache.rs
Normal file
@@ -0,0 +1,824 @@
|
||||
//! Integrated communicator cache
|
||||
//!
|
||||
//! It tracks:
|
||||
//! - Relation sizes and existence
|
||||
//! - Last-written LSN
|
||||
//! - Block cache (also known as LFC)
|
||||
//!
|
||||
//! TODO: limit the size
|
||||
//! TODO: concurrency
|
||||
//!
|
||||
//! Note: This deals with "relations" which is really just one "relation fork" in Postgres
|
||||
//! terms. RelFileLocator + ForkNumber is the key.
|
||||
|
||||
//
|
||||
// TODO: Thoughts on eviction:
|
||||
//
|
||||
// There are two things we need to track, and evict if we run out of space:
|
||||
// - blocks in the file cache's file. If the file grows too large, need to evict something.
|
||||
// Also if the cache is resized
|
||||
//
|
||||
// - entries in the cache map. If we run out of memory in the shmem area, need to evict
|
||||
// something
|
||||
//
|
||||
|
||||
use std::mem::MaybeUninit;
|
||||
use std::sync::atomic::{AtomicBool, AtomicU32, AtomicU64, Ordering};
|
||||
|
||||
use utils::lsn::{AtomicLsn, Lsn};
|
||||
|
||||
use crate::file_cache::INVALID_CACHE_BLOCK;
|
||||
use crate::file_cache::{CacheBlock, FileCache};
|
||||
use pageserver_page_api::RelTag;
|
||||
|
||||
use metrics::{IntCounter, IntGauge};
|
||||
|
||||
use neon_shmem::hash::{HashMapInit, entry::Entry};
|
||||
use neon_shmem::shmem::ShmemHandle;
|
||||
|
||||
// in # of entries
|
||||
const RELSIZE_CACHE_SIZE: u32 = 64 * 1024;
|
||||
|
||||
/// This struct is initialized at postmaster startup, and passed to all the processes via fork().
pub struct IntegratedCacheInitStruct<'t> {
    // Handles each process uses to attach to the shared hash maps, either
    // as a reader (backends) or a writer (communicator).
    relsize_cache_handle: HashMapInit<'t, RelKey, RelEntry>,
    block_map_handle: HashMapInit<'t, BlockKey, BlockEntry>,
}
|
||||
|
||||
/// Represents write-access to the integrated cache. This is used by the communicator process.
#[derive(Debug)]
pub struct IntegratedCacheWriteAccess<'t> {
    relsize_cache: neon_shmem::hash::HashMapAccess<'t, RelKey, RelEntry>,
    block_map: neon_shmem::hash::HashMapAccess<'t, BlockKey, BlockEntry>,

    // Global last-written LSN, seeded from the server's LSN at startup.
    // NOTE(review): exact semantics not visible in this chunk — confirm.
    global_lw_lsn: AtomicU64,

    pub(crate) file_cache: Option<FileCache>,

    // Fields for eviction
    clock_hand: std::sync::Mutex<usize>,

    // Metrics
    page_evictions_counter: IntCounter,
    clock_iterations_counter: IntCounter,

    // metrics from the hash map
    block_map_num_buckets: IntGauge,
    block_map_num_buckets_in_use: IntGauge,

    relsize_cache_num_buckets: IntGauge,
    relsize_cache_num_buckets_in_use: IntGauge,
}
|
||||
|
||||
/// Represents read-only access to the integrated cache. Backend processes have this.
pub struct IntegratedCacheReadAccess<'t> {
    relsize_cache: neon_shmem::hash::HashMapAccess<'t, RelKey, RelEntry>,
    block_map: neon_shmem::hash::HashMapAccess<'t, BlockKey, BlockEntry>,
}
|
||||
|
||||
impl<'t> IntegratedCacheInitStruct<'t> {
    /// Return the desired size in bytes of the fixed-size shared memory area to reserve for the
    /// integrated cache.
    pub fn shmem_size() -> usize {
        // The relsize cache is fixed-size. The block map is allocated in a separate resizable
        // area.
        HashMapInit::<RelKey, RelEntry>::estimate_size(RELSIZE_CACHE_SIZE)
    }

    /// Initialize the shared memory segment. This runs once in postmaster. Returns a struct which
    /// will be inherited by all processes through fork.
    pub fn shmem_init(
        shmem_area: &'t mut [MaybeUninit<u8>],
        initial_file_cache_size: u64,
        max_file_cache_size: u64,
    ) -> IntegratedCacheInitStruct<'t> {
        // Initialize the relsize cache in the fixed-size area
        let relsize_cache_handle =
            neon_shmem::hash::HashMapInit::with_fixed(RELSIZE_CACHE_SIZE, shmem_area);

        // Upper bound for the resizable block map, derived from the largest
        // file cache size we may grow to.
        let max_bytes =
            HashMapInit::<BlockKey, BlockEntry>::estimate_size(max_file_cache_size as u32);

        // Initialize the block map in a separate resizable shared memory area
        let shmem_handle = ShmemHandle::new("block mapping", 0, max_bytes).unwrap();

        let block_map_handle =
            neon_shmem::hash::HashMapInit::with_shmem(initial_file_cache_size as u32, shmem_handle);
        IntegratedCacheInitStruct {
            relsize_cache_handle,
            block_map_handle,
        }
    }

    /// Initialize access to the integrated cache for the communicator worker process
    pub fn worker_process_init(
        self,
        lsn: Lsn,
        file_cache: Option<FileCache>,
    ) -> IntegratedCacheWriteAccess<'t> {
        let IntegratedCacheInitStruct {
            relsize_cache_handle,
            block_map_handle,
        } = self;
        IntegratedCacheWriteAccess {
            relsize_cache: relsize_cache_handle.attach_writer(),
            block_map: block_map_handle.attach_writer(),
            // Seed the global last-written LSN from the caller-provided LSN.
            global_lw_lsn: AtomicU64::new(lsn.0),
            file_cache,
            clock_hand: std::sync::Mutex::new(0),

            page_evictions_counter: metrics::IntCounter::new(
                "integrated_cache_evictions",
                "Page evictions from the Local File Cache",
            )
            .unwrap(),

            clock_iterations_counter: metrics::IntCounter::new(
                "clock_iterations",
                "Number of times the clock hand has moved",
            )
            .unwrap(),

            block_map_num_buckets: metrics::IntGauge::new(
                "block_map_num_buckets",
                "Allocated size of the block cache hash map",
            )
            .unwrap(),
            block_map_num_buckets_in_use: metrics::IntGauge::new(
                "block_map_num_buckets_in_use",
                "Number of buckets in use in the block cache hash map",
            )
            .unwrap(),

            relsize_cache_num_buckets: metrics::IntGauge::new(
                "relsize_cache_num_buckets",
                "Allocated size of the relsize cache hash map",
            )
            .unwrap(),
            relsize_cache_num_buckets_in_use: metrics::IntGauge::new(
                "relsize_cache_num_buckets_in_use",
                "Number of buckets in use in the relsize cache hash map",
            )
            .unwrap(),
        }
    }

    /// Initialize access to the integrated cache for a backend process
    pub fn backend_init(self) -> IntegratedCacheReadAccess<'t> {
        let IntegratedCacheInitStruct {
            relsize_cache_handle,
            block_map_handle,
        } = self;

        IntegratedCacheReadAccess {
            relsize_cache: relsize_cache_handle.attach_reader(),
            block_map: block_map_handle.attach_reader(),
        }
    }
}
|
||||
|
||||
/// Value stored in the cache mapping hash table.
struct BlockEntry {
    // Last-written LSN of this block.
    lw_lsn: AtomicLsn,
    // Block number within the cache file. Presumably INVALID_CACHE_BLOCK
    // when the page is not resident — confirm against the write paths.
    cache_block: AtomicU64,

    // Presumably a pin count that prevents eviction while nonzero — confirm.
    pinned: AtomicU64,

    // 'referenced' bit for the clock algorithm
    referenced: AtomicBool,
}
|
||||
|
||||
/// Value stored in the relsize cache hash table. One entry per relation fork.
struct RelEntry {
    /// cached size of the relation
    /// u32::MAX means 'not known' (that's InvalidBlockNumber in Postgres)
    nblocks: AtomicU32,

    /// This is the last time the "metadata" of this relation changed, not
    /// the contents of the blocks. That is, the size of the relation.
    lw_lsn: AtomicLsn,
}
|
||||
|
||||
impl std::fmt::Debug for RelEntry {
|
||||
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
|
||||
fmt.debug_struct("Rel")
|
||||
.field("nblocks", &self.nblocks.load(Ordering::Relaxed))
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
impl std::fmt::Debug for BlockEntry {
|
||||
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
|
||||
fmt.debug_struct("Block")
|
||||
.field("lw_lsn", &self.lw_lsn.load())
|
||||
.field("cache_block", &self.cache_block.load(Ordering::Relaxed))
|
||||
.field("pinned", &self.pinned.load(Ordering::Relaxed))
|
||||
.field("referenced", &self.referenced.load(Ordering::Relaxed))
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// Key type for the relsize cache hash table; wraps the relation's RelTag.
#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Hash, Ord)]
struct RelKey(RelTag);

impl From<&RelTag> for RelKey {
    fn from(val: &RelTag) -> RelKey {
        RelKey(*val)
    }
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Hash, Ord)]
|
||||
struct BlockKey {
|
||||
rel: RelTag,
|
||||
block_number: u32,
|
||||
}
|
||||
|
||||
impl From<(&RelTag, u32)> for BlockKey {
|
||||
fn from(val: (&RelTag, u32)) -> BlockKey {
|
||||
BlockKey {
|
||||
rel: *val.0,
|
||||
block_number: val.1,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Return type used in the cache's get_*() functions. 'Found' means that the page, or other
/// information that was queried, exists in the cache. 'NotFound' means it does not, and
/// carries the LSN to use when requesting the data from the pageserver instead.
pub enum CacheResult<V> {
    /// The queried page or other information existed in the cache.
    Found(V),

    /// The cache doesn't contain the page (or other queried information, like relation size). The
    /// Lsn is the 'not_modified_since' LSN that should be used in the request to the pageserver to
    /// read the page.
    NotFound(Lsn),
}
|
||||
|
||||
impl<'t> IntegratedCacheWriteAccess<'t> {
|
||||
pub fn get_rel_size(&'t self, rel: &RelTag) -> CacheResult<u32> {
|
||||
if let Some(nblocks) = get_rel_size(&self.relsize_cache, rel) {
|
||||
CacheResult::Found(nblocks)
|
||||
} else {
|
||||
let lsn = Lsn(self.global_lw_lsn.load(Ordering::Relaxed));
|
||||
CacheResult::NotFound(lsn)
|
||||
}
|
||||
}
|
||||
|
||||
/// Read a page from the cache into `dst`.
///
/// On a hit, the entry's pin count is raised for the duration of the read
/// from the cache file (so it cannot be evicted concurrently), and the
/// 'referenced' bit is set for the clock eviction algorithm. On a miss,
/// returns NotFound with the 'not_modified_since' LSN to use when
/// requesting the page from the pageserver: the entry's own lw_lsn if a
/// BlockEntry exists without cached contents, or the global lw_lsn otherwise.
pub async fn get_page(
    &'t self,
    rel: &RelTag,
    block_number: u32,
    dst: impl uring_common::buf::IoBufMut + Send + Sync,
) -> Result<CacheResult<()>, std::io::Error> {
    let x = if let Some(block_entry) = self.block_map.get(&BlockKey::from((rel, block_number)))
    {
        // Mark the entry as recently used for the clock algorithm.
        block_entry.referenced.store(true, Ordering::Relaxed);

        let cache_block = block_entry.cache_block.load(Ordering::Relaxed);
        if cache_block != INVALID_CACHE_BLOCK {
            // pin it and release lock
            block_entry.pinned.fetch_add(1, Ordering::Relaxed);

            // The DeferredUnpin guard decrements the pin count on drop.
            (cache_block, DeferredUnpin(block_entry.pinned.as_ptr()))
        } else {
            // Entry exists but the page contents are not cached.
            return Ok(CacheResult::NotFound(block_entry.lw_lsn.load()));
        }
    } else {
        // No entry at all: fall back to the global last-written LSN.
        let lsn = Lsn(self.global_lw_lsn.load(Ordering::Relaxed));
        return Ok(CacheResult::NotFound(lsn));
    };

    let (cache_block, _deferred_pin) = x;
    self.file_cache
        .as_ref()
        .unwrap()
        .read_block(cache_block, dst)
        .await?;

    // unpin the entry (by implicitly dropping deferred_pin)
    Ok(CacheResult::Found(()))
}
|
||||
|
||||
pub async fn page_is_cached(
|
||||
&'t self,
|
||||
rel: &RelTag,
|
||||
block_number: u32,
|
||||
) -> Result<CacheResult<()>, std::io::Error> {
|
||||
if let Some(block_entry) = self.block_map.get(&BlockKey::from((rel, block_number))) {
|
||||
// This is used for prefetch requests. Treat the probe as an 'access', to keep it
|
||||
// in cache.
|
||||
block_entry.referenced.store(true, Ordering::Relaxed);
|
||||
|
||||
let cache_block = block_entry.cache_block.load(Ordering::Relaxed);
|
||||
|
||||
if cache_block != INVALID_CACHE_BLOCK {
|
||||
Ok(CacheResult::Found(()))
|
||||
} else {
|
||||
Ok(CacheResult::NotFound(block_entry.lw_lsn.load()))
|
||||
}
|
||||
} else {
|
||||
let lsn = Lsn(self.global_lw_lsn.load(Ordering::Relaxed));
|
||||
Ok(CacheResult::NotFound(lsn))
|
||||
}
|
||||
}
|
||||
|
||||
/// Does the relation exists? CacheResult::NotFound means that the cache doesn't contain that
|
||||
/// information, i.e. we don't know if the relation exists or not.
|
||||
pub fn get_rel_exists(&'t self, rel: &RelTag) -> CacheResult<bool> {
|
||||
// we don't currently cache negative entries, so if the relation is in the cache, it exists
|
||||
if let Some(_rel_entry) = self.relsize_cache.get(&RelKey::from(rel)) {
|
||||
CacheResult::Found(true)
|
||||
} else {
|
||||
let lsn = Lsn(self.global_lw_lsn.load(Ordering::Relaxed));
|
||||
CacheResult::NotFound(lsn)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_db_size(&'t self, _db_oid: u32) -> CacheResult<u64> {
|
||||
// TODO: it would be nice to cache database sizes too. Getting the database size
|
||||
// is not a very common operation, but when you do it, it's often interactive, with
|
||||
// e.g. psql \l+ command, so the user will feel the latency.
|
||||
|
||||
// fixme: is this right lsn?
|
||||
let lsn = Lsn(self.global_lw_lsn.load(Ordering::Relaxed));
|
||||
CacheResult::NotFound(lsn)
|
||||
}
|
||||
|
||||
pub fn remember_rel_size(&'t self, rel: &RelTag, nblocks: u32, lsn: Lsn) {
|
||||
match self.relsize_cache.entry(RelKey::from(rel)) {
|
||||
Entry::Vacant(e) => {
|
||||
tracing::info!("inserting rel entry for {rel:?}, {nblocks} blocks");
|
||||
// FIXME: what to do if we run out of memory? Evict other relation entries?
|
||||
_ = e
|
||||
.insert(RelEntry {
|
||||
nblocks: AtomicU32::new(nblocks),
|
||||
lw_lsn: AtomicLsn::new(lsn.0),
|
||||
})
|
||||
.expect("out of memory");
|
||||
}
|
||||
Entry::Occupied(e) => {
|
||||
tracing::info!("updating rel entry for {rel:?}, {nblocks} blocks");
|
||||
e.get().nblocks.store(nblocks, Ordering::Relaxed);
|
||||
e.get().lw_lsn.store(lsn);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Remember the given page contents in the cache.
///
/// `is_write` distinguishes two callers:
/// - `true`: the page was just written by Postgres; an existing BlockEntry
///   (possibly with cached contents) may be overwritten in place. The entry
///   is pinned across the file write to prevent eviction.
/// - `false`: the page was fetched (e.g. from the pageserver); the caller is
///   assumed to hold the io-in-progress lock and to have verified the page
///   is not already cached, so finding an existing cache block is a panic.
pub async fn remember_page(
    &'t self,
    rel: &RelTag,
    block_number: u32,
    src: impl uring_common::buf::IoBuf + Send + Sync,
    lw_lsn: Lsn,
    is_write: bool,
) {
    let key = BlockKey::from((rel, block_number));

    // FIXME: make this work when file cache is disabled. Or make it mandatory
    let file_cache = self.file_cache.as_ref().unwrap();

    if is_write {
        // there should be no concurrent IOs. If a backend tries to read the page
        // at the same time, they may get a torn write. That's the same as with
        // regular POSIX filesystem read() and write()

        // First check if we have a block in cache already
        let mut old_cache_block = None;
        let mut found_existing = false;

        // NOTE(quantumish): honoring original semantics here (used to be update_with_fn)
        // but I don't see any reason why this has to take a write lock.
        if let Entry::Occupied(e) = self.block_map.entry(key.clone()) {
            let block_entry = e.get();
            found_existing = true;

            // Prevent this entry from being evicted
            let pin_count = block_entry.pinned.fetch_add(1, Ordering::Relaxed);
            if pin_count > 0 {
                // this is unexpected, because the caller has obtained the io-in-progress lock,
                // so no one else should try to modify the page at the same time.
                // XXX: and I think a read should not be happening either, because the postgres
                // buffer is held locked. TODO: check these conditions and tidy this up a little. Seems fragile to just panic.
                panic!("block entry was unexpectedly pinned");
            }

            let cache_block = block_entry.cache_block.load(Ordering::Relaxed);
            old_cache_block = if cache_block != INVALID_CACHE_BLOCK {
                Some(cache_block)
            } else {
                None
            };
        }

        // Allocate a new block if required. If the free list is empty, evict
        // an existing page to make room, retrying until one succeeds.
        let cache_block = old_cache_block.unwrap_or_else(|| {
            loop {
                if let Some(x) = file_cache.alloc_block() {
                    break x;
                }
                if let Some(x) = self.try_evict_one_cache_block() {
                    break x;
                }
            }
        });

        // Write the page to the cache file
        file_cache
            .write_block(cache_block, src)
            .await
            .expect("error writing to cache");
        // FIXME: handle errors gracefully.
        // FIXME: unpin the block entry on error

        // Update the block entry
        let entry = self.block_map.entry(key);
        // The pin taken above (when found_existing) must have kept the entry alive.
        assert_eq!(found_existing, matches!(entry, Entry::Occupied(_)));
        match entry {
            Entry::Occupied(e) => {
                let block_entry = e.get();
                // Update the cache block. Either it was INVALID and we install
                // our new block, or it already pointed at `cache_block`
                // (the old-block reuse path) and the CAS fails benignly.
                let old_blk = block_entry.cache_block.compare_exchange(
                    INVALID_CACHE_BLOCK,
                    cache_block,
                    Ordering::Relaxed,
                    Ordering::Relaxed,
                );
                assert!(old_blk == Ok(INVALID_CACHE_BLOCK) || old_blk == Err(cache_block));

                block_entry.lw_lsn.store(lw_lsn);

                block_entry.referenced.store(true, Ordering::Relaxed);

                // Release the pin taken at the top of the is_write branch.
                let pin_count = block_entry.pinned.fetch_sub(1, Ordering::Relaxed);
                assert!(pin_count > 0);
            }
            Entry::Vacant(e) => {
                // FIXME: what to do if we run out of memory? Evict other relation entries? Remove
                // block entries first?
                _ = e
                    .insert(BlockEntry {
                        lw_lsn: AtomicLsn::new(lw_lsn.0),
                        cache_block: AtomicU64::new(cache_block),
                        pinned: AtomicU64::new(0),
                        referenced: AtomicBool::new(true),
                    })
                    .expect("out of memory");
            }
        }
    } else {
        // !is_write
        //
        // We can assume that it doesn't already exist, because the
        // caller is assumed to have already checked it, and holds
        // the io-in-progress lock. (The BlockEntry might exist, but no cache block)

        // Allocate a new block first
        let cache_block = {
            loop {
                if let Some(x) = file_cache.alloc_block() {
                    break x;
                }
                if let Some(x) = self.try_evict_one_cache_block() {
                    break x;
                }
            }
        };

        // Write the page to the cache file
        file_cache
            .write_block(cache_block, src)
            .await
            .expect("error writing to cache");
        // FIXME: handle errors gracefully.

        match self.block_map.entry(key) {
            Entry::Occupied(e) => {
                let block_entry = e.get();
                // FIXME: could there be concurrent readers?
                assert!(block_entry.pinned.load(Ordering::Relaxed) == 0);

                let old_cache_block =
                    block_entry.cache_block.swap(cache_block, Ordering::Relaxed);
                if old_cache_block != INVALID_CACHE_BLOCK {
                    panic!(
                        "remember_page called in !is_write mode, but page is already cached at blk {old_cache_block}"
                    );
                }
            }
            Entry::Vacant(e) => {
                // FIXME: what to do if we run out of memory? Evict other relation entries? Remove
                // block entries first?
                _ = e
                    .insert(BlockEntry {
                        lw_lsn: AtomicLsn::new(lw_lsn.0),
                        cache_block: AtomicU64::new(cache_block),
                        pinned: AtomicU64::new(0),
                        referenced: AtomicBool::new(true),
                    })
                    .expect("out of memory");
            }
        }
    }
}
|
||||
|
||||
/// Forget information about given relation in the cache. (For DROP TABLE and such)
///
/// Removes the relsize entry and advances the global last-written LSN to
/// `flush_lsn`, so subsequent misses will not read stale data from the
/// pageserver. Forgetting the relation's cached *blocks* is not implemented
/// yet (see the commented-out code below).
pub fn forget_rel(&'t self, rel: &RelTag, _nblocks: Option<u32>, flush_lsn: Lsn) {
    tracing::info!("forgetting rel entry for {rel:?}");
    self.relsize_cache.remove(&RelKey::from(rel));

    // update with flush LSN
    let _ = self.global_lw_lsn.fetch_max(flush_lsn.0, Ordering::Relaxed);

    // also forget all cached blocks for the relation
    // FIXME
    /*
    let mut iter = MapIterator::new(&key_range_for_rel_blocks(rel));
    let r = self.cache_tree.start_read();
    while let Some((k, _v)) = iter.next(&r) {
        let w = self.cache_tree.start_write();

        let mut evicted_cache_block = None;

        let res = w.update_with_fn(&k, |e| {
            if let Some(e) = e {
                let block_entry = if let MapEntry::Block(e) = e {
                    e
                } else {
                    panic!("unexpected map entry type for block key");
                };
                let cache_block = block_entry
                    .cache_block
                    .swap(INVALID_CACHE_BLOCK, Ordering::Relaxed);
                if cache_block != INVALID_CACHE_BLOCK {
                    evicted_cache_block = Some(cache_block);
                }
                UpdateAction::Remove
            } else {
                UpdateAction::Nothing
            }
        });

        // FIXME: It's pretty surprising to run out of memory while removing. But
        // maybe it can happen because of trying to shrink a node?
        res.expect("out of memory");

        if let Some(evicted_cache_block) = evicted_cache_block {
            self.file_cache
                .as_ref()
                .unwrap()
                .dealloc_block(evicted_cache_block);
        }
    }

    */
}
|
||||
|
||||
// Maintenance routines

/// Evict one block from the file cache. This is used when the file cache fills up
/// Returns the evicted block. It's not put to the free list, so it's available for the
/// caller to use immediately.
///
/// Uses a clock algorithm over the block map's buckets: each step clears the
/// 'referenced' bit, and an entry whose bit was already clear becomes an
/// eviction candidate (unless pinned). Gives up and returns None after 100
/// steps without finding a victim.
pub fn try_evict_one_cache_block(&self) -> Option<CacheBlock> {
    // Serializes evictors; the hand position persists across calls.
    let mut clock_hand = self.clock_hand.lock().unwrap();
    for _ in 0..100 {
        self.clock_iterations_counter.inc();

        (*clock_hand) += 1;

        let mut evict_this = false;
        let num_buckets = self.block_map.get_num_buckets();
        match self
            .block_map
            .get_at_bucket((*clock_hand) % num_buckets)
            .as_deref()
        {
            None => {
                // This bucket was unused
            }
            Some((_, blk_entry)) => {
                // Clear the reference bit; if it was already clear, the
                // entry has not been touched since the last pass.
                if !blk_entry.referenced.swap(false, Ordering::Relaxed) {
                    // Evict this. Maybe.
                    evict_this = true;
                }
            }
        };

        if evict_this {
            // grab the write lock
            let mut evicted_cache_block = None;
            if let Some(e) = self.block_map.entry_at_bucket(*clock_hand % num_buckets) {
                let old = e.get();
                // note: all the accesses to 'pinned' currently happen
                // within update_with_fn(), or while holding ValueReadGuard, which protects from concurrent
                // updates. Otherwise, another thread could set the 'pinned'
                // flag just after we have checked it here.
                if old.pinned.load(Ordering::Relaxed) == 0 {
                    // Fold the victim's last-written LSN into the global one
                    // before the entry disappears from the map.
                    let _ = self
                        .global_lw_lsn
                        .fetch_max(old.lw_lsn.load().0, Ordering::Relaxed);
                    let cache_block =
                        old.cache_block.swap(INVALID_CACHE_BLOCK, Ordering::Relaxed);
                    if cache_block != INVALID_CACHE_BLOCK {
                        evicted_cache_block = Some(cache_block);
                    }
                    e.remove();
                }
            }

            if evicted_cache_block.is_some() {
                self.page_evictions_counter.inc();
                return evicted_cache_block;
            }
        }
    }
    // Give up if we didn't find anything
    None
}
|
||||
|
||||
/// Resize the local file cache.
///
/// NOTE(review): despite the name, this grows the block *map* (hash
/// buckets), using the map's bucket count as the "old size" — confirm
/// whether the cache file itself needs resizing too, or whether the
/// bucket count is intentionally kept equal to the file's block count.
pub fn resize_file_cache(&self, num_blocks: u32) {
    let old_num_blocks = self.block_map.get_num_buckets() as u32;

    if old_num_blocks < num_blocks {
        // Growing is best-effort: on failure, keep the old size and log.
        if let Err(err) = self.block_map.grow(num_blocks) {
            tracing::warn!(
                "could not grow file cache to {} blocks (old size {}): {}",
                num_blocks,
                old_num_blocks,
                err
            );
        }
    } else {
        // TODO: Shrinking not implemented yet
    }
}
|
||||
|
||||
/// Dump the contents of the cache map for debugging. Currently a no-op.
pub fn dump_map(&self, _dst: &mut dyn std::io::Write) {
    //FIXME self.cache_map.start_read().dump(dst);
}
|
||||
}
|
||||
|
||||
/// Prometheus-style collector exposing the integrated cache's counters and
/// gauges. Gauges are refreshed from the live hash maps on every collect().
impl metrics::core::Collector for IntegratedCacheWriteAccess<'_> {
    fn desc(&self) -> Vec<&metrics::core::Desc> {
        let mut all = Vec::new();
        for descs in [
            self.page_evictions_counter.desc(),
            self.clock_iterations_counter.desc(),
            self.block_map_num_buckets.desc(),
            self.block_map_num_buckets_in_use.desc(),
            self.relsize_cache_num_buckets.desc(),
            self.relsize_cache_num_buckets_in_use.desc(),
        ] {
            all.extend(descs);
        }
        all
    }

    fn collect(&self) -> Vec<metrics::proto::MetricFamily> {
        // Refresh the gauges from the current hash map statistics first.
        self.block_map_num_buckets
            .set(self.block_map.get_num_buckets() as i64);
        self.block_map_num_buckets_in_use
            .set(self.block_map.get_num_buckets_in_use() as i64);
        self.relsize_cache_num_buckets
            .set(self.relsize_cache.get_num_buckets() as i64);
        self.relsize_cache_num_buckets_in_use
            .set(self.relsize_cache.get_num_buckets_in_use() as i64);

        let mut all = Vec::new();
        for families in [
            self.page_evictions_counter.collect(),
            self.clock_iterations_counter.collect(),
            self.block_map_num_buckets.collect(),
            self.block_map_num_buckets_in_use.collect(),
            self.relsize_cache_num_buckets.collect(),
            self.relsize_cache_num_buckets_in_use.collect(),
        ] {
            all.extend(families);
        }
        all
    }
}
|
||||
|
||||
/// Read relation size from the cache.
|
||||
///
|
||||
/// This is in a separate function so that it can be shared by
|
||||
/// IntegratedCacheReadAccess::get_rel_size() and IntegratedCacheWriteAccess::get_rel_size()
|
||||
fn get_rel_size(
|
||||
r: &neon_shmem::hash::HashMapAccess<RelKey, RelEntry>,
|
||||
rel: &RelTag,
|
||||
) -> Option<u32> {
|
||||
if let Some(rel_entry) = r.get(&RelKey::from(rel)) {
|
||||
let nblocks = rel_entry.nblocks.load(Ordering::Relaxed);
|
||||
if nblocks != u32::MAX {
|
||||
Some(nblocks)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Result of IntegratedCacheReadAccess::get_bucket(): the contents of one
/// bucket of the block map.
pub enum GetBucketResult {
    /// The bucket holds an entry for this relation and block number.
    Occupied(RelTag, u32),
    /// The bucket is within bounds but currently free.
    Vacant,
    /// The bucket number is past the end of the map.
    OutOfBounds,
}
|
||||
|
||||
/// Accessor for other backends
|
||||
///
|
||||
/// This allows backends to read pages from the cache directly, on their own, without making a
|
||||
/// request to the communicator process.
|
||||
impl<'t> IntegratedCacheReadAccess<'t> {
|
||||
pub fn get_rel_size(&'t self, rel: &RelTag) -> Option<u32> {
|
||||
get_rel_size(&self.relsize_cache, rel)
|
||||
}
|
||||
|
||||
pub fn start_read_op(&'t self) -> BackendCacheReadOp<'t> {
|
||||
BackendCacheReadOp {
|
||||
read_guards: Vec::new(),
|
||||
map_access: self,
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if the given page is present in the cache
|
||||
pub fn cache_contains_page(&'t self, rel: &RelTag, block_number: u32) -> bool {
|
||||
self.block_map
|
||||
.get(&BlockKey::from((rel, block_number)))
|
||||
.is_some()
|
||||
}
|
||||
|
||||
pub fn get_bucket(&self, bucket_no: usize) -> GetBucketResult {
|
||||
match self.block_map.get_at_bucket(bucket_no).as_deref() {
|
||||
None => {
|
||||
// free bucket, or out of bounds
|
||||
if bucket_no >= self.block_map.get_num_buckets() {
|
||||
GetBucketResult::OutOfBounds
|
||||
} else {
|
||||
GetBucketResult::Vacant
|
||||
}
|
||||
}
|
||||
Some((key, _)) => GetBucketResult::Occupied(key.rel, key.block_number),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// An in-progress read operation by a backend process.
///
/// Holds pin guards (DeferredUnpin) on the block entries being read, so that
/// they cannot be evicted until this op is dropped.
pub struct BackendCacheReadOp<'t> {
    read_guards: Vec<DeferredUnpin>,
    map_access: &'t IntegratedCacheReadAccess<'t>,
}
|
||||
|
||||
impl<'e> BackendCacheReadOp<'e> {
    /// Initiate a read of the page from the cache.
    ///
    /// This returns the "cache block number", i.e. the block number within the cache file, where
    /// the page's contents is stored. To get the page contents, the caller needs to read that block
    /// from the cache file. This returns a guard object that you must hold while it performs the
    /// read. It's possible that while you are performing the read, the cache block is invalidated.
    /// After you have completed the read, call BackendCacheReadResult::finish() to check if the
    /// read was in fact valid or not. If it was concurrently invalidated, you need to retry.
    ///
    /// Returns None if the page is not in the cache (no entry, or an entry
    /// without cached contents).
    pub fn get_page(&mut self, rel: &RelTag, block_number: u32) -> Option<u64> {
        if let Some(block_entry) = self
            .map_access
            .block_map
            .get(&BlockKey::from((rel, block_number)))
        {
            // Mark as recently used for the clock eviction algorithm.
            block_entry.referenced.store(true, Ordering::Relaxed);

            let cache_block = block_entry.cache_block.load(Ordering::Relaxed);
            if cache_block != INVALID_CACHE_BLOCK {
                // Pin the entry; the guard (dropped with this op) unpins it.
                block_entry.pinned.fetch_add(1, Ordering::Relaxed);
                self.read_guards
                    .push(DeferredUnpin(block_entry.pinned.as_ptr()));
                Some(cache_block)
            } else {
                None
            }
        } else {
            None
        }
    }

    /// Finish the read op, reporting whether the reads were valid.
    pub fn finish(self) -> bool {
        // TODO: currently, we hold a pin on the in-memory map, so concurrent invalidations are not
        // possible. But if we switch to optimistic locking, this would return 'false' if the
        // optimistic locking failed and you need to retry.
        true
    }
}
|
||||
|
||||
/// A hack to decrement an AtomicU64 on drop. This is used to decrement the pin count
/// of a BlockEntry. The safety depends on the fact that the BlockEntry is not evicted
/// or moved while it's pinned.
struct DeferredUnpin(*mut u64);

// SAFETY: the raw pointer targets the 'pinned' AtomicU64 of a live BlockEntry
// and is only ever used for an atomic decrement (see the Drop impl), so it is
// safe to send and share across threads.
unsafe impl Sync for DeferredUnpin {}
unsafe impl Send for DeferredUnpin {}
|
||||
|
||||
impl Drop for DeferredUnpin {
    /// Decrement the pin count when the guard goes out of scope.
    fn drop(&mut self) {
        // unpin it
        // SAFETY: self.0 was obtained from AtomicU64::as_ptr() on a pin
        // counter that outlives this guard — the BlockEntry cannot be
        // evicted or moved while its pin count is non-zero.
        unsafe {
            let pin_ref = AtomicU64::from_ptr(self.0);
            pin_ref.fetch_sub(1, Ordering::Relaxed);
        }
    }
}
|
||||
@@ -1,5 +1,29 @@
|
||||
//! Three main parts:
|
||||
//! - async tokio communicator core, which receives requests and processes them.
|
||||
//! - Main loop and requests queues, which routes requests from backends to the core
|
||||
//! - the per-backend glue code, which submits requests
|
||||
|
||||
mod backend_comms;
|
||||
|
||||
// mark this 'pub', because these functions are called from C code. Otherwise, the compiler
|
||||
// complains about a bunch of structs and enum variants being unused, because it thinks
|
||||
// the functions that use them are never called. There are some C-callable functions in
|
||||
// other modules too, but marking this as pub is currently enough to silence the warnings
|
||||
//
|
||||
// TODO: perhaps collect *all* the extern "C" functions to one module?
|
||||
pub mod backend_interface;
|
||||
|
||||
mod file_cache;
|
||||
mod init;
|
||||
mod integrated_cache;
|
||||
mod neon_request;
|
||||
mod worker_process;
|
||||
|
||||
mod global_allocator;
|
||||
|
||||
/// Name of the Unix Domain Socket that serves the metrics, and other APIs in the
|
||||
/// future. This is within the Postgres data directory.
|
||||
const NEON_COMMUNICATOR_SOCKET_NAME: &str = "neon-communicator.socket";
|
||||
|
||||
// FIXME: get this from postgres headers somehow
|
||||
pub const BLCKSZ: usize = 8192;
|
||||
|
||||
432
pgxn/neon/communicator/src/neon_request.rs
Normal file
432
pgxn/neon/communicator/src/neon_request.rs
Normal file
@@ -0,0 +1,432 @@
|
||||
// Definitions of some core PostgreSQL datatypes.
|
||||
|
||||
/// XLogRecPtr is defined in "access/xlogdefs.h" as:
|
||||
///
|
||||
/// ```
|
||||
/// typedef uint64 XLogRecPtr;
|
||||
/// ```
|
||||
/// cbindgen:no-export
|
||||
pub type XLogRecPtr = u64;
|
||||
|
||||
pub type CLsn = XLogRecPtr;
|
||||
pub type COid = u32;
|
||||
|
||||
// This conveniently matches PG_IOV_MAX
|
||||
pub const MAX_GETPAGEV_PAGES: usize = 32;
|
||||
|
||||
pub const INVALID_BLOCK_NUMBER: u32 = u32::MAX;
|
||||
|
||||
use std::ffi::CStr;
|
||||
|
||||
use pageserver_page_api::{self as page_api, SlruKind};
|
||||
|
||||
/// Request from a Postgres backend to the communicator process
#[allow(clippy::large_enum_variant)]
#[repr(C)]
#[derive(Copy, Clone, Debug, strum_macros::EnumDiscriminants)]
#[strum_discriminants(derive(measured::FixedCardinalityLabel))]
pub enum NeonIORequest {
    /// Placeholder meaning "no request"; its request ID is 0.
    Empty,

    // Read requests. These are C-friendly variants of the corresponding structs in
    // pageserver_page_api.
    RelSize(CRelSizeRequest),
    GetPageV(CGetPageVRequest),
    ReadSlruSegment(CReadSlruSegmentRequest),
    PrefetchV(CPrefetchVRequest),
    DbSize(CDbSizeRequest),

    // Write requests. These are needed to keep the relation size cache and LFC up-to-date.
    // They are not sent to the pageserver.
    WritePage(CWritePageRequest),
    RelExtend(CRelExtendRequest),
    RelZeroExtend(CRelZeroExtendRequest),
    RelCreate(CRelCreateRequest),
    RelTruncate(CRelTruncateRequest),
    RelUnlink(CRelUnlinkRequest),

    // Other requests
    UpdateCachedRelSize(CUpdateCachedRelSizeRequest),
}
|
||||
|
||||
/// Result of a [`NeonIORequest`], passed back to the requesting backend.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub enum NeonIOResult {
    Empty,
    /// InvalidBlockNumber == 0xffffffff means "rel does not exist"
    RelSize(u32),

    /// the result pages are written to the shared memory addresses given in the request
    GetPageV,
    /// The result is written to the file, path to which is provided
    /// in the request. The [`u64`] value here is the number of blocks.
    ReadSlruSegment(u64),

    /// A prefetch request returns as soon as the request has been received by the communicator.
    /// It is processed in the background.
    PrefetchVLaunched,

    DbSize(u64),

    // FIXME design compact error codes. Can't easily pass a string or other dynamic data.
    // currently, this is 'errno'
    Error(i32),

    Aborted,

    /// used for all write requests
    WriteOK,
}
|
||||
|
||||
impl NeonIORequest {
|
||||
/// All requests include a unique request ID, which can be used to trace the execution
|
||||
/// of a request all the way to the pageservers. The request ID needs to be unique
|
||||
/// within the lifetime of the Postgres instance (but not across servers or across
|
||||
/// restarts of the same server).
|
||||
pub fn request_id(&self) -> u64 {
|
||||
use NeonIORequest::*;
|
||||
match self {
|
||||
Empty => 0,
|
||||
RelSize(req) => req.request_id,
|
||||
GetPageV(req) => req.request_id,
|
||||
ReadSlruSegment(req) => req.request_id,
|
||||
PrefetchV(req) => req.request_id,
|
||||
DbSize(req) => req.request_id,
|
||||
WritePage(req) => req.request_id,
|
||||
RelExtend(req) => req.request_id,
|
||||
RelZeroExtend(req) => req.request_id,
|
||||
RelCreate(req) => req.request_id,
|
||||
RelTruncate(req) => req.request_id,
|
||||
RelUnlink(req) => req.request_id,
|
||||
UpdateCachedRelSize(req) => req.request_id,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Special quick result to a CGetPageVRequest request, indicating that the
/// requested pages are present in the local file cache. The backend can
/// read the blocks directly from the given LFC blocks.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct CCachedGetPageVResult {
    /// One cache-file block number per requested page.
    pub cache_block_numbers: [u64; MAX_GETPAGEV_PAGES],
}
||||
|
||||
/// ShmemBuf represents a buffer in shared memory.
///
/// SAFETY: The pointer must point to an area in shared memory. The functions allow you to liberally
/// get a mutable pointer to the contents; it is the caller's responsibility to ensure that you
/// don't access a buffer that's you're not allowed to. Inappropriate access to the buffer doesn't
/// violate Rust's safety semantics, but it will mess up and crash Postgres.
///
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct ShmemBuf {
    // Pointer to where the result is written or where to read from. Must point into a buffer in shared memory!
    pub ptr: *mut u8,
}

// SAFETY: the pointer refers to shared memory that is accessible from any
// thread/process; see the struct-level safety contract above.
unsafe impl Send for ShmemBuf {}
unsafe impl Sync for ShmemBuf {}
||||
|
||||
// SAFETY: the buffer is a fixed BLCKSZ-sized region in shared memory; per
// ShmemBuf's contract it stays valid for the duration of the I/O — TODO
// confirm the caller guarantees it is not unmapped mid-I/O.
unsafe impl uring_common::buf::IoBuf for ShmemBuf {
    fn stable_ptr(&self) -> *const u8 {
        self.ptr
    }

    // The buffer always holds exactly one Postgres block.
    fn bytes_init(&self) -> usize {
        crate::BLCKSZ
    }

    fn bytes_total(&self) -> usize {
        crate::BLCKSZ
    }
}
|
||||
|
||||
// SAFETY: see the IoBuf impl and ShmemBuf's struct-level safety contract.
unsafe impl uring_common::buf::IoBufMut for ShmemBuf {
    fn stable_mut_ptr(&mut self) -> *mut u8 {
        self.ptr
    }

    // bytes_init() already reports the full BLCKSZ as initialized, so there
    // is no state to update here — only a bounds check on `pos`.
    unsafe fn set_init(&mut self, pos: usize) {
        if pos > crate::BLCKSZ {
            panic!(
                "set_init called past end of buffer, pos {}, buffer size {}",
                pos,
                crate::BLCKSZ
            );
        }
    }
}
|
||||
|
||||
impl ShmemBuf {
    /// Raw pointer to the buffer contents. Takes `&self` but returns a
    /// mutable pointer: the buffer lives in shared memory, not in this struct.
    pub fn as_mut_ptr(&self) -> *mut u8 {
        self.ptr
    }
}
|
||||
|
||||
/// Parameters of a relation-size request ([`NeonIORequest::RelSize`]).
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct CRelSizeRequest {
    pub request_id: u64,
    pub spc_oid: COid,
    pub db_oid: COid,
    pub rel_number: u32,
    pub fork_number: u8,
    // presumably: don't treat a missing relation as an error — confirm against callers
    pub allow_missing: bool,
}

/// Parameters of a vectored get-page request ([`NeonIORequest::GetPageV`]).
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct CGetPageVRequest {
    pub request_id: u64,
    pub spc_oid: COid,
    pub db_oid: COid,
    pub rel_number: u32,
    pub fork_number: u8,
    pub block_number: u32,
    /// Number of pages requested; `dest` provides MAX_GETPAGEV_PAGES slots.
    pub nblocks: u8,

    // These fields define where the result is written. Must point into a buffer in shared memory!
    pub dest: [ShmemBuf; MAX_GETPAGEV_PAGES],
}
|
||||
|
||||
/// Parameters of an SLRU-segment read ([`NeonIORequest::ReadSlruSegment`]).
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct CReadSlruSegmentRequest {
    pub request_id: u64,
    pub slru_kind: SlruKind,
    pub segment_number: u32,
    pub request_lsn: CLsn,
    /// Must be a null-terminated C string containing the file path
    /// where the communicator will write the SLRU segment.
    pub destination_file_path: ShmemBuf,
}

impl CReadSlruSegmentRequest {
    /// Returns the file path where the communicator will write the
    /// SLRU segment.
    ///
    /// SAFETY: relies on `destination_file_path` pointing at a valid,
    /// NUL-terminated C string in shared memory, as documented on the field.
    /// Invalid bytes are replaced lossily (to_string_lossy).
    pub(crate) fn destination_file_path(&self) -> String {
        unsafe { CStr::from_ptr(self.destination_file_path.as_mut_ptr() as *const _) }
            .to_string_lossy()
            .into_owned()
    }
}
|
||||
|
||||
/// Parameters of a vectored prefetch request ([`NeonIORequest::PrefetchV`]).
/// Like CGetPageVRequest but with no destination buffers: the pages are only
/// pulled into the cache.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct CPrefetchVRequest {
    pub request_id: u64,
    pub spc_oid: COid,
    pub db_oid: COid,
    pub rel_number: u32,
    pub fork_number: u8,
    pub block_number: u32,
    pub nblocks: u8,
}

/// Parameters of a database-size request ([`NeonIORequest::DbSize`]).
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct CDbSizeRequest {
    pub request_id: u64,
    pub db_oid: COid,
}
|
||||
|
||||
/// Parameters of a page-write notification ([`NeonIORequest::WritePage`]).
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct CWritePageRequest {
    pub request_id: u64,
    pub spc_oid: COid,
    pub db_oid: COid,
    pub rel_number: u32,
    pub fork_number: u8,
    pub block_number: u32,
    pub lsn: CLsn,

    // `src` defines the new page contents. Must point into a buffer in shared memory!
    pub src: ShmemBuf,
}

/// Parameters of a relation-extend notification ([`NeonIORequest::RelExtend`]).
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct CRelExtendRequest {
    pub request_id: u64,
    pub spc_oid: COid,
    pub db_oid: COid,
    pub rel_number: u32,
    pub fork_number: u8,
    pub block_number: u32,
    pub lsn: CLsn,

    // `src` defines the new page contents. Must point into a buffer in shared memory!
    pub src: ShmemBuf,
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub struct CRelZeroExtendRequest {
|
||||
pub request_id: u64,
|
||||
pub spc_oid: COid,
|
||||
pub db_oid: COid,
|
||||
pub rel_number: u32,
|
||||
pub fork_number: u8,
|
||||
pub block_number: u32,
|
||||
pub nblocks: u32,
|
||||
pub lsn: CLsn,
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub struct CRelCreateRequest {
|
||||
pub request_id: u64,
|
||||
pub spc_oid: COid,
|
||||
pub db_oid: COid,
|
||||
pub rel_number: u32,
|
||||
pub fork_number: u8,
|
||||
pub lsn: CLsn,
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub struct CRelTruncateRequest {
|
||||
pub request_id: u64,
|
||||
pub spc_oid: COid,
|
||||
pub db_oid: COid,
|
||||
pub rel_number: u32,
|
||||
pub fork_number: u8,
|
||||
pub nblocks: u32,
|
||||
pub lsn: CLsn,
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub struct CRelUnlinkRequest {
|
||||
pub request_id: u64,
|
||||
pub spc_oid: COid,
|
||||
pub db_oid: COid,
|
||||
pub rel_number: u32,
|
||||
pub fork_number: u8,
|
||||
pub lsn: CLsn,
|
||||
}
|
||||
|
||||
impl CRelSizeRequest {
|
||||
pub fn reltag(&self) -> page_api::RelTag {
|
||||
page_api::RelTag {
|
||||
spcnode: self.spc_oid,
|
||||
dbnode: self.db_oid,
|
||||
relnode: self.rel_number,
|
||||
forknum: self.fork_number,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CGetPageVRequest {
|
||||
pub fn reltag(&self) -> page_api::RelTag {
|
||||
page_api::RelTag {
|
||||
spcnode: self.spc_oid,
|
||||
dbnode: self.db_oid,
|
||||
relnode: self.rel_number,
|
||||
forknum: self.fork_number,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CPrefetchVRequest {
|
||||
pub fn reltag(&self) -> page_api::RelTag {
|
||||
page_api::RelTag {
|
||||
spcnode: self.spc_oid,
|
||||
dbnode: self.db_oid,
|
||||
relnode: self.rel_number,
|
||||
forknum: self.fork_number,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CWritePageRequest {
|
||||
pub fn reltag(&self) -> page_api::RelTag {
|
||||
page_api::RelTag {
|
||||
spcnode: self.spc_oid,
|
||||
dbnode: self.db_oid,
|
||||
relnode: self.rel_number,
|
||||
forknum: self.fork_number,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CRelExtendRequest {
|
||||
pub fn reltag(&self) -> page_api::RelTag {
|
||||
page_api::RelTag {
|
||||
spcnode: self.spc_oid,
|
||||
dbnode: self.db_oid,
|
||||
relnode: self.rel_number,
|
||||
forknum: self.fork_number,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CRelZeroExtendRequest {
|
||||
pub fn reltag(&self) -> page_api::RelTag {
|
||||
page_api::RelTag {
|
||||
spcnode: self.spc_oid,
|
||||
dbnode: self.db_oid,
|
||||
relnode: self.rel_number,
|
||||
forknum: self.fork_number,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CRelCreateRequest {
|
||||
pub fn reltag(&self) -> page_api::RelTag {
|
||||
page_api::RelTag {
|
||||
spcnode: self.spc_oid,
|
||||
dbnode: self.db_oid,
|
||||
relnode: self.rel_number,
|
||||
forknum: self.fork_number,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CRelTruncateRequest {
|
||||
pub fn reltag(&self) -> page_api::RelTag {
|
||||
page_api::RelTag {
|
||||
spcnode: self.spc_oid,
|
||||
dbnode: self.db_oid,
|
||||
relnode: self.rel_number,
|
||||
forknum: self.fork_number,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CRelUnlinkRequest {
|
||||
pub fn reltag(&self) -> page_api::RelTag {
|
||||
page_api::RelTag {
|
||||
spcnode: self.spc_oid,
|
||||
dbnode: self.db_oid,
|
||||
relnode: self.rel_number,
|
||||
forknum: self.fork_number,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub struct CUpdateCachedRelSizeRequest {
|
||||
pub request_id: u64,
|
||||
pub spc_oid: COid,
|
||||
pub db_oid: COid,
|
||||
pub rel_number: u32,
|
||||
pub fork_number: u8,
|
||||
pub nblocks: u32,
|
||||
pub lsn: CLsn,
|
||||
}
|
||||
|
||||
impl CUpdateCachedRelSizeRequest {
|
||||
pub fn reltag(&self) -> page_api::RelTag {
|
||||
page_api::RelTag {
|
||||
spcnode: self.spc_oid,
|
||||
dbnode: self.db_oid,
|
||||
relnode: self.rel_number,
|
||||
forknum: self.fork_number,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -4,10 +4,13 @@
|
||||
//!
|
||||
//! These are called from the communicator threads! Careful what you do, most Postgres
|
||||
//! functions are not safe to call in that context.
|
||||
use utils::lsn::Lsn;
|
||||
|
||||
#[cfg(not(test))]
|
||||
unsafe extern "C" {
|
||||
pub fn notify_proc_unsafe(procno: std::ffi::c_int);
|
||||
pub fn callback_set_my_latch_unsafe();
|
||||
pub fn callback_get_request_lsn_unsafe() -> crate::neon_request::CLsn;
|
||||
pub fn callback_get_lfc_metrics_unsafe() -> LfcMetrics;
|
||||
}
|
||||
|
||||
@@ -16,20 +19,36 @@ unsafe extern "C" {
|
||||
// package, but the code coverage build still builds these and tries to link with the
|
||||
// external C code.)
|
||||
#[cfg(test)]
|
||||
unsafe fn notify_proc_unsafe(_procno: std::ffi::c_int) {
|
||||
panic!("not usable in unit tests");
|
||||
}
|
||||
#[cfg(test)]
|
||||
unsafe fn callback_set_my_latch_unsafe() {
|
||||
panic!("not usable in unit tests");
|
||||
}
|
||||
#[cfg(test)]
|
||||
unsafe fn callback_get_request_lsn_unsafe() -> crate::neon_request::CLsn {
|
||||
panic!("not usable in unit tests");
|
||||
}
|
||||
#[cfg(test)]
|
||||
unsafe fn callback_get_lfc_metrics_unsafe() -> LfcMetrics {
|
||||
panic!("not usable in unit tests");
|
||||
}
|
||||
|
||||
// safe wrappers
|
||||
|
||||
pub(super) fn notify_proc(procno: std::ffi::c_int) {
|
||||
unsafe { notify_proc_unsafe(procno) };
|
||||
}
|
||||
|
||||
pub(super) fn callback_set_my_latch() {
|
||||
unsafe { callback_set_my_latch_unsafe() };
|
||||
}
|
||||
|
||||
pub(super) fn get_request_lsn() -> Lsn {
|
||||
Lsn(unsafe { callback_get_request_lsn_unsafe() })
|
||||
}
|
||||
|
||||
pub(super) fn callback_get_lfc_metrics() -> LfcMetrics {
|
||||
unsafe { callback_get_lfc_metrics_unsafe() }
|
||||
}
|
||||
|
||||
@@ -28,7 +28,7 @@ use tokio::net::UnixListener;
|
||||
use crate::NEON_COMMUNICATOR_SOCKET_NAME;
|
||||
use crate::worker_process::main_loop::CommunicatorWorkerProcessStruct;
|
||||
|
||||
impl CommunicatorWorkerProcessStruct {
|
||||
impl<'a> CommunicatorWorkerProcessStruct<'a> {
|
||||
/// Launch the listener
|
||||
pub(crate) async fn launch_control_socket_listener(
|
||||
&'static self,
|
||||
@@ -38,6 +38,7 @@ impl CommunicatorWorkerProcessStruct {
|
||||
.route("/metrics", get(get_metrics))
|
||||
.route("/autoscaling_metrics", get(get_autoscaling_metrics))
|
||||
.route("/debug/panic", get(handle_debug_panic))
|
||||
.route("/debug/dump_cache_map", get(dump_cache_map))
|
||||
.with_state(self);
|
||||
|
||||
// If the server is restarted, there might be an old socket still
|
||||
@@ -68,7 +69,7 @@ impl CommunicatorWorkerProcessStruct {
|
||||
}
|
||||
|
||||
/// Expose all Prometheus metrics.
|
||||
async fn get_metrics(State(state): State<&CommunicatorWorkerProcessStruct>) -> Response {
|
||||
async fn get_metrics(State(state): State<&CommunicatorWorkerProcessStruct<'_>>) -> Response {
|
||||
tracing::trace!("/metrics requested");
|
||||
metrics_to_response(&state).await
|
||||
}
|
||||
@@ -77,13 +78,15 @@ async fn get_metrics(State(state): State<&CommunicatorWorkerProcessStruct>) -> R
|
||||
///
|
||||
/// This is a subset of all the metrics.
|
||||
async fn get_autoscaling_metrics(
|
||||
State(state): State<&CommunicatorWorkerProcessStruct>,
|
||||
State(state): State<&CommunicatorWorkerProcessStruct<'_>>,
|
||||
) -> Response {
|
||||
tracing::trace!("/metrics requested");
|
||||
metrics_to_response(&state.lfc_metrics).await
|
||||
}
|
||||
|
||||
async fn handle_debug_panic(State(_state): State<&CommunicatorWorkerProcessStruct>) -> Response {
|
||||
async fn handle_debug_panic(
|
||||
State(_state): State<&CommunicatorWorkerProcessStruct<'_>>,
|
||||
) -> Response {
|
||||
panic!("test HTTP handler task panic");
|
||||
}
|
||||
|
||||
@@ -100,3 +103,16 @@ async fn metrics_to_response(metrics: &(dyn MetricGroup<BufferedTextEncoder> + S
|
||||
.body(Body::from(enc.finish()))
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
async fn dump_cache_map(
|
||||
State(state): State<&CommunicatorWorkerProcessStruct<'static>>,
|
||||
) -> Response {
|
||||
let mut buf: Vec<u8> = Vec::new();
|
||||
state.cache.dump_map(&mut buf);
|
||||
|
||||
Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header(CONTENT_TYPE, "application/text")
|
||||
.body(Body::from(buf))
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
95
pgxn/neon/communicator/src/worker_process/in_progress_ios.rs
Normal file
95
pgxn/neon/communicator/src/worker_process/in_progress_ios.rs
Normal file
@@ -0,0 +1,95 @@
|
||||
//! Lock table to ensure that only one IO request is in flight for a given
|
||||
//! block (or relation or database metadata) at a time
|
||||
|
||||
use std::cmp::Eq;
|
||||
use std::hash::Hash;
|
||||
use std::sync::Arc;
|
||||
|
||||
use tokio::sync::{Mutex, OwnedMutexGuard};
|
||||
|
||||
use clashmap::ClashMap;
|
||||
use clashmap::Entry;
|
||||
|
||||
use pageserver_page_api::RelTag;
|
||||
|
||||
#[derive(Clone, Eq, Hash, PartialEq)]
|
||||
pub enum RequestInProgressKey {
|
||||
Db(u32),
|
||||
Rel(RelTag),
|
||||
Block(RelTag, u32),
|
||||
}
|
||||
|
||||
type RequestId = u64;
|
||||
|
||||
pub type RequestInProgressTable = MutexHashMap<RequestInProgressKey, RequestId>;
|
||||
|
||||
// more primitive locking thingie:
|
||||
|
||||
pub struct MutexHashMap<K, V>
|
||||
where
|
||||
K: Clone + Eq + Hash,
|
||||
{
|
||||
lock_table: ClashMap<K, (V, Arc<Mutex<()>>)>,
|
||||
}
|
||||
|
||||
pub struct MutexHashMapGuard<'a, K, V>
|
||||
where
|
||||
K: Clone + Eq + Hash,
|
||||
{
|
||||
pub key: K,
|
||||
map: &'a MutexHashMap<K, V>,
|
||||
mutex: Arc<Mutex<()>>,
|
||||
_guard: OwnedMutexGuard<()>,
|
||||
}
|
||||
|
||||
impl<'a, K, V> Drop for MutexHashMapGuard<'a, K, V>
|
||||
where
|
||||
K: Clone + Eq + Hash,
|
||||
{
|
||||
fn drop(&mut self) {
|
||||
let (_old_key, old_val) = self.map.lock_table.remove(&self.key).unwrap();
|
||||
assert!(Arc::ptr_eq(&old_val.1, &self.mutex));
|
||||
|
||||
// the guard will be dropped as we return
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> MutexHashMap<K, V>
|
||||
where
|
||||
K: Clone + Eq + Hash,
|
||||
V: std::fmt::Display + Copy,
|
||||
{
|
||||
pub fn new() -> MutexHashMap<K, V> {
|
||||
MutexHashMap {
|
||||
lock_table: ClashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn lock<'a>(&'a self, key: K, val: V) -> MutexHashMapGuard<'a, K, V> {
|
||||
let my_mutex = Arc::new(Mutex::new(()));
|
||||
let my_guard = Arc::clone(&my_mutex).lock_owned().await;
|
||||
|
||||
loop {
|
||||
let (request_id, lock) = match self.lock_table.entry(key.clone()) {
|
||||
Entry::Occupied(e) => {
|
||||
let e = e.get();
|
||||
(e.0, Arc::clone(&e.1))
|
||||
}
|
||||
Entry::Vacant(e) => {
|
||||
e.insert((val, Arc::clone(&my_mutex)));
|
||||
break;
|
||||
}
|
||||
};
|
||||
tracing::info!("waiting for conflicting IO {request_id} to complete");
|
||||
let _ = lock.lock().await;
|
||||
tracing::info!("conflicting IO {request_id} completed");
|
||||
}
|
||||
|
||||
MutexHashMapGuard {
|
||||
key,
|
||||
map: self,
|
||||
mutex: my_mutex,
|
||||
_guard: my_guard,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,34 +1,111 @@
|
||||
use std::collections::HashMap;
|
||||
use std::os::fd::AsRawFd;
|
||||
use std::os::fd::OwnedFd;
|
||||
use std::path::PathBuf;
|
||||
use std::str::FromStr as _;
|
||||
|
||||
use crate::backend_comms::NeonIORequestSlot;
|
||||
use crate::file_cache::FileCache;
|
||||
use crate::global_allocator::MyAllocatorCollector;
|
||||
use crate::init::CommunicatorInitStruct;
|
||||
use crate::integrated_cache::{CacheResult, IntegratedCacheWriteAccess};
|
||||
use crate::neon_request::{CGetPageVRequest, CPrefetchVRequest};
|
||||
use crate::neon_request::{INVALID_BLOCK_NUMBER, NeonIORequest, NeonIOResult};
|
||||
use crate::worker_process::in_progress_ios::{RequestInProgressKey, RequestInProgressTable};
|
||||
use crate::worker_process::lfc_metrics::LfcMetricsCollector;
|
||||
use pageserver_client_grpc::{PageserverClient, ShardSpec, ShardStripeSize};
|
||||
use pageserver_page_api as page_api;
|
||||
|
||||
use tokio::io::AsyncReadExt;
|
||||
use tokio_pipe::PipeRead;
|
||||
use uring_common::buf::IoBuf;
|
||||
|
||||
use measured::MetricGroup;
|
||||
use measured::metric::MetricEncoding;
|
||||
use measured::metric::gauge::GaugeState;
|
||||
use measured::metric::group::Encoding;
|
||||
use measured::{Gauge, GaugeVec};
|
||||
use utils::id::{TenantId, TimelineId};
|
||||
|
||||
pub struct CommunicatorWorkerProcessStruct {
|
||||
use super::callbacks::{get_request_lsn, notify_proc};
|
||||
|
||||
use tracing::{debug, error, info, info_span, trace};
|
||||
|
||||
use utils::lsn::Lsn;
|
||||
|
||||
pub struct CommunicatorWorkerProcessStruct<'a> {
|
||||
/// Tokio runtime that the main loop and any other related tasks runs in.
|
||||
runtime: tokio::runtime::Runtime,
|
||||
|
||||
/// Client to communicate with the pageserver
|
||||
client: Option<PageserverClient>,
|
||||
|
||||
/// Request slots that backends use to send IO requests to the communicator.
|
||||
neon_request_slots: &'a [NeonIORequestSlot],
|
||||
|
||||
/// Notification pipe. Backends use this to notify the communicator that a request is waiting to
|
||||
/// be processed in one of the request slots.
|
||||
submission_pipe_read_fd: OwnedFd,
|
||||
|
||||
/// Locking table for all in-progress IO requests.
|
||||
in_progress_table: RequestInProgressTable,
|
||||
|
||||
/// Local File Cache, relation size tracking, last-written LSN tracking
|
||||
pub(crate) cache: IntegratedCacheWriteAccess<'a>,
|
||||
|
||||
/*** Metrics ***/
|
||||
pub(crate) lfc_metrics: LfcMetricsCollector,
|
||||
|
||||
request_counters: GaugeVec<RequestTypeLabelGroupSet>,
|
||||
|
||||
getpage_cache_misses_counter: Gauge,
|
||||
getpage_cache_hits_counter: Gauge,
|
||||
|
||||
// For the requests that affect multiple blocks, have separate counters for the # of blocks affected
|
||||
request_nblocks_counters: GaugeVec<RequestTypeLabelGroupSet>,
|
||||
|
||||
#[allow(dead_code)]
|
||||
allocator_metrics: MyAllocatorCollector,
|
||||
}
|
||||
|
||||
// Define a label group, consisting of 1 or more label values
|
||||
#[derive(measured::LabelGroup)]
|
||||
#[label(set = RequestTypeLabelGroupSet)]
|
||||
struct RequestTypeLabelGroup {
|
||||
request_type: crate::neon_request::NeonIORequestDiscriminants,
|
||||
}
|
||||
|
||||
impl RequestTypeLabelGroup {
|
||||
fn from_req(req: &NeonIORequest) -> Self {
|
||||
RequestTypeLabelGroup {
|
||||
request_type: req.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Launch the communicator process's Rust subsystems
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub(super) fn init(
|
||||
cis: CommunicatorInitStruct,
|
||||
tenant_id: Option<&str>,
|
||||
timeline_id: Option<&str>,
|
||||
) -> Result<&'static CommunicatorWorkerProcessStruct, String> {
|
||||
auth_token: Option<&str>,
|
||||
shard_map: HashMap<utils::shard::ShardIndex, String>,
|
||||
stripe_size: Option<ShardStripeSize>,
|
||||
initial_file_cache_size: u64,
|
||||
file_cache_path: Option<PathBuf>,
|
||||
) -> Result<&'static CommunicatorWorkerProcessStruct<'static>, String> {
|
||||
// The caller validated these already
|
||||
let _tenant_id = tenant_id
|
||||
let tenant_id = tenant_id
|
||||
.map(TenantId::from_str)
|
||||
.transpose()
|
||||
.map_err(|e| format!("invalid tenant ID: {e}"))?;
|
||||
let _timeline_id = timeline_id
|
||||
let timeline_id = timeline_id
|
||||
.map(TimelineId::from_str)
|
||||
.transpose()
|
||||
.map_err(|e| format!("invalid timeline ID: {e}"))?;
|
||||
let shard_spec =
|
||||
ShardSpec::new(shard_map, stripe_size).map_err(|e| format!("invalid shard spec: {e}:"))?;
|
||||
|
||||
let runtime = tokio::runtime::Builder::new_multi_thread()
|
||||
.enable_all()
|
||||
@@ -36,16 +113,72 @@ pub(super) fn init(
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let last_lsn = get_request_lsn();
|
||||
|
||||
let file_cache = if let Some(path) = file_cache_path {
|
||||
Some(FileCache::new(&path, initial_file_cache_size).expect("could not create cache file"))
|
||||
} else {
|
||||
// FIXME: temporarily for testing, use LFC even if disabled
|
||||
Some(
|
||||
FileCache::new(&PathBuf::from("new_filecache"), 1000)
|
||||
.expect("could not create cache file"),
|
||||
)
|
||||
};
|
||||
|
||||
// Initialize subsystems
|
||||
let cache = cis
|
||||
.integrated_cache_init_struct
|
||||
.worker_process_init(last_lsn, file_cache);
|
||||
|
||||
debug!("Initialised integrated cache: {cache:?}");
|
||||
|
||||
let client = if let (Some(tenant_id), Some(timeline_id)) = (tenant_id, timeline_id) {
|
||||
let _guard = runtime.enter();
|
||||
Some(
|
||||
PageserverClient::new(
|
||||
tenant_id,
|
||||
timeline_id,
|
||||
shard_spec,
|
||||
auth_token.map(|s| s.to_string()),
|
||||
None,
|
||||
)
|
||||
.expect("could not create client"),
|
||||
)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let worker_struct = CommunicatorWorkerProcessStruct {
|
||||
// Note: it's important to not drop the runtime, or all the tasks are dropped
|
||||
// too. Including it in the returned struct is one way to keep it around.
|
||||
runtime,
|
||||
neon_request_slots: cis.neon_request_slots,
|
||||
client,
|
||||
cache,
|
||||
submission_pipe_read_fd: cis.submission_pipe_read_fd,
|
||||
in_progress_table: RequestInProgressTable::new(),
|
||||
|
||||
// metrics
|
||||
lfc_metrics: LfcMetricsCollector,
|
||||
|
||||
request_counters: GaugeVec::new(),
|
||||
|
||||
getpage_cache_misses_counter: Gauge::new(),
|
||||
getpage_cache_hits_counter: Gauge::new(),
|
||||
|
||||
request_nblocks_counters: GaugeVec::new(),
|
||||
|
||||
allocator_metrics: MyAllocatorCollector::new(),
|
||||
};
|
||||
|
||||
let worker_struct = Box::leak(Box::new(worker_struct));
|
||||
|
||||
let main_loop_handle = worker_struct.runtime.spawn(worker_struct.run());
|
||||
worker_struct.runtime.spawn(async {
|
||||
let err = main_loop_handle.await.unwrap_err();
|
||||
error!("error: {err:?}");
|
||||
});
|
||||
|
||||
// Start the listener on the control socket
|
||||
worker_struct
|
||||
.runtime
|
||||
@@ -55,12 +188,577 @@ pub(super) fn init(
|
||||
Ok(worker_struct)
|
||||
}
|
||||
|
||||
impl<T> MetricGroup<T> for CommunicatorWorkerProcessStruct
|
||||
impl<'t> CommunicatorWorkerProcessStruct<'t> {
|
||||
/// Update the configuration
|
||||
pub(super) fn update_shard_map(
|
||||
&self,
|
||||
new_shard_map: HashMap<utils::shard::ShardIndex, String>,
|
||||
stripe_size: Option<ShardStripeSize>,
|
||||
) {
|
||||
let client = self.client.as_ref().unwrap();
|
||||
let shard_spec = ShardSpec::new(new_shard_map, stripe_size).expect("invalid shard spec");
|
||||
|
||||
{
|
||||
let _in_runtime = self.runtime.enter();
|
||||
if let Err(err) = client.update_shards(shard_spec) {
|
||||
tracing::error!("could not update shard map: {err:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Main loop of the worker process. Receive requests from the backends and process them.
|
||||
pub(super) async fn run(&'static self) {
|
||||
let mut idxbuf: [u8; 4] = [0; 4];
|
||||
|
||||
let mut submission_pipe_read =
|
||||
PipeRead::try_from(self.submission_pipe_read_fd.as_raw_fd()).expect("invalid pipe fd");
|
||||
|
||||
loop {
|
||||
// Wait for a backend to ring the doorbell
|
||||
match submission_pipe_read.read(&mut idxbuf).await {
|
||||
Ok(4) => {}
|
||||
Ok(nbytes) => panic!("short read ({nbytes} bytes) on communicator pipe"),
|
||||
Err(e) => panic!("error reading from communicator pipe: {e}"),
|
||||
}
|
||||
let slot_idx = u32::from_ne_bytes(idxbuf) as usize;
|
||||
|
||||
// Read the IO request from the slot indicated in the wakeup
|
||||
let Some(slot) = self.neon_request_slots[slot_idx].start_processing_request() else {
|
||||
// This currently should not happen. But if we had multiple threads picking up
|
||||
// requests, and without waiting for the notifications, it could.
|
||||
panic!("no request in slot");
|
||||
};
|
||||
|
||||
// Ok, we have ownership of this request now. We must process it now, there's no going
|
||||
// back.
|
||||
//
|
||||
// Spawn a separate task for every request. That's a little excessive for requests that
|
||||
// can be quickly satisfied from the cache, but we expect that to be rare, because the
|
||||
// requesting backend would have already checked the cache.
|
||||
tokio::spawn(async move {
|
||||
use tracing::Instrument;
|
||||
|
||||
let request_id = slot.get_request().request_id();
|
||||
let owner_procno = slot.get_owner_procno();
|
||||
|
||||
let span = info_span!(
|
||||
"processing",
|
||||
request_id = request_id,
|
||||
slot_idx = slot_idx,
|
||||
procno = owner_procno,
|
||||
);
|
||||
async {
|
||||
// FIXME: as a temporary hack, abort the request if we don't get a response
|
||||
// promptly.
|
||||
//
|
||||
// Lots of regression tests are getting stuck and failing at the moment,
|
||||
// this makes them fail a little faster, which it faster to iterate.
|
||||
// This needs to be removed once more regression tests are passing.
|
||||
// See also similar hack in the backend code, in wait_request_completion()
|
||||
let result = tokio::time::timeout(
|
||||
tokio::time::Duration::from_secs(30),
|
||||
self.handle_request(slot.get_request()),
|
||||
)
|
||||
.await
|
||||
.unwrap_or_else(|_elapsed| {
|
||||
info!("request {request_id} timed out");
|
||||
NeonIOResult::Error(libc::ETIMEDOUT)
|
||||
});
|
||||
trace!("request {request_id} at slot {slot_idx} completed");
|
||||
|
||||
// Ok, we have completed the IO. Mark the request as completed. After that,
|
||||
// we no longer have ownership of the slot, and must not modify it.
|
||||
slot.completed(result);
|
||||
|
||||
// Notify the backend about the completion. (Note that the backend might see
|
||||
// the completed status even before this; this is just a wakeup)
|
||||
notify_proc(owner_procno);
|
||||
}
|
||||
.instrument(span)
|
||||
.await
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute the 'request_lsn' to use for a pageserver request
|
||||
fn request_lsns(&self, not_modified_since_lsn: Lsn) -> page_api::ReadLsn {
|
||||
let mut request_lsn = get_request_lsn();
|
||||
|
||||
// Is it possible that the last-written LSN is ahead of last flush LSN? Generally not, we
|
||||
// shouldn't evict a page from the buffer cache before all its modifications have been
|
||||
// safely flushed. That's the "WAL before data" rule. However, there are a few exceptions:
|
||||
//
|
||||
// - when creation an index: _bt_blwritepage logs the full page without flushing WAL before
|
||||
// smgrextend (files are fsynced before build ends).
|
||||
//
|
||||
// XXX: If we make a request LSN greater than the current WAL flush LSN, the pageserver would
|
||||
// block waiting for the WAL arrive, until we flush it and it propagates through the
|
||||
// safekeepers to the pageserver. If there's nothing that forces the WAL to be flushed,
|
||||
// the pageserver would get stuck waiting forever. To avoid that, all the write-
|
||||
// functions in communicator_new.c call XLogSetAsyncXactLSN(). That nudges the WAL writer to
|
||||
// perform the flush relatively soon.
|
||||
//
|
||||
// It would perhaps be nicer to do the WAL flush here, but it's tricky to call back into
|
||||
// Postgres code to do that from here. That's why we rely on communicator_new.c to do the
|
||||
// calls "pre-emptively".
|
||||
//
|
||||
// FIXME: Because of the above, it can still happen that the flush LSN is ahead of
|
||||
// not_modified_since, if the WAL writer hasn't done the flush yet. It would be nice to know
|
||||
// if there are other cases like that that we have mised, but unfortunately we cannot turn
|
||||
// this into an assertion because of that legit case.
|
||||
//
|
||||
// See also the old logic in neon_get_request_lsns() C function
|
||||
if not_modified_since_lsn > request_lsn {
|
||||
tracing::info!(
|
||||
"not_modified_since_lsn {} is ahead of last flushed LSN {}",
|
||||
not_modified_since_lsn,
|
||||
request_lsn
|
||||
);
|
||||
request_lsn = not_modified_since_lsn;
|
||||
}
|
||||
|
||||
page_api::ReadLsn {
|
||||
request_lsn,
|
||||
not_modified_since_lsn: Some(not_modified_since_lsn),
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle one IO request
|
||||
async fn handle_request(&'static self, request: &'_ NeonIORequest) -> NeonIOResult {
|
||||
let client = self
|
||||
.client
|
||||
.as_ref()
|
||||
.expect("cannot handle requests without client");
|
||||
|
||||
self.request_counters
|
||||
.inc(RequestTypeLabelGroup::from_req(request));
|
||||
match request {
|
||||
NeonIORequest::Empty => {
|
||||
error!("unexpected Empty IO request");
|
||||
NeonIOResult::Error(0)
|
||||
}
|
||||
NeonIORequest::RelSize(req) => {
|
||||
let rel = req.reltag();
|
||||
|
||||
let _in_progress_guard = self
|
||||
.in_progress_table
|
||||
.lock(RequestInProgressKey::Rel(rel), req.request_id)
|
||||
.await;
|
||||
|
||||
// Check the cache first
|
||||
let not_modified_since = match self.cache.get_rel_size(&rel) {
|
||||
CacheResult::Found(nblocks) => {
|
||||
tracing::trace!("found relsize for {:?} in cache: {}", rel, nblocks);
|
||||
return NeonIOResult::RelSize(nblocks);
|
||||
}
|
||||
// XXX: we don't cache negative entries, so if there's no entry in the cache, it could mean
|
||||
// that the relation doesn't exist or that we don't have it cached.
|
||||
CacheResult::NotFound(lsn) => lsn,
|
||||
};
|
||||
|
||||
let read_lsn = self.request_lsns(not_modified_since);
|
||||
match client
|
||||
.get_rel_size(page_api::GetRelSizeRequest {
|
||||
read_lsn,
|
||||
rel,
|
||||
allow_missing: req.allow_missing,
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(Some(nblocks)) => {
|
||||
// update the cache
|
||||
tracing::info!(
|
||||
"updated relsize for {:?} in cache: {}, lsn {}",
|
||||
rel,
|
||||
nblocks,
|
||||
read_lsn
|
||||
);
|
||||
self.cache
|
||||
.remember_rel_size(&rel, nblocks, not_modified_since);
|
||||
|
||||
NeonIOResult::RelSize(nblocks)
|
||||
}
|
||||
Ok(None) => {
|
||||
// TODO: cache negative entry?
|
||||
NeonIOResult::RelSize(INVALID_BLOCK_NUMBER)
|
||||
}
|
||||
Err(err) => {
|
||||
info!("tonic error: {err:?}");
|
||||
NeonIOResult::Error(0)
|
||||
}
|
||||
}
|
||||
}
|
||||
NeonIORequest::GetPageV(req) => match self.handle_get_pagev_request(req).await {
|
||||
Ok(()) => NeonIOResult::GetPageV,
|
||||
Err(errno) => NeonIOResult::Error(errno),
|
||||
},
|
||||
NeonIORequest::ReadSlruSegment(req) => {
|
||||
let lsn = Lsn(req.request_lsn);
|
||||
let file_path = req.destination_file_path();
|
||||
|
||||
match client
|
||||
.get_slru_segment(page_api::GetSlruSegmentRequest {
|
||||
read_lsn: self.request_lsns(lsn),
|
||||
kind: req.slru_kind,
|
||||
segno: req.segment_number,
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(slru_bytes) => {
|
||||
if let Err(e) = tokio::fs::write(&file_path, &slru_bytes).await {
|
||||
info!("could not write slru segment to file {file_path}: {e}");
|
||||
return NeonIOResult::Error(e.raw_os_error().unwrap_or(libc::EIO));
|
||||
}
|
||||
|
||||
let blocks_count = slru_bytes.len() / crate::BLCKSZ;
|
||||
|
||||
NeonIOResult::ReadSlruSegment(blocks_count as _)
|
||||
}
|
||||
Err(err) => {
|
||||
info!("tonic error: {err:?}");
|
||||
NeonIOResult::Error(0)
|
||||
}
|
||||
}
|
||||
}
|
||||
NeonIORequest::PrefetchV(req) => {
|
||||
self.request_nblocks_counters
|
||||
.inc_by(RequestTypeLabelGroup::from_req(request), req.nblocks as i64);
|
||||
let req = *req;
|
||||
tokio::spawn(async move { self.handle_prefetchv_request(&req).await });
|
||||
NeonIOResult::PrefetchVLaunched
|
||||
}
|
||||
NeonIORequest::DbSize(req) => {
|
||||
let _in_progress_guard = self
|
||||
.in_progress_table
|
||||
.lock(RequestInProgressKey::Db(req.db_oid), req.request_id)
|
||||
.await;
|
||||
|
||||
// Check the cache first
|
||||
let not_modified_since = match self.cache.get_db_size(req.db_oid) {
|
||||
CacheResult::Found(db_size) => {
|
||||
// get_page already copied the block content to the destination
|
||||
return NeonIOResult::DbSize(db_size);
|
||||
}
|
||||
CacheResult::NotFound(lsn) => lsn,
|
||||
};
|
||||
|
||||
match client
|
||||
.get_db_size(page_api::GetDbSizeRequest {
|
||||
read_lsn: self.request_lsns(not_modified_since),
|
||||
db_oid: req.db_oid,
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(db_size) => NeonIOResult::DbSize(db_size),
|
||||
Err(err) => {
|
||||
info!("tonic error: {err:?}");
|
||||
NeonIOResult::Error(0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Write requests
|
||||
NeonIORequest::WritePage(req) => {
|
||||
let rel = req.reltag();
|
||||
let _in_progress_guard = self
|
||||
.in_progress_table
|
||||
.lock(
|
||||
RequestInProgressKey::Block(rel, req.block_number),
|
||||
req.request_id,
|
||||
)
|
||||
.await;
|
||||
|
||||
// We must at least update the last-written LSN on the page, but also store the page
|
||||
// image in the LFC while we still have it
|
||||
self.cache
|
||||
.remember_page(&rel, req.block_number, req.src, Lsn(req.lsn), true)
|
||||
.await;
|
||||
NeonIOResult::WriteOK
|
||||
}
|
||||
NeonIORequest::RelExtend(req) => {
|
||||
let rel = req.reltag();
|
||||
let _in_progress_guard = self
|
||||
.in_progress_table
|
||||
.lock(
|
||||
RequestInProgressKey::Block(rel, req.block_number),
|
||||
req.request_id,
|
||||
)
|
||||
.await;
|
||||
|
||||
// We must at least update the last-written LSN on the page and the relation size,
|
||||
// but also store the page image in the LFC while we still have it
|
||||
self.cache
|
||||
.remember_page(&rel, req.block_number, req.src, Lsn(req.lsn), true)
|
||||
.await;
|
||||
self.cache
|
||||
.remember_rel_size(&req.reltag(), req.block_number + 1, Lsn(req.lsn));
|
||||
NeonIOResult::WriteOK
|
||||
}
|
||||
NeonIORequest::RelZeroExtend(req) => {
|
||||
self.request_nblocks_counters
|
||||
.inc_by(RequestTypeLabelGroup::from_req(request), req.nblocks as i64);
|
||||
|
||||
// TODO: need to grab an io-in-progress lock for this? I guess not
|
||||
// TODO: We could put the empty pages to the cache. Maybe have
|
||||
// a marker on the block entries for all-zero pages, instead of
|
||||
// actually storing the empty pages.
|
||||
self.cache.remember_rel_size(
|
||||
&req.reltag(),
|
||||
req.block_number + req.nblocks,
|
||||
Lsn(req.lsn),
|
||||
);
|
||||
NeonIOResult::WriteOK
|
||||
}
|
||||
NeonIORequest::RelCreate(req) => {
|
||||
// TODO: need to grab an io-in-progress lock for this? I guess not
|
||||
self.cache.remember_rel_size(&req.reltag(), 0, Lsn(req.lsn));
|
||||
NeonIOResult::WriteOK
|
||||
}
|
||||
NeonIORequest::RelTruncate(req) => {
|
||||
// TODO: need to grab an io-in-progress lock for this? I guess not
|
||||
self.cache
|
||||
.remember_rel_size(&req.reltag(), req.nblocks, Lsn(req.lsn));
|
||||
NeonIOResult::WriteOK
|
||||
}
|
||||
NeonIORequest::RelUnlink(req) => {
|
||||
// TODO: need to grab an io-in-progress lock for this? I guess not
|
||||
self.cache.forget_rel(&req.reltag(), None, Lsn(req.lsn));
|
||||
NeonIOResult::WriteOK
|
||||
}
|
||||
NeonIORequest::UpdateCachedRelSize(req) => {
|
||||
// TODO: need to grab an io-in-progress lock for this? I guess not
|
||||
self.cache
|
||||
.remember_rel_size(&req.reltag(), req.nblocks, Lsn(req.lsn));
|
||||
NeonIOResult::WriteOK
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Subroutine to handle a GetPageV request, since it's a little more complicated than
|
||||
/// others.
|
||||
async fn handle_get_pagev_request(&'t self, req: &CGetPageVRequest) -> Result<(), i32> {
|
||||
let client = self
|
||||
.client
|
||||
.as_ref()
|
||||
.expect("cannot handle requests without client");
|
||||
let rel = req.reltag();
|
||||
|
||||
// Check the cache first
|
||||
//
|
||||
// Note: Because the backends perform a direct lookup in the cache before sending
|
||||
// the request to the communicator process, we expect the pages to almost never
|
||||
// be already in cache. It could happen if:
|
||||
// 1. two backends try to read the same page at the same time, but that should never
|
||||
// happen because there's higher level locking in the Postgres buffer manager, or
|
||||
// 2. a prefetch request finished at the same time as a backend requested the
|
||||
// page. That's much more likely.
|
||||
let mut cache_misses = Vec::with_capacity(req.nblocks as usize);
|
||||
for i in 0..req.nblocks {
|
||||
let blkno = req.block_number + i as u32;
|
||||
|
||||
// note: this is deadlock-safe even though we hold multiple locks at the same time,
|
||||
// because they're always acquired in the same order.
|
||||
let in_progress_guard = self
|
||||
.in_progress_table
|
||||
.lock(RequestInProgressKey::Block(rel, blkno), req.request_id)
|
||||
.await;
|
||||
|
||||
let dest = req.dest[i as usize];
|
||||
let not_modified_since = match self.cache.get_page(&rel, blkno, dest).await {
|
||||
Ok(CacheResult::Found(_)) => {
|
||||
// get_page already copied the block content to the destination
|
||||
trace!("found blk {} in rel {:?} in LFC", blkno, rel);
|
||||
continue;
|
||||
}
|
||||
Ok(CacheResult::NotFound(lsn)) => lsn,
|
||||
Err(_io_error) => return Err(-1), // FIXME errno?
|
||||
};
|
||||
cache_misses.push((blkno, not_modified_since, dest, in_progress_guard));
|
||||
}
|
||||
self.getpage_cache_misses_counter
|
||||
.inc_by(cache_misses.len() as i64);
|
||||
self.getpage_cache_hits_counter
|
||||
.inc_by(req.nblocks as i64 - cache_misses.len() as i64);
|
||||
|
||||
if cache_misses.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
let not_modified_since = cache_misses
|
||||
.iter()
|
||||
.map(|(_blkno, lsn, _dest, _guard)| *lsn)
|
||||
.max()
|
||||
.unwrap();
|
||||
|
||||
// Construct a pageserver request for the cache misses
|
||||
let block_numbers: Vec<u32> = cache_misses
|
||||
.iter()
|
||||
.map(|(blkno, _lsn, _dest, _guard)| *blkno)
|
||||
.collect();
|
||||
let read_lsn = self.request_lsns(not_modified_since);
|
||||
info!(
|
||||
"sending getpage request for blocks {:?} in rel {:?} lsns {}",
|
||||
block_numbers, rel, read_lsn
|
||||
);
|
||||
match client
|
||||
.get_page(page_api::GetPageRequest {
|
||||
request_id: req.request_id.into(),
|
||||
request_class: page_api::GetPageClass::Normal,
|
||||
read_lsn,
|
||||
rel,
|
||||
block_numbers: block_numbers.clone(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(resp) => {
|
||||
// Write the received page images directly to the shared memory location
|
||||
// that the backend requested.
|
||||
if resp.pages.len() != block_numbers.len() {
|
||||
error!(
|
||||
"received unexpected response with {} page images from pageserver for a request for {} pages",
|
||||
resp.pages.len(),
|
||||
block_numbers.len(),
|
||||
);
|
||||
return Err(-1);
|
||||
}
|
||||
|
||||
info!(
|
||||
"received getpage response for blocks {:?} in rel {:?} lsns {}",
|
||||
block_numbers, rel, read_lsn
|
||||
);
|
||||
|
||||
for (page, (blkno, _lsn, dest, _guard)) in resp.pages.into_iter().zip(cache_misses)
|
||||
{
|
||||
let src: &[u8] = page.image.as_ref();
|
||||
let len = std::cmp::min(src.len(), dest.bytes_total());
|
||||
unsafe {
|
||||
std::ptr::copy_nonoverlapping(src.as_ptr(), dest.as_mut_ptr(), len);
|
||||
};
|
||||
|
||||
// Also store it in the LFC while we have it
|
||||
self.cache
|
||||
.remember_page(
|
||||
&rel,
|
||||
blkno,
|
||||
page.image,
|
||||
read_lsn.not_modified_since_lsn.unwrap(),
|
||||
false,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
info!("tonic error: {err:?}");
|
||||
return Err(-1);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Subroutine to handle a PrefetchV request, since it's a little more complicated than
|
||||
/// others.
|
||||
///
|
||||
/// This is very similar to a GetPageV request, but the results are only stored in the cache.
|
||||
async fn handle_prefetchv_request(&'static self, req: &CPrefetchVRequest) -> Result<(), i32> {
|
||||
let client = self
|
||||
.client
|
||||
.as_ref()
|
||||
.expect("cannot handle requests without client");
|
||||
let rel = req.reltag();
|
||||
|
||||
// Check the cache first
|
||||
let mut cache_misses = Vec::with_capacity(req.nblocks as usize);
|
||||
for i in 0..req.nblocks {
|
||||
let blkno = req.block_number + i as u32;
|
||||
|
||||
// note: this is deadlock-safe even though we hold multiple locks at the same time,
|
||||
// because they're always acquired in the same order.
|
||||
let in_progress_guard = self
|
||||
.in_progress_table
|
||||
.lock(RequestInProgressKey::Block(rel, blkno), req.request_id)
|
||||
.await;
|
||||
|
||||
let not_modified_since = match self.cache.page_is_cached(&rel, blkno).await {
|
||||
Ok(CacheResult::Found(_)) => {
|
||||
trace!("found blk {} in rel {:?} in LFC", blkno, rel);
|
||||
continue;
|
||||
}
|
||||
Ok(CacheResult::NotFound(lsn)) => lsn,
|
||||
Err(_io_error) => return Err(-1), // FIXME errno?
|
||||
};
|
||||
cache_misses.push((blkno, not_modified_since, in_progress_guard));
|
||||
}
|
||||
if cache_misses.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
let not_modified_since = cache_misses
|
||||
.iter()
|
||||
.map(|(_blkno, lsn, _guard)| *lsn)
|
||||
.max()
|
||||
.unwrap();
|
||||
let block_numbers: Vec<u32> = cache_misses
|
||||
.iter()
|
||||
.map(|(blkno, _lsn, _guard)| *blkno)
|
||||
.collect();
|
||||
|
||||
// TODO: spawn separate tasks for these. Use the integrated cache to keep track of the
|
||||
// in-flight requests
|
||||
|
||||
match client
|
||||
.get_page(page_api::GetPageRequest {
|
||||
request_id: req.request_id.into(),
|
||||
request_class: page_api::GetPageClass::Prefetch,
|
||||
read_lsn: self.request_lsns(not_modified_since),
|
||||
rel,
|
||||
block_numbers: block_numbers.clone(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(resp) => {
|
||||
trace!(
|
||||
"prefetch completed, remembering blocks {:?} in rel {:?} in LFC",
|
||||
block_numbers, rel
|
||||
);
|
||||
if resp.pages.len() != block_numbers.len() {
|
||||
error!(
|
||||
"received unexpected response with {} page images from pageserver for a request for {} pages",
|
||||
resp.pages.len(),
|
||||
block_numbers.len(),
|
||||
);
|
||||
return Err(-1);
|
||||
}
|
||||
|
||||
for (page, (blkno, _lsn, _guard)) in resp.pages.into_iter().zip(cache_misses) {
|
||||
self.cache
|
||||
.remember_page(&rel, blkno, page.image, not_modified_since, false)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
info!("tonic error: {err:?}");
|
||||
return Err(-1);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> MetricGroup<T> for CommunicatorWorkerProcessStruct<'_>
|
||||
where
|
||||
T: Encoding,
|
||||
GaugeState: MetricEncoding<T>,
|
||||
{
|
||||
fn collect_group_into(&self, enc: &mut T) -> Result<(), T::Err> {
|
||||
self.lfc_metrics.collect_group_into(enc)
|
||||
use measured::metric::MetricFamilyEncoding;
|
||||
use measured::metric::name::MetricName;
|
||||
|
||||
self.lfc_metrics.collect_group_into(enc)?;
|
||||
self.request_counters
|
||||
.collect_family_into(MetricName::from_str("request_counters"), enc)?;
|
||||
self.request_nblocks_counters
|
||||
.collect_family_into(MetricName::from_str("request_nblocks_counters"), enc)?;
|
||||
|
||||
// FIXME: allocator metrics
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,9 +4,9 @@
|
||||
//! - launch the main loop,
|
||||
//! - receive IO requests from backends and process them,
|
||||
//! - write results back to backends.
|
||||
|
||||
mod callbacks;
|
||||
mod control_socket;
|
||||
mod in_progress_ios;
|
||||
mod lfc_metrics;
|
||||
mod logging;
|
||||
mod main_loop;
|
||||
|
||||
@@ -1,14 +1,21 @@
|
||||
//! Functions called from the C code in the worker process
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::ffi::{CStr, CString, c_char};
|
||||
use std::path::PathBuf;
|
||||
|
||||
use crate::init::CommunicatorInitStruct;
|
||||
use crate::worker_process::main_loop;
|
||||
use crate::worker_process::main_loop::CommunicatorWorkerProcessStruct;
|
||||
|
||||
use pageserver_client_grpc::ShardStripeSize;
|
||||
|
||||
/// Launch the communicator's tokio tasks, which do most of the work.
|
||||
///
|
||||
/// The caller has initialized the process as a regular PostgreSQL background worker
|
||||
/// process.
|
||||
/// process. The shared memory segment used to communicate with the backends has been
|
||||
/// allocated and initialized earlier, at postmaster startup, in
|
||||
/// rcommunicator_shmem_init().
|
||||
///
|
||||
/// Inputs:
|
||||
/// `tenant_id` and `timeline_id` can be NULL, if we're been launched in "non-Neon" mode,
|
||||
@@ -23,11 +30,19 @@ use crate::worker_process::main_loop::CommunicatorWorkerProcessStruct;
|
||||
/// This is called only once in the process, so the returned struct, and error message in
|
||||
/// case of failure, are simply leaked.
|
||||
#[unsafe(no_mangle)]
|
||||
pub extern "C" fn communicator_worker_launch(
|
||||
pub extern "C" fn communicator_worker_process_launch(
|
||||
cis: Box<CommunicatorInitStruct>,
|
||||
tenant_id: *const c_char,
|
||||
timeline_id: *const c_char,
|
||||
auth_token: *const c_char,
|
||||
shard_map: *mut *mut c_char,
|
||||
nshards: u32,
|
||||
stripe_size: u32,
|
||||
file_cache_path: *const c_char,
|
||||
initial_file_cache_size: u64,
|
||||
error_p: *mut *const c_char,
|
||||
) -> Option<&'static CommunicatorWorkerProcessStruct> {
|
||||
) -> Option<&'static CommunicatorWorkerProcessStruct<'static>> {
|
||||
tracing::warn!("starting threads in rust code");
|
||||
// Convert the arguments into more convenient Rust types
|
||||
let tenant_id = if tenant_id.is_null() {
|
||||
None
|
||||
@@ -41,9 +56,41 @@ pub extern "C" fn communicator_worker_launch(
|
||||
let cstr = unsafe { CStr::from_ptr(timeline_id) };
|
||||
Some(cstr.to_str().expect("assume UTF-8"))
|
||||
};
|
||||
let auth_token = if auth_token.is_null() {
|
||||
None
|
||||
} else {
|
||||
let cstr = unsafe { CStr::from_ptr(auth_token) };
|
||||
Some(cstr.to_str().expect("assume UTF-8"))
|
||||
};
|
||||
let file_cache_path = {
|
||||
if file_cache_path.is_null() {
|
||||
None
|
||||
} else {
|
||||
let c_str = unsafe { CStr::from_ptr(file_cache_path) };
|
||||
Some(PathBuf::from(c_str.to_str().unwrap()))
|
||||
}
|
||||
};
|
||||
let shard_map = shard_map_to_hash(nshards, shard_map);
|
||||
// FIXME: distinguish between unsharded, and sharded with 1 shard
|
||||
// Also, we might go from unsharded to sharded while the system
|
||||
// is running.
|
||||
let stripe_size = if stripe_size > 0 && nshards > 1 {
|
||||
Some(ShardStripeSize(stripe_size))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// The `init` function does all the work.
|
||||
let result = main_loop::init(tenant_id, timeline_id);
|
||||
let result = main_loop::init(
|
||||
*cis,
|
||||
tenant_id,
|
||||
timeline_id,
|
||||
auth_token,
|
||||
shard_map,
|
||||
stripe_size,
|
||||
initial_file_cache_size,
|
||||
file_cache_path,
|
||||
);
|
||||
|
||||
// On failure, return the error message to the C caller in *error_p.
|
||||
match result {
|
||||
@@ -58,3 +105,47 @@ pub extern "C" fn communicator_worker_launch(
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert the "shard map" from an array of C strings, indexed by shard no to a rust HashMap
|
||||
fn shard_map_to_hash(
|
||||
nshards: u32,
|
||||
shard_map: *mut *mut c_char,
|
||||
) -> HashMap<utils::shard::ShardIndex, String> {
|
||||
use utils::shard::*;
|
||||
|
||||
assert!(nshards <= u8::MAX as u32);
|
||||
|
||||
let mut result: HashMap<ShardIndex, String> = HashMap::new();
|
||||
let mut p = shard_map;
|
||||
|
||||
for i in 0..nshards {
|
||||
let c_str = unsafe { CStr::from_ptr(*p) };
|
||||
|
||||
p = unsafe { p.add(1) };
|
||||
|
||||
let s = c_str.to_str().unwrap();
|
||||
let k = if nshards > 1 {
|
||||
ShardIndex::new(ShardNumber(i as u8), ShardCount(nshards as u8))
|
||||
} else {
|
||||
ShardIndex::unsharded()
|
||||
};
|
||||
result.insert(k, s.into());
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
/// Inform the rust code about a configuration change
|
||||
#[unsafe(no_mangle)]
|
||||
pub extern "C" fn communicator_worker_config_reload(
|
||||
proc_handle: &'static CommunicatorWorkerProcessStruct<'static>,
|
||||
file_cache_size: u64,
|
||||
shard_map: *mut *mut c_char,
|
||||
nshards: u32,
|
||||
stripe_size: u32,
|
||||
) {
|
||||
proc_handle.cache.resize_file_cache(file_cache_size as u32);
|
||||
|
||||
let shard_map = shard_map_to_hash(nshards, shard_map);
|
||||
let stripe_size = (nshards > 1).then_some(ShardStripeSize(stripe_size));
|
||||
proc_handle.update_shard_map(shard_map, stripe_size);
|
||||
}
|
||||
|
||||
1366
pgxn/neon/communicator_new.c
Normal file
1366
pgxn/neon/communicator_new.c
Normal file
File diff suppressed because it is too large
Load Diff
66
pgxn/neon/communicator_new.h
Normal file
66
pgxn/neon/communicator_new.h
Normal file
@@ -0,0 +1,66 @@
|
||||
/*-------------------------------------------------------------------------
|
||||
*
|
||||
* communicator_new.h
|
||||
* new implementation
|
||||
*
|
||||
*
|
||||
* Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
#ifndef COMMUNICATOR_NEW_H
|
||||
#define COMMUNICATOR_NEW_H
|
||||
|
||||
#include "storage/buf_internals.h"
|
||||
|
||||
#include "lfc_prewarm.h"
|
||||
#include "neon.h"
|
||||
#include "neon_pgversioncompat.h"
|
||||
#include "pagestore_client.h"
|
||||
|
||||
/* initialization at postmaster startup */
|
||||
extern void CommunicatorNewShmemRequest(void);
|
||||
extern void CommunicatorNewShmemInit(void);
|
||||
|
||||
/* initialization at backend startup */
|
||||
extern void communicator_new_init(void);
|
||||
|
||||
/* Read requests */
|
||||
extern bool communicator_new_rel_exists(NRelFileInfo rinfo, ForkNumber forkNum);
|
||||
extern BlockNumber communicator_new_rel_nblocks(NRelFileInfo rinfo, ForkNumber forknum);
|
||||
extern int64 communicator_new_dbsize(Oid dbNode);
|
||||
extern void communicator_new_read_at_lsnv(NRelFileInfo rinfo, ForkNumber forkNum,
|
||||
BlockNumber base_blockno,
|
||||
void **buffers, BlockNumber nblocks);
|
||||
extern void communicator_new_prefetch_register_bufferv(NRelFileInfo rinfo, ForkNumber forkNum,
|
||||
BlockNumber blockno,
|
||||
BlockNumber nblocks);
|
||||
extern bool communicator_new_cache_contains(NRelFileInfo rinfo, ForkNumber forkNum,
|
||||
BlockNumber blockno);
|
||||
extern int communicator_new_read_slru_segment(
|
||||
SlruKind kind,
|
||||
uint32_t segno,
|
||||
neon_request_lsns *request_lsns,
|
||||
const char *path
|
||||
);
|
||||
|
||||
/* Write requests, to keep the caches up-to-date */
|
||||
extern void communicator_new_write_page(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blockno,
|
||||
const void *buffer, XLogRecPtr lsn);
|
||||
extern void communicator_new_rel_extend(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blockno,
|
||||
const void *buffer, XLogRecPtr lsn);
|
||||
extern void communicator_new_rel_zeroextend(NRelFileInfo rinfo, ForkNumber forkNum,
|
||||
BlockNumber blockno, BlockNumber nblocks,
|
||||
XLogRecPtr lsn);
|
||||
extern void communicator_new_rel_create(NRelFileInfo rinfo, ForkNumber forkNum, XLogRecPtr lsn);
|
||||
extern void communicator_new_rel_truncate(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber nblocks, XLogRecPtr lsn);
|
||||
extern void communicator_new_rel_unlink(NRelFileInfo rinfo, ForkNumber forkNum, XLogRecPtr lsn);
|
||||
extern void communicator_new_update_cached_rel_size(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber nblocks, XLogRecPtr lsn);
|
||||
|
||||
/* other functions */
|
||||
extern int32 communicator_new_approximate_working_set_size_seconds(time_t duration, bool reset);
|
||||
extern FileCacheState *communicator_new_get_lfc_state(size_t max_entries);
|
||||
extern LfcStatsEntry *communicator_new_get_lfc_stats(void);
|
||||
|
||||
#endif /* COMMUNICATOR_NEW_H */
|
||||
@@ -18,6 +18,9 @@
|
||||
#include <unistd.h>
|
||||
|
||||
#include "miscadmin.h"
|
||||
#if PG_VERSION_NUM >= 150000
|
||||
#include "access/xlogrecovery.h"
|
||||
#endif
|
||||
#include "postmaster/bgworker.h"
|
||||
#include "postmaster/interrupt.h"
|
||||
#include "postmaster/postmaster.h"
|
||||
@@ -33,10 +36,13 @@
|
||||
#include "file_cache.h"
|
||||
#include "neon.h"
|
||||
#include "neon_perf_counters.h"
|
||||
#include "pagestore_client.h"
|
||||
|
||||
/* the rust bindings, generated by cbindgen */
|
||||
#include "communicator/communicator_bindings.h"
|
||||
|
||||
struct CommunicatorInitStruct *cis;
|
||||
|
||||
static void pump_logging(struct LoggingReceiver *logging);
|
||||
PGDLLEXPORT void communicator_new_bgworker_main(Datum main_arg);
|
||||
|
||||
@@ -70,6 +76,9 @@ pg_init_communicator_process(void)
|
||||
void
|
||||
communicator_new_bgworker_main(Datum main_arg)
|
||||
{
|
||||
char **connstrings;
|
||||
ShardMap shard_map;
|
||||
uint64 file_cache_size;
|
||||
struct LoggingReceiver *logging;
|
||||
const char *errmsg = NULL;
|
||||
const struct CommunicatorWorkerProcessStruct *proc_handle;
|
||||
@@ -94,6 +103,20 @@ communicator_new_bgworker_main(Datum main_arg)
|
||||
|
||||
BackgroundWorkerUnblockSignals();
|
||||
|
||||
/* lfc_size_limit is in MBs */
|
||||
file_cache_size = lfc_size_limit * (1024 * 1024 / BLCKSZ);
|
||||
if (file_cache_size < 100)
|
||||
file_cache_size = 100;
|
||||
|
||||
if (!parse_shard_map(pageserver_grpc_urls, &shard_map))
|
||||
{
|
||||
/* shouldn't happen, as the GUC was verified already */
|
||||
elog(FATAL, "could not parse neon.pageserver_grpcs_urls");
|
||||
}
|
||||
connstrings = palloc(shard_map.num_shards * sizeof(char *));
|
||||
for (int i = 0; i < shard_map.num_shards; i++)
|
||||
connstrings[i] = shard_map.connstring[i];
|
||||
|
||||
/*
|
||||
* By default, INFO messages are not printed to the log. We want
|
||||
* `tracing::info!` messages emitted from the communicator to be printed,
|
||||
@@ -108,11 +131,20 @@ communicator_new_bgworker_main(Datum main_arg)
|
||||
|
||||
logging = communicator_worker_configure_logging();
|
||||
|
||||
proc_handle = communicator_worker_launch(
|
||||
neon_tenant[0] == '\0' ? NULL : neon_tenant,
|
||||
neon_timeline[0] == '\0' ? NULL : neon_timeline,
|
||||
&errmsg
|
||||
);
|
||||
Assert(cis != NULL);
|
||||
proc_handle = communicator_worker_process_launch(
|
||||
cis,
|
||||
neon_tenant[0] == '\0' ? NULL : neon_tenant,
|
||||
neon_timeline[0] == '\0' ? NULL : neon_timeline,
|
||||
neon_auth_token,
|
||||
connstrings,
|
||||
shard_map.num_shards,
|
||||
neon_stripe_size,
|
||||
lfc_path,
|
||||
file_cache_size,
|
||||
&errmsg);
|
||||
pfree(connstrings);
|
||||
cis = NULL;
|
||||
if (proc_handle == NULL)
|
||||
{
|
||||
/*
|
||||
@@ -173,6 +205,28 @@ communicator_new_bgworker_main(Datum main_arg)
|
||||
{
|
||||
ConfigReloadPending = false;
|
||||
ProcessConfigFile(PGC_SIGHUP);
|
||||
|
||||
/* lfc_size_limit is in MBs */
|
||||
file_cache_size = lfc_size_limit * (1024 * 1024 / BLCKSZ);
|
||||
if (file_cache_size < 100)
|
||||
file_cache_size = 100;
|
||||
|
||||
/* Reload pageserver URLs */
|
||||
if (!parse_shard_map(pageserver_grpc_urls, &shard_map))
|
||||
{
|
||||
/* shouldn't happen, as the GUC was verified already */
|
||||
elog(FATAL, "could not parse neon.pageserver_grpcs_urls");
|
||||
}
|
||||
connstrings = palloc(shard_map.num_shards * sizeof(char *));
|
||||
for (int i = 0; i < shard_map.num_shards; i++)
|
||||
connstrings[i] = shard_map.connstring[i];
|
||||
|
||||
communicator_worker_config_reload(proc_handle,
|
||||
file_cache_size,
|
||||
connstrings,
|
||||
shard_map.num_shards,
|
||||
neon_stripe_size);
|
||||
pfree(connstrings);
|
||||
}
|
||||
|
||||
duration = TimestampDifferenceMilliseconds(before, GetCurrentTimestamp());
|
||||
@@ -271,3 +325,36 @@ callback_set_my_latch_unsafe(void)
|
||||
{
|
||||
SetLatch(MyLatch);
|
||||
}
|
||||
|
||||
/*
|
||||
* FIXME: The logic from neon_get_request_lsns() needs to go here, except for
|
||||
* the last-written LSN cache stuff, which is managed by the rust code now.
|
||||
*/
|
||||
XLogRecPtr
|
||||
callback_get_request_lsn_unsafe(void)
|
||||
{
|
||||
/*
|
||||
* NB: be very careful with what you do here! This is called from tokio
|
||||
* threads, so anything tha tries to take LWLocks is unsafe, for example.
|
||||
*
|
||||
* RecoveryInProgress() is OK
|
||||
*/
|
||||
if (RecoveryInProgress())
|
||||
{
|
||||
XLogRecPtr replay_lsn = GetXLogReplayRecPtr(NULL);
|
||||
|
||||
return replay_lsn;
|
||||
}
|
||||
else
|
||||
{
|
||||
XLogRecPtr flushlsn;
|
||||
|
||||
#if PG_VERSION_NUM >= 150000
|
||||
flushlsn = GetFlushRecPtr(NULL);
|
||||
#else
|
||||
flushlsn = GetFlushRecPtr();
|
||||
#endif
|
||||
|
||||
return flushlsn;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,6 +12,9 @@
|
||||
#ifndef COMMUNICATOR_PROCESS_H
|
||||
#define COMMUNICATOR_PROCESS_H
|
||||
|
||||
extern struct CommunicatorInitStruct *cis;
|
||||
|
||||
/* initialization early at postmaster startup */
|
||||
extern void pg_init_communicator_process(void);
|
||||
|
||||
#endif /* COMMUNICATOR_PROCESS_H */
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user