Mirror of https://github.com/neondatabase/neon.git (synced 2026-01-05 20:42:54 +00:00)

Commit: Merge remote-tracking branch 'origin' into vlad/hadron-jwt
.github/actionlint.yml (vendored, 2 lines changed)

@@ -31,7 +31,7 @@ config-variables:
  - NEON_PROD_AWS_ACCOUNT_ID
  - PGREGRESS_PG16_PROJECT_ID
  - PGREGRESS_PG17_PROJECT_ID
  - PREWARM_PGBENCH_SIZE
  - PREWARM_PROJECT_ID
  - REMOTE_STORAGE_AZURE_CONTAINER
  - REMOTE_STORAGE_AZURE_REGION
  - SLACK_CICD_CHANNEL_ID
.github/workflows/benchbase_tpcc.yml (vendored, new file, 384 lines)

@@ -0,0 +1,384 @@
name: TPC-C like benchmark using benchbase

on:
  schedule:
    # * is a special character in YAML so you have to quote this string
    # ┌───────────── minute (0 - 59)
    # │ ┌───────────── hour (0 - 23)
    # │ │ ┌───────────── day of the month (1 - 31)
    # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC)
    # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT)
    - cron: '0 6 * * *' # run once a day at 6 AM UTC
  workflow_dispatch: # adds ability to run this manually

defaults:
  run:
    shell: bash -euxo pipefail {0}

concurrency:
  # Allow only one workflow globally because we do not want to be too noisy in production environment
  group: benchbase-tpcc-workflow
  cancel-in-progress: false

permissions:
  contents: read

jobs:
  benchbase-tpcc:
    strategy:
      fail-fast: false # allow other variants to continue even if one fails
      matrix:
        include:
          - warehouses: 50 # defines number of warehouses and is used to compute number of terminals
            max_rate: 800 # measured max TPS at scale factor based on experiments. Adjust if performance is better/worse
            min_cu: 0.25 # simulate free tier plan (0.25 - 2 CU)
            max_cu: 2
          - warehouses: 500 # serverless plan (2-8 CU)
            max_rate: 2000
            min_cu: 2
            max_cu: 8
          - warehouses: 1000 # business plan (2-16 CU)
            max_rate: 2900
            min_cu: 2
            max_cu: 16
      max-parallel: 1 # we want to run each workload size sequentially to avoid noisy neighbors
    permissions:
      contents: write
      statuses: write
      id-token: write # aws-actions/configure-aws-credentials
    env:
      PG_CONFIG: /tmp/neon/pg_install/v17/bin/pg_config
      PSQL: /tmp/neon/pg_install/v17/bin/psql
      PG_17_LIB_PATH: /tmp/neon/pg_install/v17/lib
      POSTGRES_VERSION: 17
    runs-on: [ self-hosted, us-east-2, x64 ]
    timeout-minutes: 1440

    steps:
      - name: Harden the runner (Audit all outbound calls)
        uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
        with:
          egress-policy: audit
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

      - name: Configure AWS credentials # necessary to download artefacts
        uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2
        with:
          aws-region: eu-central-1
          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
          role-duration-seconds: 18000 # 5 hours is currently max associated with IAM role

      - name: Download Neon artifact
        uses: ./.github/actions/download
        with:
          name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
          path: /tmp/neon/
          prefix: latest
          aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}

      - name: Create Neon Project
        id: create-neon-project-tpcc
        uses: ./.github/actions/neon-project-create
        with:
          region_id: aws-us-east-2
          postgres_version: ${{ env.POSTGRES_VERSION }}
          compute_units: '[${{ matrix.min_cu }}, ${{ matrix.max_cu }}]'
          api_key: ${{ secrets.NEON_PRODUCTION_API_KEY_4_BENCHMARKS }}
          api_host: console.neon.tech # production (!)

      - name: Initialize Neon project
        env:
          BENCHMARK_TPCC_CONNSTR: ${{ steps.create-neon-project-tpcc.outputs.dsn }}
          PROJECT_ID: ${{ steps.create-neon-project-tpcc.outputs.project_id }}
        run: |
          echo "Initializing Neon project with project_id: ${PROJECT_ID}"
          export LD_LIBRARY_PATH=${PG_17_LIB_PATH}

          # Retry logic for psql connection with 1 minute sleep between attempts
          for attempt in {1..3}; do
            echo "Attempt ${attempt}/3: Creating extensions in Neon project"
            if ${PSQL} "${BENCHMARK_TPCC_CONNSTR}" -c "CREATE EXTENSION IF NOT EXISTS neon; CREATE EXTENSION IF NOT EXISTS neon_utils;"; then
              echo "Successfully created extensions"
              break
            else
              echo "Failed to create extensions on attempt ${attempt}"
              if [ ${attempt} -lt 3 ]; then
                echo "Waiting 60 seconds before retry..."
                sleep 60
              else
                echo "All attempts failed, exiting"
                exit 1
              fi
            fi
          done

          echo "BENCHMARK_TPCC_CONNSTR=${BENCHMARK_TPCC_CONNSTR}" >> $GITHUB_ENV
      - name: Generate BenchBase workload configuration
        env:
          WAREHOUSES: ${{ matrix.warehouses }}
          MAX_RATE: ${{ matrix.max_rate }}
        run: |
          echo "Generating BenchBase configs for warehouses: ${WAREHOUSES}, max_rate: ${MAX_RATE}"

          # Extract hostname and password from connection string
          # Format: postgresql://username:password@hostname/database?params (no port for Neon)
          HOSTNAME=$(echo "${BENCHMARK_TPCC_CONNSTR}" | sed -n 's|.*://[^:]*:[^@]*@\([^/]*\)/.*|\1|p')
          PASSWORD=$(echo "${BENCHMARK_TPCC_CONNSTR}" | sed -n 's|.*://[^:]*:\([^@]*\)@.*|\1|p')

          echo "Extracted hostname: ${HOSTNAME}"

          # Use runner temp (NVMe) as working directory
          cd "${RUNNER_TEMP}"

          # Copy the generator script
          cp "${GITHUB_WORKSPACE}/test_runner/performance/benchbase_tpc_c_helpers/generate_workload_size.py" .

          # Generate configs and scripts
          python3 generate_workload_size.py \
            --warehouses ${WAREHOUSES} \
            --max-rate ${MAX_RATE} \
            --hostname ${HOSTNAME} \
            --password ${PASSWORD} \
            --runner-arch ${{ runner.arch }}

          # Fix path mismatch: move generated configs and scripts to expected locations
          mv ../configs ./configs
          mv ../scripts ./scripts
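Note on the step above: the two sed expressions recover the hostname and password from the Neon DSN produced by the project-create action. As an illustration only, here is the same extraction in Python; the DSN value is invented and `urllib.parse` is used instead of sed, so this is a sketch of the idea rather than what the workflow runs.

```python
from urllib.parse import urlparse

# Hypothetical DSN in the format the step describes:
# postgresql://username:password@hostname/database?params (no port for Neon)
dsn = "postgresql://user:secret@ep-example-123456.us-east-2.aws.neon.tech/neondb?sslmode=require"

parsed = urlparse(dsn)
hostname = parsed.hostname  # ep-example-123456.us-east-2.aws.neon.tech
password = parsed.password  # secret

print(hostname, password)
```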
      - name: Prepare database (load data)
        env:
          WAREHOUSES: ${{ matrix.warehouses }}
        run: |
          cd "${RUNNER_TEMP}"

          echo "Loading ${WAREHOUSES} warehouses into database..."

          # Run the loader script and capture output to log file while preserving stdout/stderr
          ./scripts/load_${WAREHOUSES}_warehouses.sh 2>&1 | tee "load_${WAREHOUSES}_warehouses.log"

          echo "Database loading completed"

      - name: Run TPC-C benchmark (warmup phase, then benchmark at 70% of configured max TPS)
        env:
          WAREHOUSES: ${{ matrix.warehouses }}
        run: |
          cd "${RUNNER_TEMP}"

          echo "Running TPC-C benchmark with ${WAREHOUSES} warehouses..."

          # Run the optimal rate benchmark
          ./scripts/execute_${WAREHOUSES}_warehouses_opt_rate.sh

          echo "Benchmark execution completed"

      - name: Run TPC-C benchmark (warmup phase, then ramp down TPS and up again in 5 minute intervals)
        env:
          WAREHOUSES: ${{ matrix.warehouses }}
        run: |
          cd "${RUNNER_TEMP}"

          echo "Running TPC-C ramp-down-up with ${WAREHOUSES} warehouses..."

          # Run the ramp-down/ramp-up benchmark
          ./scripts/execute_${WAREHOUSES}_warehouses_ramp_up.sh

          echo "Benchmark execution completed"
      - name: Process results (upload to test results database and generate diagrams)
        env:
          WAREHOUSES: ${{ matrix.warehouses }}
          MIN_CU: ${{ matrix.min_cu }}
          MAX_CU: ${{ matrix.max_cu }}
          PROJECT_ID: ${{ steps.create-neon-project-tpcc.outputs.project_id }}
          REVISION: ${{ github.sha }}
          PERF_DB_CONNSTR: ${{ secrets.PERF_TEST_RESULT_CONNSTR }}
        run: |
          cd "${RUNNER_TEMP}"

          echo "Creating temporary Python environment for results processing..."

          # Create temporary virtual environment
          python3 -m venv temp_results_env
          source temp_results_env/bin/activate

          # Install required packages in virtual environment
          pip install matplotlib pandas psycopg2-binary

          echo "Copying results processing scripts..."

          # Copy both processing scripts
          cp "${GITHUB_WORKSPACE}/test_runner/performance/benchbase_tpc_c_helpers/generate_diagrams.py" .
          cp "${GITHUB_WORKSPACE}/test_runner/performance/benchbase_tpc_c_helpers/upload_results_to_perf_test_results.py" .

          echo "Processing load phase metrics..."

          # Find and process load log
          LOAD_LOG=$(find . -name "load_${WAREHOUSES}_warehouses.log" -type f | head -1)
          if [ -n "$LOAD_LOG" ]; then
            echo "Processing load metrics from: $LOAD_LOG"
            python upload_results_to_perf_test_results.py \
              --load-log "$LOAD_LOG" \
              --run-type "load" \
              --warehouses "${WAREHOUSES}" \
              --min-cu "${MIN_CU}" \
              --max-cu "${MAX_CU}" \
              --project-id "${PROJECT_ID}" \
              --revision "${REVISION}" \
              --connection-string "${PERF_DB_CONNSTR}"
          else
            echo "Warning: Load log file not found: load_${WAREHOUSES}_warehouses.log"
          fi

          echo "Processing warmup results for optimal rate..."

          # Find and process warmup results
          WARMUP_CSV=$(find results_warmup -name "*.results.csv" -type f | head -1)
          WARMUP_JSON=$(find results_warmup -name "*.summary.json" -type f | head -1)

          if [ -n "$WARMUP_CSV" ] && [ -n "$WARMUP_JSON" ]; then
            echo "Generating warmup diagram from: $WARMUP_CSV"
            python generate_diagrams.py \
              --input-csv "$WARMUP_CSV" \
              --output-svg "warmup_${WAREHOUSES}_warehouses_performance.svg" \
              --title-suffix "Warmup at max TPS"

            echo "Uploading warmup metrics from: $WARMUP_JSON"
            python upload_results_to_perf_test_results.py \
              --summary-json "$WARMUP_JSON" \
              --results-csv "$WARMUP_CSV" \
              --run-type "warmup" \
              --min-cu "${MIN_CU}" \
              --max-cu "${MAX_CU}" \
              --project-id "${PROJECT_ID}" \
              --revision "${REVISION}" \
              --connection-string "${PERF_DB_CONNSTR}"
          else
            echo "Warning: Missing warmup results files (CSV: $WARMUP_CSV, JSON: $WARMUP_JSON)"
          fi

          echo "Processing optimal rate results..."

          # Find and process optimal rate results
          OPTRATE_CSV=$(find results_opt_rate -name "*.results.csv" -type f | head -1)
          OPTRATE_JSON=$(find results_opt_rate -name "*.summary.json" -type f | head -1)

          if [ -n "$OPTRATE_CSV" ] && [ -n "$OPTRATE_JSON" ]; then
            echo "Generating optimal rate diagram from: $OPTRATE_CSV"
            python generate_diagrams.py \
              --input-csv "$OPTRATE_CSV" \
              --output-svg "benchmark_${WAREHOUSES}_warehouses_performance.svg" \
              --title-suffix "70% of max TPS"

            echo "Uploading optimal rate metrics from: $OPTRATE_JSON"
            python upload_results_to_perf_test_results.py \
              --summary-json "$OPTRATE_JSON" \
              --results-csv "$OPTRATE_CSV" \
              --run-type "opt-rate" \
              --min-cu "${MIN_CU}" \
              --max-cu "${MAX_CU}" \
              --project-id "${PROJECT_ID}" \
              --revision "${REVISION}" \
              --connection-string "${PERF_DB_CONNSTR}"
          else
            echo "Warning: Missing optimal rate results files (CSV: $OPTRATE_CSV, JSON: $OPTRATE_JSON)"
          fi

          echo "Processing warmup 2 results for ramp down/up phase..."

          # Find and process warmup results
          WARMUP_CSV=$(find results_warmup -name "*.results.csv" -type f | tail -1)
          WARMUP_JSON=$(find results_warmup -name "*.summary.json" -type f | tail -1)

          if [ -n "$WARMUP_CSV" ] && [ -n "$WARMUP_JSON" ]; then
            echo "Generating warmup diagram from: $WARMUP_CSV"
            python generate_diagrams.py \
              --input-csv "$WARMUP_CSV" \
              --output-svg "warmup_2_${WAREHOUSES}_warehouses_performance.svg" \
              --title-suffix "Warmup at max TPS"

            echo "Uploading warmup metrics from: $WARMUP_JSON"
            python upload_results_to_perf_test_results.py \
              --summary-json "$WARMUP_JSON" \
              --results-csv "$WARMUP_CSV" \
              --run-type "warmup" \
              --min-cu "${MIN_CU}" \
              --max-cu "${MAX_CU}" \
              --project-id "${PROJECT_ID}" \
              --revision "${REVISION}" \
              --connection-string "${PERF_DB_CONNSTR}"
          else
            echo "Warning: Missing warmup results files (CSV: $WARMUP_CSV, JSON: $WARMUP_JSON)"
          fi

          echo "Processing ramp results..."

          # Find and process ramp results
          RAMPUP_CSV=$(find results_ramp_up -name "*.results.csv" -type f | head -1)
          RAMPUP_JSON=$(find results_ramp_up -name "*.summary.json" -type f | head -1)

          if [ -n "$RAMPUP_CSV" ] && [ -n "$RAMPUP_JSON" ]; then
            echo "Generating ramp diagram from: $RAMPUP_CSV"
            python generate_diagrams.py \
              --input-csv "$RAMPUP_CSV" \
              --output-svg "ramp_${WAREHOUSES}_warehouses_performance.svg" \
              --title-suffix "ramp TPS down and up in 5 minute intervals"

            echo "Uploading ramp metrics from: $RAMPUP_JSON"
            python upload_results_to_perf_test_results.py \
              --summary-json "$RAMPUP_JSON" \
              --results-csv "$RAMPUP_CSV" \
              --run-type "ramp-up" \
              --min-cu "${MIN_CU}" \
              --max-cu "${MAX_CU}" \
              --project-id "${PROJECT_ID}" \
              --revision "${REVISION}" \
              --connection-string "${PERF_DB_CONNSTR}"
          else
            echo "Warning: Missing ramp results files (CSV: $RAMPUP_CSV, JSON: $RAMPUP_JSON)"
          fi

          # Deactivate and clean up virtual environment
          deactivate
          rm -rf temp_results_env
          rm upload_results_to_perf_test_results.py

          echo "Results processing completed and environment cleaned up"
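Note on the results-processing step above: both warmup phases write into results_warmup, and the script picks the first match with `head -1` and the last with `tail -1`, which relies on the listing order of `find`. For illustration, a minimal Python sketch of the same selection that orders files explicitly by modification time; the helper name and the assumption of one *.summary.json per run are mine, not part of the workflow.

```python
from pathlib import Path

def warmup_summaries(results_dir: str = "results_warmup"):
    """Return (first, latest) *.summary.json files, ordered by modification time."""
    # Assumes BenchBase writes one *.summary.json per warmup run into results_dir.
    summaries = sorted(Path(results_dir).glob("*.summary.json"),
                       key=lambda p: p.stat().st_mtime)
    if not summaries:
        return None, None
    return summaries[0], summaries[-1]  # first warmup vs. warmup before the ramp phase

first, latest = warmup_summaries()
print(first, latest)
```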
      - name: Set date for upload
        id: set-date
        run: echo "date=$(date +%Y-%m-%d)" >> $GITHUB_OUTPUT

      - name: Configure AWS credentials # necessary to upload results
        uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2
        with:
          aws-region: us-east-2
          role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
          role-duration-seconds: 900 # 900 is minimum value

      - name: Upload benchmark results to S3
        env:
          S3_BUCKET: neon-public-benchmark-results
          S3_PREFIX: benchbase-tpc-c/${{ steps.set-date.outputs.date }}/${{ github.run_id }}/${{ matrix.warehouses }}-warehouses
        run: |
          echo "Redacting passwords from configuration files before upload..."

          # Mask all passwords in XML config files
          find "${RUNNER_TEMP}/configs" -name "*.xml" -type f -exec sed -i 's|<password>[^<]*</password>|<password>redacted</password>|g' {} \;

          echo "Uploading benchmark results to s3://${S3_BUCKET}/${S3_PREFIX}/"

          # Upload the entire benchmark directory recursively
          aws s3 cp --only-show-errors --recursive "${RUNNER_TEMP}" s3://${S3_BUCKET}/${S3_PREFIX}/

          echo "Upload completed"

      - name: Delete Neon Project
        if: ${{ always() }}
        uses: ./.github/actions/neon-project-delete
        with:
          project_id: ${{ steps.create-neon-project-tpcc.outputs.project_id }}
          api_key: ${{ secrets.NEON_PRODUCTION_API_KEY_4_BENCHMARKS }}
          api_host: console.neon.tech # production (!)
.github/workflows/benchmarking.yml (vendored, 2 lines changed)

@@ -418,7 +418,7 @@ jobs:
      statuses: write
      id-token: write # aws-actions/configure-aws-credentials
    env:
      PGBENCH_SIZE: ${{ vars.PREWARM_PGBENCH_SIZE }}
      PROJECT_ID: ${{ vars.PREWARM_PROJECT_ID }}
      POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
      DEFAULT_PG_VERSION: 17
      TEST_OUTPUT: /tmp/test_output
.github/workflows/pg-clients.yml (vendored, 27 lines changed)

@@ -48,8 +48,20 @@ jobs:
    uses: ./.github/workflows/build-build-tools-image.yml
    secrets: inherit

  generate-ch-tmppw:
    runs-on: ubuntu-22.04
    outputs:
      tmp_val: ${{ steps.pwgen.outputs.tmp_val }}
    steps:
      - name: Generate a random password
        id: pwgen
        run: |
          set +x
          p=$(dd if=/dev/random bs=14 count=1 2>/dev/null | base64)
          echo tmp_val="${p//\//}" >> "${GITHUB_OUTPUT}"
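Note on the pwgen step above: it base64-encodes 14 random bytes and strips any "/" characters, presumably so the value can be embedded safely in later shell commands and service configuration. A minimal Python equivalent, purely for illustration; the function name is invented and nothing in the workflow uses it.

```python
import base64
import secrets

def generate_tmp_password(num_bytes: int = 14) -> str:
    """Random password: base64 of random bytes with '/' removed (mirrors the shell step)."""
    raw = secrets.token_bytes(num_bytes)
    return base64.b64encode(raw).decode("ascii").replace("/", "")

print(generate_tmp_password())
```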
  test-logical-replication:
    needs: [ build-build-tools-image ]
    needs: [ build-build-tools-image, generate-ch-tmppw ]
    runs-on: ubuntu-22.04

    container:
@@ -60,16 +72,20 @@ jobs:
      options: --init --user root
    services:
      clickhouse:
        image: clickhouse/clickhouse-server:24.6.3.64
        image: clickhouse/clickhouse-server:24.8
        env:
          CLICKHOUSE_PASSWORD: ${{ needs.generate-ch-tmppw.outputs.tmp_val }}
        ports:
          - 9000:9000
          - 8123:8123
      zookeeper:
        image: quay.io/debezium/zookeeper:2.7
        image: quay.io/debezium/zookeeper:3.1.3.Final
        ports:
          - 2181:2181
          - 2888:2888
          - 3888:3888
      kafka:
        image: quay.io/debezium/kafka:2.7
        image: quay.io/debezium/kafka:3.1.3.Final
        env:
          ZOOKEEPER_CONNECT: "zookeeper:2181"
          KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092
@@ -79,7 +95,7 @@ jobs:
        ports:
          - 9092:9092
      debezium:
        image: quay.io/debezium/connect:2.7
        image: quay.io/debezium/connect:3.1.3.Final
        env:
          BOOTSTRAP_SERVERS: kafka:9092
          GROUP_ID: 1
@@ -125,6 +141,7 @@ jobs:
          aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
        env:
          BENCHMARK_CONNSTR: ${{ steps.create-neon-project.outputs.dsn }}
          CLICKHOUSE_PASSWORD: ${{ needs.generate-ch-tmppw.outputs.tmp_val }}

      - name: Delete Neon Project
        if: always()
48
.github/workflows/proxy-benchmark.yml
vendored
48
.github/workflows/proxy-benchmark.yml
vendored
@@ -3,7 +3,7 @@ name: Periodic proxy performance test on unit-perf-aws-arm runners
|
||||
on:
|
||||
push: # TODO: remove after testing
|
||||
branches:
|
||||
- test-proxy-bench # Runs on pushes to branches starting with test-proxy-bench
|
||||
- test-proxy-bench # Runs on pushes to test-proxy-bench branch
|
||||
# schedule:
|
||||
# * is a special character in YAML so you have to quote this string
|
||||
# ┌───────────── minute (0 - 59)
|
||||
@@ -32,7 +32,7 @@ jobs:
|
||||
statuses: write
|
||||
contents: write
|
||||
pull-requests: write
|
||||
runs-on: [self-hosted, unit-perf-aws-arm]
|
||||
runs-on: [ self-hosted, unit-perf-aws-arm ]
|
||||
timeout-minutes: 60 # 1h timeout
|
||||
container:
|
||||
image: ghcr.io/neondatabase/build-tools:pinned-bookworm
|
||||
@@ -55,30 +55,58 @@ jobs:
|
||||
{
|
||||
echo "PROXY_BENCH_PATH=$PROXY_BENCH_PATH"
|
||||
echo "NEON_DIR=${RUNNER_TEMP}/neon"
|
||||
echo "NEON_PROXY_PATH=${RUNNER_TEMP}/neon/bin/proxy"
|
||||
echo "TEST_OUTPUT=${PROXY_BENCH_PATH}/test_output"
|
||||
echo ""
|
||||
} >> "$GITHUB_ENV"
|
||||
|
||||
- name: Run proxy-bench
|
||||
run: ${PROXY_BENCH_PATH}/run.sh
|
||||
- name: Cache poetry deps
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.cache/pypoetry/virtualenvs
|
||||
key: v2-${{ runner.os }}-${{ runner.arch }}-python-deps-bookworm-${{ hashFiles('poetry.lock') }}
|
||||
|
||||
- name: Ingest Bench Results # neon repo script
|
||||
- name: Install Python deps
|
||||
shell: bash -euxo pipefail {0}
|
||||
run: ./scripts/pysync
|
||||
|
||||
- name: show ulimits
|
||||
shell: bash -euxo pipefail {0}
|
||||
run: |
|
||||
ulimit -a
|
||||
|
||||
- name: Run proxy-bench
|
||||
working-directory: ${{ env.PROXY_BENCH_PATH }}
|
||||
run: ./run.sh --with-grafana --bare-metal
|
||||
|
||||
- name: Ingest Bench Results
|
||||
if: always()
|
||||
working-directory: ${{ env.NEON_DIR }}
|
||||
run: |
|
||||
mkdir -p $TEST_OUTPUT
|
||||
python $NEON_DIR/scripts/proxy_bench_results_ingest.py --out $TEST_OUTPUT
|
||||
|
||||
- name: Push Metrics to Proxy perf database
|
||||
shell: bash -euxo pipefail {0}
|
||||
if: always()
|
||||
env:
|
||||
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PROXY_TEST_RESULT_CONNSTR }}"
|
||||
REPORT_FROM: $TEST_OUTPUT
|
||||
working-directory: ${{ env.NEON_DIR }}
|
||||
run: $NEON_DIR/scripts/generate_and_push_perf_report.sh
|
||||
|
||||
- name: Docker cleanup
|
||||
if: always()
|
||||
run: docker compose down
|
||||
|
||||
- name: Notify Failure
|
||||
if: failure()
|
||||
run: echo "Proxy bench job failed" && exit 1
|
||||
run: echo "Proxy bench job failed" && exit 1
|
||||
|
||||
- name: Cleanup Test Resources
|
||||
if: always()
|
||||
shell: bash -euxo pipefail {0}
|
||||
run: |
|
||||
# Cleanup the test resources
|
||||
if [[ -d "${TEST_OUTPUT}" ]]; then
|
||||
rm -rf ${TEST_OUTPUT}
|
||||
fi
|
||||
if [[ -d "${PROXY_BENCH_PATH}/test_output" ]]; then
|
||||
rm -rf ${PROXY_BENCH_PATH}/test_output
|
||||
fi
|
||||
208
Cargo.lock
generated
208
Cargo.lock
generated
@@ -250,11 +250,11 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "async-lock"
|
||||
version = "3.2.0"
|
||||
version = "3.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7125e42787d53db9dd54261812ef17e937c95a51e4d291373b670342fa44310c"
|
||||
checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18"
|
||||
dependencies = [
|
||||
"event-listener 4.0.0",
|
||||
"event-listener 5.4.0",
|
||||
"event-listener-strategy",
|
||||
"pin-project-lite",
|
||||
]
|
||||
@@ -1483,6 +1483,7 @@ dependencies = [
|
||||
"tower-http",
|
||||
"tower-otel",
|
||||
"tracing",
|
||||
"tracing-appender",
|
||||
"tracing-opentelemetry",
|
||||
"tracing-subscriber",
|
||||
"tracing-utils",
|
||||
@@ -1498,9 +1499,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "concurrent-queue"
|
||||
version = "2.3.0"
|
||||
version = "2.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f057a694a54f12365049b0958a1685bb52d567f5593b355fbf685838e873d400"
|
||||
checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973"
|
||||
dependencies = [
|
||||
"crossbeam-utils",
|
||||
]
|
||||
@@ -2349,9 +2350,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0"
|
||||
|
||||
[[package]]
|
||||
name = "event-listener"
|
||||
version = "4.0.0"
|
||||
version = "5.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "770d968249b5d99410d61f5bf89057f3199a077a04d087092f58e7d10692baae"
|
||||
checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae"
|
||||
dependencies = [
|
||||
"concurrent-queue",
|
||||
"parking",
|
||||
@@ -2360,11 +2361,11 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "event-listener-strategy"
|
||||
version = "0.4.0"
|
||||
version = "0.5.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3"
|
||||
checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93"
|
||||
dependencies = [
|
||||
"event-listener 4.0.0",
|
||||
"event-listener 5.4.0",
|
||||
"pin-project-lite",
|
||||
]
|
||||
|
||||
@@ -2639,6 +2640,20 @@ version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "304de19db7028420975a296ab0fcbbc8e69438c4ed254a1e41e2a7f37d5f0e0a"
|
||||
|
||||
[[package]]
|
||||
name = "generator"
|
||||
version = "0.8.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d18470a76cb7f8ff746cf1f7470914f900252ec36bbc40b569d74b1258446827"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"log",
|
||||
"rustversion",
|
||||
"windows 0.61.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "generic-array"
|
||||
version = "0.14.7"
|
||||
@@ -2966,7 +2981,7 @@ checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"windows",
|
||||
"windows 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3237,7 +3252,7 @@ dependencies = [
|
||||
"iana-time-zone-haiku",
|
||||
"js-sys",
|
||||
"wasm-bindgen",
|
||||
"windows-core",
|
||||
"windows-core 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3794,6 +3809,19 @@ version = "0.4.26"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e"
|
||||
|
||||
[[package]]
|
||||
name = "loom"
|
||||
version = "0.7.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"generator",
|
||||
"scoped-tls",
|
||||
"tracing",
|
||||
"tracing-subscriber",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lru"
|
||||
version = "0.12.3"
|
||||
@@ -4010,6 +4038,25 @@ dependencies = [
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "moka"
|
||||
version = "0.12.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926"
|
||||
dependencies = [
|
||||
"crossbeam-channel",
|
||||
"crossbeam-epoch",
|
||||
"crossbeam-utils",
|
||||
"loom",
|
||||
"parking_lot 0.12.1",
|
||||
"portable-atomic",
|
||||
"rustc_version",
|
||||
"smallvec",
|
||||
"tagptr",
|
||||
"thiserror 1.0.69",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "multimap"
|
||||
version = "0.8.3"
|
||||
@@ -5179,7 +5226,6 @@ dependencies = [
|
||||
"criterion",
|
||||
"env_logger",
|
||||
"log",
|
||||
"memoffset 0.9.0",
|
||||
"once_cell",
|
||||
"postgres",
|
||||
"postgres_ffi_types",
|
||||
@@ -5532,7 +5578,6 @@ dependencies = [
|
||||
"futures",
|
||||
"gettid",
|
||||
"hashbrown 0.14.5",
|
||||
"hashlink",
|
||||
"hex",
|
||||
"hmac",
|
||||
"hostname",
|
||||
@@ -5554,6 +5599,7 @@ dependencies = [
|
||||
"lasso",
|
||||
"measured",
|
||||
"metrics",
|
||||
"moka",
|
||||
"once_cell",
|
||||
"opentelemetry",
|
||||
"ouroboros",
|
||||
@@ -5620,6 +5666,7 @@ dependencies = [
|
||||
"workspace_hack",
|
||||
"x509-cert",
|
||||
"zerocopy 0.8.24",
|
||||
"zeroize",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -6577,6 +6624,12 @@ dependencies = [
|
||||
"pin-project-lite",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "scoped-tls"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294"
|
||||
|
||||
[[package]]
|
||||
name = "scopeguard"
|
||||
version = "1.1.0"
|
||||
@@ -7427,6 +7480,12 @@ dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tagptr"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417"
|
||||
|
||||
[[package]]
|
||||
name = "tar"
|
||||
version = "0.4.40"
|
||||
@@ -8093,11 +8152,12 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tracing-appender"
|
||||
version = "0.2.2"
|
||||
version = "0.2.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e"
|
||||
checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf"
|
||||
dependencies = [
|
||||
"crossbeam-channel",
|
||||
"thiserror 1.0.69",
|
||||
"time",
|
||||
"tracing-subscriber",
|
||||
]
|
||||
@@ -8818,10 +8878,32 @@ version = "0.52.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be"
|
||||
dependencies = [
|
||||
"windows-core",
|
||||
"windows-core 0.52.0",
|
||||
"windows-targets 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows"
|
||||
version = "0.61.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893"
|
||||
dependencies = [
|
||||
"windows-collections",
|
||||
"windows-core 0.61.2",
|
||||
"windows-future",
|
||||
"windows-link",
|
||||
"windows-numerics",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-collections"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8"
|
||||
dependencies = [
|
||||
"windows-core 0.61.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-core"
|
||||
version = "0.52.0"
|
||||
@@ -8831,6 +8913,86 @@ dependencies = [
|
||||
"windows-targets 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-core"
|
||||
version = "0.61.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3"
|
||||
dependencies = [
|
||||
"windows-implement",
|
||||
"windows-interface",
|
||||
"windows-link",
|
||||
"windows-result",
|
||||
"windows-strings",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-future"
|
||||
version = "0.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e"
|
||||
dependencies = [
|
||||
"windows-core 0.61.2",
|
||||
"windows-link",
|
||||
"windows-threading",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-implement"
|
||||
version = "0.60.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.100",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-interface"
|
||||
version = "0.59.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.100",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-link"
|
||||
version = "0.1.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a"
|
||||
|
||||
[[package]]
|
||||
name = "windows-numerics"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1"
|
||||
dependencies = [
|
||||
"windows-core 0.61.2",
|
||||
"windows-link",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-result"
|
||||
version = "0.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6"
|
||||
dependencies = [
|
||||
"windows-link",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-strings"
|
||||
version = "0.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57"
|
||||
dependencies = [
|
||||
"windows-link",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-sys"
|
||||
version = "0.48.0"
|
||||
@@ -8889,6 +9051,15 @@ dependencies = [
|
||||
"windows_x86_64_msvc 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-threading"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6"
|
||||
dependencies = [
|
||||
"windows-link",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_gnullvm"
|
||||
version = "0.48.0"
|
||||
@@ -9025,6 +9196,8 @@ dependencies = [
|
||||
"clap",
|
||||
"clap_builder",
|
||||
"const-oid",
|
||||
"crossbeam-epoch",
|
||||
"crossbeam-utils",
|
||||
"crypto-bigint 0.5.5",
|
||||
"der 0.7.8",
|
||||
"deranged",
|
||||
@@ -9071,6 +9244,7 @@ dependencies = [
|
||||
"once_cell",
|
||||
"p256 0.13.2",
|
||||
"parquet",
|
||||
"portable-atomic",
|
||||
"prettyplease",
|
||||
"proc-macro2",
|
||||
"prost 0.13.5",
|
||||
|
||||
10
Cargo.toml
10
Cargo.toml
@@ -46,10 +46,10 @@ members = [
|
||||
"libs/proxy/json",
|
||||
"libs/proxy/postgres-protocol2",
|
||||
"libs/proxy/postgres-types2",
|
||||
"libs/proxy/subzero_core",
|
||||
"libs/proxy/tokio-postgres2",
|
||||
"endpoint_storage",
|
||||
"pgxn/neon/communicator",
|
||||
"proxy/subzero_core",
|
||||
]
|
||||
|
||||
[workspace.package]
|
||||
@@ -135,7 +135,7 @@ lock_api = "0.4.13"
|
||||
md5 = "0.7.0"
|
||||
measured = { version = "0.0.22", features=["lasso"] }
|
||||
measured-process = { version = "0.0.22" }
|
||||
memoffset = "0.9"
|
||||
moka = { version = "0.12", features = ["sync"] }
|
||||
nix = { version = "0.30.1", features = ["dir", "fs", "mman", "process", "socket", "signal", "poll"] }
|
||||
# Do not update to >= 7.0.0, at least. The update will have a significant impact
|
||||
# on compute startup metrics (start_postgres_ms), >= 25% degradation.
|
||||
@@ -146,7 +146,7 @@ oid-registry = "0.7.1"
|
||||
once_cell = "1.13"
|
||||
opentelemetry = "0.30"
|
||||
opentelemetry_sdk = "0.30"
|
||||
opentelemetry-otlp = { version = "0.30", default-features = false, features = ["http-proto", "trace", "http", "reqwest-client"] }
|
||||
opentelemetry-otlp = { version = "0.30", default-features = false, features = ["http-proto", "trace", "http", "reqwest-blocking-client"] }
|
||||
opentelemetry-semantic-conventions = "0.30"
|
||||
parking_lot = "0.12"
|
||||
parquet = { version = "53", default-features = false, features = ["zstd"] }
|
||||
@@ -224,6 +224,7 @@ tracing-log = "0.2"
|
||||
tracing-opentelemetry = "0.31"
|
||||
tracing-serde = "0.2.0"
|
||||
tracing-subscriber = { version = "0.3", default-features = false, features = ["smallvec", "fmt", "tracing-log", "std", "env-filter", "json"] }
|
||||
tracing-appender = "0.2.3"
|
||||
try-lock = "0.2.5"
|
||||
test-log = { version = "0.2.17", default-features = false, features = ["log"] }
|
||||
twox-hash = { version = "1.6.3", default-features = false }
|
||||
@@ -234,10 +235,11 @@ uuid = { version = "1.6.1", features = ["v4", "v7", "serde"] }
|
||||
walkdir = "2.3.2"
|
||||
rustls-native-certs = "0.8"
|
||||
whoami = "1.5.1"
|
||||
zerocopy = { version = "0.8", features = ["derive", "simd"] }
|
||||
json-structural-diff = { version = "0.2.0" }
|
||||
x509-cert = { version = "0.2.5" }
|
||||
x509-parser = "0.16"
|
||||
zerocopy = { version = "0.8", features = ["derive", "simd"] }
|
||||
zeroize = "1.8"
|
||||
|
||||
## TODO replace this with tracing
|
||||
env_logger = "0.11"
|
||||
|
||||
@@ -39,13 +39,13 @@ COPY build-tools/patches/pgcopydbv017.patch /pgcopydbv017.patch
|
||||
|
||||
RUN if [ "${DEBIAN_VERSION}" = "bookworm" ]; then \
|
||||
set -e && \
|
||||
apt update && \
|
||||
apt install -y --no-install-recommends \
|
||||
apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
ca-certificates wget gpg && \
|
||||
wget -qO - https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor -o /usr/share/keyrings/postgresql-keyring.gpg && \
|
||||
echo "deb [signed-by=/usr/share/keyrings/postgresql-keyring.gpg] http://apt.postgresql.org/pub/repos/apt bookworm-pgdg main" > /etc/apt/sources.list.d/pgdg.list && \
|
||||
apt-get update && \
|
||||
apt install -y --no-install-recommends \
|
||||
apt-get install -y --no-install-recommends \
|
||||
build-essential \
|
||||
autotools-dev \
|
||||
libedit-dev \
|
||||
@@ -89,8 +89,7 @@ RUN useradd -ms /bin/bash nonroot -b /home
|
||||
# Use strict mode for bash to catch errors early
|
||||
SHELL ["/bin/bash", "-euo", "pipefail", "-c"]
|
||||
|
||||
RUN mkdir -p /pgcopydb/bin && \
|
||||
mkdir -p /pgcopydb/lib && \
|
||||
RUN mkdir -p /pgcopydb/{bin,lib} && \
|
||||
chmod -R 755 /pgcopydb && \
|
||||
chown -R nonroot:nonroot /pgcopydb
|
||||
|
||||
@@ -106,8 +105,8 @@ RUN echo 'Acquire::Retries "5";' > /etc/apt/apt.conf.d/80-retries && \
|
||||
# 'gdb' is included so that we get backtraces of core dumps produced in
|
||||
# regression tests
|
||||
RUN set -e \
|
||||
&& apt update \
|
||||
&& apt install -y \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
autoconf \
|
||||
automake \
|
||||
bison \
|
||||
@@ -183,22 +182,22 @@ RUN curl -sL "https://github.com/peak/s5cmd/releases/download/v${S5CMD_VERSION}/
|
||||
ENV LLVM_VERSION=20
|
||||
RUN curl -fsSL 'https://apt.llvm.org/llvm-snapshot.gpg.key' | apt-key add - \
|
||||
&& echo "deb http://apt.llvm.org/${DEBIAN_VERSION}/ llvm-toolchain-${DEBIAN_VERSION}-${LLVM_VERSION} main" > /etc/apt/sources.list.d/llvm.stable.list \
|
||||
&& apt update \
|
||||
&& apt install -y clang-${LLVM_VERSION} llvm-${LLVM_VERSION} \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y --no-install-recommends clang-${LLVM_VERSION} llvm-${LLVM_VERSION} \
|
||||
&& bash -c 'for f in /usr/bin/clang*-${LLVM_VERSION} /usr/bin/llvm*-${LLVM_VERSION}; do ln -s "${f}" "${f%-${LLVM_VERSION}}"; done' \
|
||||
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
|
||||
# Install node
|
||||
ENV NODE_VERSION=24
|
||||
RUN curl -fsSL https://deb.nodesource.com/setup_${NODE_VERSION}.x | bash - \
|
||||
&& apt install -y nodejs \
|
||||
&& apt-get install -y --no-install-recommends nodejs \
|
||||
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
|
||||
# Install docker
|
||||
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg \
|
||||
&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian ${DEBIAN_VERSION} stable" > /etc/apt/sources.list.d/docker.list \
|
||||
&& apt update \
|
||||
&& apt install -y docker-ce docker-ce-cli \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y --no-install-recommends docker-ce docker-ce-cli \
|
||||
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
|
||||
# Configure sudo & docker
|
||||
@@ -215,12 +214,11 @@ RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-$(uname -m).zip" -o "aws
|
||||
# Mold: A Modern Linker
|
||||
ENV MOLD_VERSION=v2.37.1
|
||||
RUN set -e \
|
||||
&& git clone https://github.com/rui314/mold.git \
|
||||
&& git clone -b "${MOLD_VERSION}" --depth 1 https://github.com/rui314/mold.git \
|
||||
&& mkdir mold/build \
|
||||
&& cd mold/build \
|
||||
&& git checkout ${MOLD_VERSION} \
|
||||
&& cd mold/build \
|
||||
&& cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=clang++ .. \
|
||||
&& cmake --build . -j $(nproc) \
|
||||
&& cmake --build . -j "$(nproc)" \
|
||||
&& cmake --install . \
|
||||
&& cd .. \
|
||||
&& rm -rf mold
|
||||
@@ -254,7 +252,7 @@ ENV ICU_VERSION=67.1
|
||||
ENV ICU_PREFIX=/usr/local/icu
|
||||
|
||||
# Download and build static ICU
|
||||
RUN wget -O /tmp/libicu-${ICU_VERSION}.tgz https://github.com/unicode-org/icu/releases/download/release-${ICU_VERSION//./-}/icu4c-${ICU_VERSION//./_}-src.tgz && \
|
||||
RUN wget -O "/tmp/libicu-${ICU_VERSION}.tgz" https://github.com/unicode-org/icu/releases/download/release-${ICU_VERSION//./-}/icu4c-${ICU_VERSION//./_}-src.tgz && \
|
||||
echo "94a80cd6f251a53bd2a997f6f1b5ac6653fe791dfab66e1eb0227740fb86d5dc /tmp/libicu-${ICU_VERSION}.tgz" | sha256sum --check && \
|
||||
mkdir /tmp/icu && \
|
||||
pushd /tmp/icu && \
|
||||
@@ -265,8 +263,7 @@ RUN wget -O /tmp/libicu-${ICU_VERSION}.tgz https://github.com/unicode-org/icu/re
|
||||
make install && \
|
||||
popd && \
|
||||
rm -rf icu && \
|
||||
rm -f /tmp/libicu-${ICU_VERSION}.tgz && \
|
||||
popd
|
||||
rm -f /tmp/libicu-${ICU_VERSION}.tgz
|
||||
|
||||
# Switch to nonroot user
|
||||
USER nonroot:nonroot
|
||||
@@ -279,19 +276,19 @@ ENV PYTHON_VERSION=3.11.12 \
|
||||
PYENV_ROOT=/home/nonroot/.pyenv \
|
||||
PATH=/home/nonroot/.pyenv/shims:/home/nonroot/.pyenv/bin:/home/nonroot/.poetry/bin:$PATH
|
||||
RUN set -e \
|
||||
&& cd $HOME \
|
||||
&& cd "$HOME" \
|
||||
&& curl -sSO https://raw.githubusercontent.com/pyenv/pyenv-installer/master/bin/pyenv-installer \
|
||||
&& chmod +x pyenv-installer \
|
||||
&& ./pyenv-installer \
|
||||
&& export PYENV_ROOT=/home/nonroot/.pyenv \
|
||||
&& export PATH="$PYENV_ROOT/bin:$PATH" \
|
||||
&& export PATH="$PYENV_ROOT/shims:$PATH" \
|
||||
&& pyenv install ${PYTHON_VERSION} \
|
||||
&& pyenv global ${PYTHON_VERSION} \
|
||||
&& pyenv install "${PYTHON_VERSION}" \
|
||||
&& pyenv global "${PYTHON_VERSION}" \
|
||||
&& python --version \
|
||||
&& pip install --upgrade pip \
|
||||
&& pip install --no-cache-dir --upgrade pip \
|
||||
&& pip --version \
|
||||
&& pip install pipenv wheel poetry
|
||||
&& pip install --no-cache-dir pipenv wheel poetry
|
||||
|
||||
# Switch to nonroot user (again)
|
||||
USER nonroot:nonroot
|
||||
@@ -317,13 +314,13 @@ RUN curl -sSO https://static.rust-lang.org/rustup/dist/$(uname -m)-unknown-linux
|
||||
. "$HOME/.cargo/env" && \
|
||||
cargo --version && rustup --version && \
|
||||
rustup component add llvm-tools rustfmt clippy && \
|
||||
cargo install rustfilt --locked --version ${RUSTFILT_VERSION} && \
|
||||
cargo install cargo-hakari --locked --version ${CARGO_HAKARI_VERSION} && \
|
||||
cargo install cargo-deny --locked --version ${CARGO_DENY_VERSION} && \
|
||||
cargo install cargo-hack --locked --version ${CARGO_HACK_VERSION} && \
|
||||
cargo install cargo-nextest --locked --version ${CARGO_NEXTEST_VERSION} && \
|
||||
cargo install cargo-chef --locked --version ${CARGO_CHEF_VERSION} && \
|
||||
cargo install diesel_cli --locked --version ${CARGO_DIESEL_CLI_VERSION} \
|
||||
cargo install rustfilt --locked --version "${RUSTFILT_VERSION}" && \
|
||||
cargo install cargo-hakari --locked --version "${CARGO_HAKARI_VERSION}" && \
|
||||
cargo install cargo-deny --locked --version "${CARGO_DENY_VERSION}" && \
|
||||
cargo install cargo-hack --locked --version "${CARGO_HACK_VERSION}" && \
|
||||
cargo install cargo-nextest --locked --version "${CARGO_NEXTEST_VERSION}" && \
|
||||
cargo install cargo-chef --locked --version "${CARGO_CHEF_VERSION}" && \
|
||||
cargo install diesel_cli --locked --version "${CARGO_DIESEL_CLI_VERSION}" \
|
||||
--features postgres-bundled --no-default-features && \
|
||||
rm -rf /home/nonroot/.cargo/registry && \
|
||||
rm -rf /home/nonroot/.cargo/git
|
||||
|
||||
51
build-tools/package-lock.json
generated
51
build-tools/package-lock.json
generated
@@ -6,7 +6,7 @@
|
||||
"": {
|
||||
"name": "build-tools",
|
||||
"devDependencies": {
|
||||
"@redocly/cli": "1.34.4",
|
||||
"@redocly/cli": "1.34.5",
|
||||
"@sourcemeta/jsonschema": "10.0.0"
|
||||
}
|
||||
},
|
||||
@@ -472,9 +472,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@redocly/cli": {
|
||||
"version": "1.34.4",
|
||||
"resolved": "https://registry.npmjs.org/@redocly/cli/-/cli-1.34.4.tgz",
|
||||
"integrity": "sha512-seH/GgrjSB1EeOsgJ/4Ct6Jk2N7sh12POn/7G8UQFARMyUMJpe1oHtBwT2ndfp4EFCpgBAbZ/82Iw6dwczNxEA==",
|
||||
"version": "1.34.5",
|
||||
"resolved": "https://registry.npmjs.org/@redocly/cli/-/cli-1.34.5.tgz",
|
||||
"integrity": "sha512-5IEwxs7SGP5KEXjBKLU8Ffdz9by/KqNSeBk6YUVQaGxMXK//uYlTJIPntgUXbo1KAGG2d2q2XF8y4iFz6qNeiw==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
@@ -484,14 +484,14 @@
|
||||
"@opentelemetry/sdk-trace-node": "1.26.0",
|
||||
"@opentelemetry/semantic-conventions": "1.27.0",
|
||||
"@redocly/config": "^0.22.0",
|
||||
"@redocly/openapi-core": "1.34.4",
|
||||
"@redocly/respect-core": "1.34.4",
|
||||
"@redocly/openapi-core": "1.34.5",
|
||||
"@redocly/respect-core": "1.34.5",
|
||||
"abort-controller": "^3.0.0",
|
||||
"chokidar": "^3.5.1",
|
||||
"colorette": "^1.2.0",
|
||||
"core-js": "^3.32.1",
|
||||
"dotenv": "16.4.7",
|
||||
"form-data": "^4.0.0",
|
||||
"form-data": "^4.0.4",
|
||||
"get-port-please": "^3.0.1",
|
||||
"glob": "^7.1.6",
|
||||
"handlebars": "^4.7.6",
|
||||
@@ -522,9 +522,9 @@
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@redocly/openapi-core": {
|
||||
"version": "1.34.4",
|
||||
"resolved": "https://registry.npmjs.org/@redocly/openapi-core/-/openapi-core-1.34.4.tgz",
|
||||
"integrity": "sha512-hf53xEgpXIgWl3b275PgZU3OTpYh1RoD2LHdIfQ1JzBNTWsiNKczTEsI/4Tmh2N1oq9YcphhSMyk3lDh85oDjg==",
|
||||
"version": "1.34.5",
|
||||
"resolved": "https://registry.npmjs.org/@redocly/openapi-core/-/openapi-core-1.34.5.tgz",
|
||||
"integrity": "sha512-0EbE8LRbkogtcCXU7liAyC00n9uNG9hJ+eMyHFdUsy9lB/WGqnEBgwjA9q2cyzAVcdTkQqTBBU1XePNnN3OijA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
@@ -544,21 +544,21 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@redocly/respect-core": {
|
||||
"version": "1.34.4",
|
||||
"resolved": "https://registry.npmjs.org/@redocly/respect-core/-/respect-core-1.34.4.tgz",
|
||||
"integrity": "sha512-MitKyKyQpsizA4qCVv+MjXL4WltfhFQAoiKiAzrVR1Kusro3VhYb6yJuzoXjiJhR0ukLP5QOP19Vcs7qmj9dZg==",
|
||||
"version": "1.34.5",
|
||||
"resolved": "https://registry.npmjs.org/@redocly/respect-core/-/respect-core-1.34.5.tgz",
|
||||
"integrity": "sha512-GheC/g/QFztPe9UA9LamooSplQuy9pe0Yr8XGTqkz0ahivLDl7svoy/LSQNn1QH3XGtLKwFYMfTwFR2TAYyh5Q==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@faker-js/faker": "^7.6.0",
|
||||
"@redocly/ajv": "8.11.2",
|
||||
"@redocly/openapi-core": "1.34.4",
|
||||
"@redocly/openapi-core": "1.34.5",
|
||||
"better-ajv-errors": "^1.2.0",
|
||||
"colorette": "^2.0.20",
|
||||
"concat-stream": "^2.0.0",
|
||||
"cookie": "^0.7.2",
|
||||
"dotenv": "16.4.7",
|
||||
"form-data": "4.0.0",
|
||||
"form-data": "^4.0.4",
|
||||
"jest-diff": "^29.3.1",
|
||||
"jest-matcher-utils": "^29.3.1",
|
||||
"js-yaml": "4.1.0",
|
||||
@@ -582,21 +582,6 @@
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@redocly/respect-core/node_modules/form-data": {
|
||||
"version": "4.0.0",
|
||||
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
|
||||
"integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"asynckit": "^0.4.0",
|
||||
"combined-stream": "^1.0.8",
|
||||
"mime-types": "^2.1.12"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 6"
|
||||
}
|
||||
},
|
||||
"node_modules/@sinclair/typebox": {
|
||||
"version": "0.27.8",
|
||||
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
|
||||
@@ -1345,9 +1330,9 @@
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/form-data": {
|
||||
"version": "4.0.3",
|
||||
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.3.tgz",
|
||||
"integrity": "sha512-qsITQPfmvMOSAdeyZ+12I1c+CKSstAFAwu+97zrnWAbIr5u8wfsExUzCesVLC8NgHuRUqNN4Zy6UPWUTRGslcA==",
|
||||
"version": "4.0.4",
|
||||
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz",
|
||||
"integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
"name": "build-tools",
|
||||
"private": true,
|
||||
"devDependencies": {
|
||||
"@redocly/cli": "1.34.4",
|
||||
"@redocly/cli": "1.34.5",
|
||||
"@sourcemeta/jsonschema": "10.0.0"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,7 +26,13 @@ commands:
|
||||
- name: postgres-exporter
|
||||
user: nobody
|
||||
sysvInitAction: respawn
|
||||
shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter pgaudit.log=none" /bin/postgres_exporter --config.file=/etc/postgres_exporter.yml'
|
||||
# Turn off database collector (`--no-collector.database`), we don't use `pg_database_size_bytes` metric anyway, see
|
||||
# https://github.com/neondatabase/flux-fleet/blob/5e19b3fd897667b70d9a7ad4aa06df0ca22b49ff/apps/base/compute-metrics/scrape-compute-pg-exporter-neon.yaml#L29
|
||||
# but it's enabled by default and it doesn't filter out invalid databases, see
|
||||
# https://github.com/prometheus-community/postgres_exporter/blob/06a553c8166512c9d9c5ccf257b0f9bba8751dbc/collector/pg_database.go#L67
|
||||
# so if it hits one, it starts spamming logs
|
||||
# ERROR: [NEON_SMGR] [reqid d9700000018] could not read db size of db 705302 from page server at lsn 5/A2457EB0
|
||||
shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter pgaudit.log=none" /bin/postgres_exporter --no-collector.database --config.file=/etc/postgres_exporter.yml'
|
||||
- name: pgbouncer-exporter
|
||||
user: postgres
|
||||
sysvInitAction: respawn
|
||||
|
||||
@@ -26,7 +26,13 @@ commands:
|
||||
- name: postgres-exporter
|
||||
user: nobody
|
||||
sysvInitAction: respawn
|
||||
shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter pgaudit.log=none" /bin/postgres_exporter --config.file=/etc/postgres_exporter.yml'
|
||||
# Turn off database collector (`--no-collector.database`), we don't use `pg_database_size_bytes` metric anyway, see
|
||||
# https://github.com/neondatabase/flux-fleet/blob/5e19b3fd897667b70d9a7ad4aa06df0ca22b49ff/apps/base/compute-metrics/scrape-compute-pg-exporter-neon.yaml#L29
|
||||
# but it's enabled by default and it doesn't filter out invalid databases, see
|
||||
# https://github.com/prometheus-community/postgres_exporter/blob/06a553c8166512c9d9c5ccf257b0f9bba8751dbc/collector/pg_database.go#L67
|
||||
# so if it hits one, it starts spamming logs
|
||||
# ERROR: [NEON_SMGR] [reqid d9700000018] could not read db size of db 705302 from page server at lsn 5/A2457EB0
|
||||
shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter pgaudit.log=none" /bin/postgres_exporter --no-collector.database --config.file=/etc/postgres_exporter.yml'
|
||||
- name: pgbouncer-exporter
|
||||
user: postgres
|
||||
sysvInitAction: respawn
|
||||
|
||||
@@ -62,6 +62,7 @@ tokio-stream.workspace = true
|
||||
tonic.workspace = true
|
||||
tower-otel.workspace = true
|
||||
tracing.workspace = true
|
||||
tracing-appender.workspace = true
|
||||
tracing-opentelemetry.workspace = true
|
||||
tracing-subscriber.workspace = true
|
||||
tracing-utils.workspace = true
|
||||
|
||||
@@ -52,8 +52,14 @@ stateDiagram-v2
|
||||
Init --> Running : Started Postgres
|
||||
Running --> TerminationPendingFast : Requested termination
|
||||
Running --> TerminationPendingImmediate : Requested termination
|
||||
Running --> ConfigurationPending : Received a /configure request with spec
|
||||
Running --> RefreshConfigurationPending : Received a /refresh_configuration request, compute node will pull a new spec and reconfigure
|
||||
RefreshConfigurationPending --> RefreshConfiguration: Received compute spec and started configuration
|
||||
RefreshConfiguration --> Running : Compute has been re-configured
|
||||
RefreshConfiguration --> RefreshConfigurationPending : Configuration failed and to be retried
|
||||
TerminationPendingFast --> Terminated : Terminated compute with 30s delay for cplane to inspect status
|
||||
TerminationPendingImmediate --> Terminated : Terminated compute immediately
|
||||
Failed --> RefreshConfigurationPending : Received a /refresh_configuration request
|
||||
Failed --> [*] : Compute exited
|
||||
Terminated --> [*] : Compute exited
|
||||
```
|
||||
|
||||
@@ -49,9 +49,10 @@ use compute_tools::compute::{
|
||||
BUILD_TAG, ComputeNode, ComputeNodeParams, forward_termination_signal,
|
||||
};
|
||||
use compute_tools::extension_server::get_pg_version_string;
|
||||
use compute_tools::logger::*;
|
||||
use compute_tools::params::*;
|
||||
use compute_tools::pg_isready::get_pg_isready_bin;
|
||||
use compute_tools::spec::*;
|
||||
use compute_tools::{hadron_metrics, installed_extensions, logger::*};
|
||||
use rlimit::{Resource, setrlimit};
|
||||
use signal_hook::consts::{SIGINT, SIGQUIT, SIGTERM};
|
||||
use signal_hook::iterator::Signals;
|
||||
@@ -194,11 +195,19 @@ fn main() -> Result<()> {
|
||||
.build()?;
|
||||
let _rt_guard = runtime.enter();
|
||||
|
||||
let tracing_provider = init(cli.dev)?;
|
||||
let mut log_dir = None;
|
||||
if cli.lakebase_mode {
|
||||
log_dir = std::env::var("COMPUTE_CTL_LOG_DIRECTORY").ok();
|
||||
}
|
||||
|
||||
let (tracing_provider, _file_logs_guard) = init(cli.dev, log_dir)?;
|
||||
|
||||
// enable core dumping for all child processes
|
||||
setrlimit(Resource::CORE, rlimit::INFINITY, rlimit::INFINITY)?;
|
||||
|
||||
installed_extensions::initialize_metrics();
|
||||
hadron_metrics::initialize_metrics();
|
||||
|
||||
let connstr = Url::parse(&cli.connstr).context("cannot parse connstr as a URL")?;
|
||||
|
||||
let config = get_config(&cli)?;
|
||||
@@ -226,7 +235,12 @@ fn main() -> Result<()> {
|
||||
cli.installed_extensions_collection_interval,
|
||||
)),
|
||||
pg_init_timeout: cli.pg_init_timeout.map(Duration::from_secs),
|
||||
pg_isready_bin: get_pg_isready_bin(&cli.pgbin),
|
||||
instance_id: std::env::var("INSTANCE_ID").ok(),
|
||||
lakebase_mode: cli.lakebase_mode,
|
||||
build_tag: BUILD_TAG.to_string(),
|
||||
control_plane_uri: cli.control_plane_uri,
|
||||
config_path_test_only: cli.config,
|
||||
},
|
||||
config,
|
||||
)?;
|
||||
@@ -238,8 +252,14 @@ fn main() -> Result<()> {
|
||||
deinit_and_exit(tracing_provider, exit_code);
|
||||
}
|
||||
|
||||
fn init(dev_mode: bool) -> Result<Option<tracing_utils::Provider>> {
|
||||
let provider = init_tracing_and_logging(DEFAULT_LOG_LEVEL)?;
|
||||
fn init(
|
||||
dev_mode: bool,
|
||||
log_dir: Option<String>,
|
||||
) -> Result<(
|
||||
Option<tracing_utils::Provider>,
|
||||
Option<tracing_appender::non_blocking::WorkerGuard>,
|
||||
)> {
|
||||
let (provider, file_logs_guard) = init_tracing_and_logging(DEFAULT_LOG_LEVEL, &log_dir)?;
|
||||
|
||||
let mut signals = Signals::new([SIGINT, SIGTERM, SIGQUIT])?;
|
||||
thread::spawn(move || {
|
||||
@@ -250,7 +270,7 @@ fn init(dev_mode: bool) -> Result<Option<tracing_utils::Provider>> {
|
||||
|
||||
info!("compute build_tag: {}", &BUILD_TAG.to_string());
|
||||
|
||||
Ok(provider)
|
||||
Ok((provider, file_logs_guard))
|
||||
}
|
||||
|
||||
fn get_config(cli: &Cli) -> Result<ComputeConfig> {
|
||||
|
||||
@@ -21,6 +21,7 @@ use postgres::NoTls;
|
||||
use postgres::error::SqlState;
|
||||
use remote_storage::{DownloadError, RemotePath};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::ffi::OsString;
|
||||
use std::os::unix::fs::{PermissionsExt, symlink};
|
||||
use std::path::Path;
|
||||
use std::process::{Command, Stdio};
|
||||
@@ -40,8 +41,9 @@ use utils::shard::{ShardCount, ShardIndex, ShardNumber};
|
||||
|
||||
use crate::configurator::launch_configurator;
|
||||
use crate::disk_quota::set_disk_quota;
|
||||
use crate::hadron_metrics::COMPUTE_ATTACHED;
|
||||
use crate::installed_extensions::get_installed_extensions;
|
||||
use crate::logger::startup_context_from_env;
|
||||
use crate::logger::{self, startup_context_from_env};
|
||||
use crate::lsn_lease::launch_lsn_lease_bg_task_for_static;
|
||||
use crate::metrics::COMPUTE_CTL_UP;
|
||||
use crate::monitor::launch_monitor;
|
||||
@@ -113,11 +115,17 @@ pub struct ComputeNodeParams {
|
||||
|
||||
/// Interval for installed extensions collection
|
||||
pub installed_extensions_collection_interval: Arc<AtomicU64>,
|
||||
|
||||
/// Hadron instance ID of the compute node.
|
||||
pub instance_id: Option<String>,
|
||||
/// Timeout of PG compute startup in the Init state.
|
||||
pub pg_init_timeout: Option<Duration>,
|
||||
|
||||
// Path to the `pg_isready` binary.
|
||||
pub pg_isready_bin: String,
|
||||
pub lakebase_mode: bool,
|
||||
|
||||
pub build_tag: String,
|
||||
pub control_plane_uri: Option<String>,
|
||||
pub config_path_test_only: Option<OsString>,
|
||||
}
|
||||
|
||||
type TaskHandle = Mutex<Option<JoinHandle<()>>>;
|
||||
@@ -405,6 +413,52 @@ struct StartVmMonitorResult {
|
||||
vm_monitor: Option<JoinHandle<Result<()>>>,
|
||||
}
|
||||
|
||||
/// Databricks-specific environment variables to be passed to the `postgres` sub-process.
|
||||
pub struct DatabricksEnvVars {
|
||||
/// The Databricks "endpoint ID" of the compute instance. Used by `postgres` to check
|
||||
/// the token scopes of internal auth tokens.
|
||||
pub endpoint_id: String,
|
||||
/// Hostname of the Databricks workspace URL this compute instance belongs to.
|
||||
/// Used by postgres to verify Databricks PAT tokens.
|
||||
pub workspace_host: String,
|
||||
}
|
||||
|
||||
impl DatabricksEnvVars {
|
||||
pub fn new(compute_spec: &ComputeSpec, compute_id: Option<&String>) -> Self {
|
||||
// compute_id is a string of the form "{endpoint_id}/{compute_idx}"
|
||||
// endpoint_id is a uuid. We only need to pass down endpoint_id to postgres.
|
||||
// Panics if compute_id is not set or not in the expected format.
|
||||
let endpoint_id = compute_id.unwrap().split('/').next().unwrap().to_string();
|
||||
let workspace_host = compute_spec
|
||||
.databricks_settings
|
||||
.as_ref()
|
||||
.map(|s| s.databricks_workspace_host.clone())
|
||||
.unwrap_or("".to_string());
|
||||
Self {
|
||||
endpoint_id,
|
||||
workspace_host,
|
||||
}
|
||||
}
|
||||
|
||||
/// Constants for the names of Databricks-specific postgres environment variables.
|
||||
const DATABRICKS_ENDPOINT_ID_ENVVAR: &'static str = "DATABRICKS_ENDPOINT_ID";
|
||||
const DATABRICKS_WORKSPACE_HOST_ENVVAR: &'static str = "DATABRICKS_WORKSPACE_HOST";
|
||||
|
||||
/// Convert DatabricksEnvVars to a list of string pairs that can be passed as env vars. Consumes `self`.
|
||||
pub fn to_env_var_list(self) -> Vec<(String, String)> {
|
||||
vec![
|
||||
(
|
||||
Self::DATABRICKS_ENDPOINT_ID_ENVVAR.to_string(),
|
||||
self.endpoint_id.clone(),
|
||||
),
|
||||
(
|
||||
Self::DATABRICKS_WORKSPACE_HOST_ENVVAR.to_string(),
|
||||
self.workspace_host.clone(),
|
||||
),
|
||||
]
|
||||
}
|
||||
}
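For context, a minimal sketch of how a (key, value) list such as the output of to_env_var_list() is typically handed to a child process via std::process::Command; the values and the `env` binary below are stand-ins for illustration, not part of this commit.

use std::process::Command;

fn main() -> std::io::Result<()> {
    // Hypothetical values; in compute_ctl these come from the compute spec.
    let env_vars = vec![
        ("DATABRICKS_ENDPOINT_ID".to_string(), "0000-demo-endpoint".to_string()),
        ("DATABRICKS_WORKSPACE_HOST".to_string(), "workspace.example.com".to_string()),
    ];

    // `Command::envs` accepts any iterator of (K, V) pairs, so the list can be
    // extended with e.g. a NEON_AUTH_TOKEN pair before spawning.
    let status = Command::new("env").envs(env_vars).status()?;
    println!("child exited with {status}");
    Ok(())
}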
impl ComputeNode {
|
||||
pub fn new(params: ComputeNodeParams, config: ComputeConfig) -> Result<Self> {
|
||||
let connstr = params.connstr.as_str();
|
||||
@@ -486,6 +540,7 @@ impl ComputeNode {
|
||||
port: this.params.external_http_port,
|
||||
config: this.compute_ctl_config.clone(),
|
||||
compute_id: this.params.compute_id.clone(),
|
||||
instance_id: this.params.instance_id.clone(),
|
||||
}
|
||||
.launch(&this);
|
||||
|
||||
@@ -1402,6 +1457,8 @@ impl ComputeNode {
|
||||
let pgdata_path = Path::new(&self.params.pgdata);
|
||||
|
||||
let tls_config = self.tls_config(&pspec.spec);
|
||||
let databricks_settings = spec.databricks_settings.as_ref();
|
||||
let postgres_port = self.params.connstr.port();
|
||||
|
||||
// Remove/create an empty pgdata directory and put configuration there.
|
||||
self.create_pgdata()?;
|
||||
@@ -1409,8 +1466,11 @@ impl ComputeNode {
|
||||
pgdata_path,
|
||||
&self.params,
|
||||
&pspec.spec,
|
||||
postgres_port,
|
||||
self.params.internal_http_port,
|
||||
tls_config,
|
||||
databricks_settings,
|
||||
self.params.lakebase_mode,
|
||||
)?;
|
||||
|
||||
// Syncing safekeepers is only safe with primary nodes: if a primary
|
||||
@@ -1450,8 +1510,28 @@ impl ComputeNode {
|
||||
)
|
||||
})?;
|
||||
|
||||
// Update pg_hba.conf received with basebackup.
|
||||
update_pg_hba(pgdata_path, None)?;
|
||||
if let Some(settings) = databricks_settings {
|
||||
copy_tls_certificates(
|
||||
&settings.pg_compute_tls_settings.key_file,
|
||||
&settings.pg_compute_tls_settings.cert_file,
|
||||
pgdata_path,
|
||||
)?;
|
||||
|
||||
// Update pg_hba.conf received with basebackup including additional databricks settings.
|
||||
update_pg_hba(pgdata_path, Some(&settings.databricks_pg_hba))?;
|
||||
update_pg_ident(pgdata_path, Some(&settings.databricks_pg_ident))?;
|
||||
} else {
|
||||
// Update pg_hba.conf received with basebackup.
|
||||
update_pg_hba(pgdata_path, None)?;
|
||||
}
|
||||
|
||||
if let Some(databricks_settings) = spec.databricks_settings.as_ref() {
|
||||
copy_tls_certificates(
|
||||
&databricks_settings.pg_compute_tls_settings.key_file,
|
||||
&databricks_settings.pg_compute_tls_settings.cert_file,
|
||||
pgdata_path,
|
||||
)?;
|
||||
}
|
||||
|
||||
// Place pg_dynshmem under /dev/shm. This allows us to use
|
||||
// 'dynamic_shared_memory_type = mmap' so that the files are placed in
|
||||
@@ -1564,14 +1644,31 @@ impl ComputeNode {
|
||||
pub fn start_postgres(&self, storage_auth_token: Option<String>) -> Result<PostgresHandle> {
|
||||
let pgdata_path = Path::new(&self.params.pgdata);
|
||||
|
||||
let env_vars: Vec<(String, String)> = if self.params.lakebase_mode {
|
||||
let databricks_env_vars = {
|
||||
let state = self.state.lock().unwrap();
|
||||
let spec = &state.pspec.as_ref().unwrap().spec;
|
||||
DatabricksEnvVars::new(spec, Some(&self.params.compute_id))
|
||||
};
|
||||
|
||||
info!(
|
||||
"Starting Postgres for databricks endpoint id: {}",
|
||||
&databricks_env_vars.endpoint_id
|
||||
);
|
||||
|
||||
let mut env_vars = databricks_env_vars.to_env_var_list();
|
||||
env_vars.extend(storage_auth_token.map(|t| ("NEON_AUTH_TOKEN".to_string(), t)));
|
||||
env_vars
|
||||
} else if let Some(storage_auth_token) = &storage_auth_token {
|
||||
vec![("NEON_AUTH_TOKEN".to_owned(), storage_auth_token.to_owned())]
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
|
||||
// Run postgres as a child process.
|
||||
let mut pg = maybe_cgexec(&self.params.pgbin)
|
||||
.args(["-D", &self.params.pgdata])
|
||||
.envs(if let Some(storage_auth_token) = &storage_auth_token {
|
||||
vec![("NEON_AUTH_TOKEN", storage_auth_token)]
|
||||
} else {
|
||||
vec![]
|
||||
})
|
||||
.envs(env_vars)
|
||||
.stderr(Stdio::piped())
|
||||
.spawn()
|
||||
.expect("cannot start postgres process");
|
||||
@@ -1785,6 +1882,34 @@ impl ComputeNode {
|
||||
Ok::<(), anyhow::Error>(())
|
||||
}
|
||||
|
||||
// Signal to the configurator to refresh the configuration by pulling a new spec from the HCC.
|
||||
// Note that this merely triggers a notification on a condition variable the configurator thread
|
||||
// waits on. The configurator thread (in configurator.rs) pulls the new spec from the HCC and
|
||||
// applies it.
|
||||
pub async fn signal_refresh_configuration(&self) -> Result<()> {
|
||||
let states_allowing_configuration_refresh = [
|
||||
ComputeStatus::Running,
|
||||
ComputeStatus::Failed,
|
||||
ComputeStatus::RefreshConfigurationPending,
|
||||
];
|
||||
|
||||
let mut state = self.state.lock().expect("state lock poisoned");
|
||||
if states_allowing_configuration_refresh.contains(&state.status) {
|
||||
state.status = ComputeStatus::RefreshConfigurationPending;
|
||||
self.state_changed.notify_all();
|
||||
Ok(())
|
||||
} else if state.status == ComputeStatus::Init {
|
||||
// If the compute is in Init state, we can't refresh the configuration immediately,
|
||||
// but we should be able to do that soon.
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow::anyhow!(
|
||||
"Cannot refresh compute configuration in state {:?}",
|
||||
state.status
|
||||
))
|
||||
}
|
||||
}
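The comment above describes a notify-on-condvar handshake between this method and the configurator thread. A minimal std-only sketch of that pattern (not the real ComputeNode types) looks like this:

use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;

fn main() {
    // Shared (pending-refresh flag, condvar), standing in for ComputeState + state_changed.
    let state = Arc::new((Mutex::new(false), Condvar::new()));

    let worker_state = Arc::clone(&state);
    let worker = thread::spawn(move || {
        let (lock, cvar) = &*worker_state;
        let mut pending = lock.lock().unwrap();
        while !*pending {
            pending = cvar.wait(pending).unwrap();
        }
        println!("refresh requested, configurator would pull a new spec here");
    });

    thread::sleep(Duration::from_millis(100));
    let (lock, cvar) = &*state;
    *lock.lock().unwrap() = true;
    cvar.notify_all();

    worker.join().unwrap();
}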
// Wrapped this around `pg_ctl reload`, but right now we don't use
|
||||
// `pg_ctl` for start / stop.
|
||||
#[instrument(skip_all)]
|
||||
@@ -1846,12 +1971,16 @@ impl ComputeNode {
|
||||
|
||||
// Write new config
|
||||
let pgdata_path = Path::new(&self.params.pgdata);
|
||||
let postgres_port = self.params.connstr.port();
|
||||
config::write_postgres_conf(
|
||||
pgdata_path,
|
||||
&self.params,
|
||||
&spec,
|
||||
postgres_port,
|
||||
self.params.internal_http_port,
|
||||
tls_config,
|
||||
spec.databricks_settings.as_ref(),
|
||||
self.params.lakebase_mode,
|
||||
)?;
|
||||
|
||||
self.pg_reload_conf()?;
|
||||
@@ -1957,6 +2086,8 @@ impl ComputeNode {
|
||||
// wait
|
||||
ComputeStatus::Init
|
||||
| ComputeStatus::Configuration
|
||||
| ComputeStatus::RefreshConfiguration
|
||||
| ComputeStatus::RefreshConfigurationPending
|
||||
| ComputeStatus::Empty => {
|
||||
state = self.state_changed.wait(state).unwrap();
|
||||
}
|
||||
@@ -2513,6 +2644,34 @@ LIMIT 100",
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the compute spec and update related metrics.
|
||||
/// This is the central place where pspec is updated.
|
||||
pub fn set_spec(params: &ComputeNodeParams, state: &mut ComputeState, pspec: ParsedSpec) {
|
||||
state.pspec = Some(pspec);
|
||||
ComputeNode::update_attached_metric(params, state);
|
||||
let _ = logger::update_ids(¶ms.instance_id, &Some(params.compute_id.clone()));
|
||||
}
|
||||
|
||||
pub fn update_attached_metric(params: &ComputeNodeParams, state: &mut ComputeState) {
|
||||
// Update the pg_cctl_attached gauge when all identifiers are available.
|
||||
if let Some(instance_id) = ¶ms.instance_id {
|
||||
if let Some(pspec) = &state.pspec {
|
||||
// Clear all values in the metric
|
||||
COMPUTE_ATTACHED.reset();
|
||||
|
||||
// Set new metric value
|
||||
COMPUTE_ATTACHED
|
||||
.with_label_values(&[
|
||||
¶ms.compute_id,
|
||||
instance_id,
|
||||
&pspec.tenant_id.to_string(),
|
||||
&pspec.timeline_id.to_string(),
|
||||
])
|
||||
.set(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn installed_extensions(conf: tokio_postgres::Config) -> Result<()> {
|
||||
|
||||
@@ -90,6 +90,7 @@ impl ComputeNode {
|
||||
}
|
||||
|
||||
/// If there is an ongoing prewarm request, return `false`; otherwise return `true`.
|
||||
/// Has a failpoint "compute-prewarm"
|
||||
pub fn prewarm_lfc(self: &Arc<Self>, from_endpoint: Option<String>) -> bool {
|
||||
{
|
||||
let state = &mut self.state.lock().unwrap().lfc_prewarm_state;
|
||||
@@ -112,9 +113,8 @@ impl ComputeNode {
|
||||
Err(err) => {
|
||||
crate::metrics::LFC_PREWARM_ERRORS.inc();
|
||||
error!(%err, "could not prewarm LFC");
|
||||
|
||||
LfcPrewarmState::Failed {
|
||||
error: err.to_string(),
|
||||
error: format!("{err:#}"),
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -135,16 +135,20 @@ impl ComputeNode {
|
||||
async fn prewarm_impl(&self, from_endpoint: Option<String>) -> Result<bool> {
|
||||
let EndpointStoragePair { url, token } = self.endpoint_storage_pair(from_endpoint)?;
|
||||
|
||||
#[cfg(feature = "testing")]
|
||||
fail::fail_point!("compute-prewarm", |_| {
|
||||
bail!("prewarm configured to fail because of a failpoint")
|
||||
});
|
||||
|
||||
info!(%url, "requesting LFC state from endpoint storage");
|
||||
let request = Client::new().get(&url).bearer_auth(token);
|
||||
let res = request.send().await.context("querying endpoint storage")?;
|
||||
let status = res.status();
|
||||
match status {
|
||||
match res.status() {
|
||||
StatusCode::OK => (),
|
||||
StatusCode::NOT_FOUND => {
|
||||
return Ok(false);
|
||||
}
|
||||
_ => bail!("{status} querying endpoint storage"),
|
||||
status => bail!("{status} querying endpoint storage"),
|
||||
}
|
||||
|
||||
let mut uncompressed = Vec::new();
|
||||
@@ -205,7 +209,7 @@ impl ComputeNode {
|
||||
crate::metrics::LFC_OFFLOAD_ERRORS.inc();
|
||||
error!(%err, "could not offload LFC state to endpoint storage");
|
||||
self.state.lock().unwrap().lfc_offload_state = LfcOffloadState::Failed {
|
||||
error: err.to_string(),
|
||||
error: format!("{err:#}"),
|
||||
};
|
||||
}
|
||||
|
||||
@@ -213,16 +217,22 @@ impl ComputeNode {
|
||||
let EndpointStoragePair { url, token } = self.endpoint_storage_pair(None)?;
|
||||
info!(%url, "requesting LFC state from Postgres");
|
||||
|
||||
let mut compressed = Vec::new();
|
||||
ComputeNode::get_maintenance_client(&self.tokio_conn_conf)
|
||||
let row = ComputeNode::get_maintenance_client(&self.tokio_conn_conf)
|
||||
.await
|
||||
.context("connecting to postgres")?
|
||||
.query_one("select neon.get_local_cache_state()", &[])
|
||||
.await
|
||||
.context("querying LFC state")?
|
||||
.try_get::<usize, &[u8]>(0)
|
||||
.context("deserializing LFC state")
|
||||
.map(ZstdEncoder::new)?
|
||||
.context("querying LFC state")?;
|
||||
let state = row
|
||||
.try_get::<usize, Option<&[u8]>>(0)
|
||||
.context("deserializing LFC state")?;
|
||||
let Some(state) = state else {
|
||||
info!(%url, "empty LFC state, not exporting");
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
let mut compressed = Vec::new();
|
||||
ZstdEncoder::new(state)
|
||||
.read_to_end(&mut compressed)
|
||||
.await
|
||||
.context("compressing LFC state")?;
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
use crate::compute::ComputeNode;
|
||||
use anyhow::{Context, Result, bail};
|
||||
use compute_api::{
|
||||
responses::{LfcPrewarmState, PromoteState, SafekeepersLsn},
|
||||
spec::ComputeMode,
|
||||
};
|
||||
use compute_api::responses::{LfcPrewarmState, PromoteConfig, PromoteState};
|
||||
use compute_api::spec::ComputeMode;
|
||||
use itertools::Itertools;
|
||||
use std::collections::HashMap;
|
||||
use std::{sync::Arc, time::Duration};
|
||||
use tokio::time::sleep;
|
||||
use tracing::info;
|
||||
use utils::lsn::Lsn;
|
||||
|
||||
impl ComputeNode {
|
||||
@@ -13,21 +14,22 @@ impl ComputeNode {
|
||||
/// and http client disconnects, this does not stop promotion, and subsequent
|
||||
/// calls block until promote finishes.
|
||||
/// Called by control plane on secondary after primary endpoint is terminated
|
||||
pub async fn promote(self: &Arc<Self>, safekeepers_lsn: SafekeepersLsn) -> PromoteState {
|
||||
/// Has a failpoint "compute-promotion"
|
||||
pub async fn promote(self: &Arc<Self>, cfg: PromoteConfig) -> PromoteState {
|
||||
let cloned = self.clone();
|
||||
let promote_fn = async move || {
|
||||
let Err(err) = cloned.promote_impl(cfg).await else {
|
||||
return PromoteState::Completed;
|
||||
};
|
||||
tracing::error!(%err, "promoting");
|
||||
PromoteState::Failed {
|
||||
error: format!("{err:#}"),
|
||||
}
|
||||
};
|
||||
|
||||
let start_promotion = || {
|
||||
let (tx, rx) = tokio::sync::watch::channel(PromoteState::NotPromoted);
|
||||
tokio::spawn(async move {
|
||||
tx.send(match cloned.promote_impl(safekeepers_lsn).await {
|
||||
Ok(_) => PromoteState::Completed,
|
||||
Err(err) => {
|
||||
tracing::error!(%err, "promoting");
|
||||
PromoteState::Failed {
|
||||
error: err.to_string(),
|
||||
}
|
||||
}
|
||||
})
|
||||
});
|
||||
tokio::spawn(async move { tx.send(promote_fn().await) });
|
||||
rx
|
||||
};
|
||||
|
||||
@@ -47,9 +49,7 @@ impl ComputeNode {
|
||||
task.borrow().clone()
|
||||
}
|
||||
|
||||
// Why do we have to supply safekeepers?
|
||||
// For secondary we use primary_connection_conninfo so safekeepers field is empty
|
||||
async fn promote_impl(&self, safekeepers_lsn: SafekeepersLsn) -> Result<()> {
|
||||
async fn promote_impl(&self, mut cfg: PromoteConfig) -> Result<()> {
|
||||
{
|
||||
let state = self.state.lock().unwrap();
|
||||
let mode = &state.pspec.as_ref().unwrap().spec.mode;
|
||||
@@ -73,7 +73,7 @@ impl ComputeNode {
|
||||
.await
|
||||
.context("connecting to postgres")?;
|
||||
|
||||
let primary_lsn = safekeepers_lsn.wal_flush_lsn;
|
||||
let primary_lsn = cfg.wal_flush_lsn;
|
||||
let mut last_wal_replay_lsn: Lsn = Lsn::INVALID;
|
||||
const RETRIES: i32 = 20;
|
||||
for i in 0..=RETRIES {
|
||||
@@ -86,7 +86,7 @@ impl ComputeNode {
|
||||
if last_wal_replay_lsn >= primary_lsn {
|
||||
break;
|
||||
}
|
||||
tracing::info!("Try {i}, replica lsn {last_wal_replay_lsn}, primary lsn {primary_lsn}");
|
||||
info!("Try {i}, replica lsn {last_wal_replay_lsn}, primary lsn {primary_lsn}");
|
||||
sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
if last_wal_replay_lsn < primary_lsn {
|
||||
@@ -96,7 +96,7 @@ impl ComputeNode {
|
||||
// using $1 doesn't work with ALTER SYSTEM SET
|
||||
let safekeepers_sql = format!(
|
||||
"ALTER SYSTEM SET neon.safekeepers='{}'",
|
||||
safekeepers_lsn.safekeepers
|
||||
cfg.spec.safekeeper_connstrings.join(",")
|
||||
);
|
||||
client
|
||||
.query(&safekeepers_sql, &[])
|
||||
@@ -106,6 +106,12 @@ impl ComputeNode {
|
||||
.query("SELECT pg_reload_conf()", &[])
|
||||
.await
|
||||
.context("reloading postgres config")?;
|
||||
|
||||
#[cfg(feature = "testing")]
|
||||
fail::fail_point!("compute-promotion", |_| {
|
||||
bail!("promotion configured to fail because of a failpoint")
|
||||
});
|
||||
|
||||
let row = client
|
||||
.query_one("SELECT * FROM pg_promote()", &[])
|
||||
.await
|
||||
@@ -125,8 +131,36 @@ impl ComputeNode {
|
||||
bail!("replica in read only mode after promotion");
|
||||
}
|
||||
|
||||
let mut state = self.state.lock().unwrap();
|
||||
state.pspec.as_mut().unwrap().spec.mode = ComputeMode::Primary;
|
||||
Ok(())
|
||||
{
|
||||
let mut state = self.state.lock().unwrap();
|
||||
let spec = &mut state.pspec.as_mut().unwrap().spec;
|
||||
spec.mode = ComputeMode::Primary;
|
||||
let new_conf = cfg.spec.cluster.postgresql_conf.as_mut().unwrap();
|
||||
let existing_conf = spec.cluster.postgresql_conf.as_ref().unwrap();
|
||||
Self::merge_spec(new_conf, existing_conf);
|
||||
}
|
||||
info!("applied new spec, reconfiguring as primary");
|
||||
self.reconfigure()
|
||||
}
|
||||
|
||||
/// Merge old and new Postgres conf specs to apply on secondary.
|
||||
/// Change new spec's port and safekeepers since they are supplied
/// differently.
fn merge_spec(new_conf: &mut String, existing_conf: &str) {
|
||||
let mut new_conf_set: HashMap<&str, &str> = new_conf
|
||||
.split_terminator('\n')
|
||||
.map(|e| e.split_once("=").expect("invalid item"))
|
||||
.collect();
|
||||
new_conf_set.remove("neon.safekeepers");
|
||||
|
||||
let existing_conf_set: HashMap<&str, &str> = existing_conf
|
||||
.split_terminator('\n')
|
||||
.map(|e| e.split_once("=").expect("invalid item"))
|
||||
.collect();
|
||||
new_conf_set.insert("port", existing_conf_set["port"]);
|
||||
*new_conf = new_conf_set
|
||||
.iter()
|
||||
.map(|(k, v)| format!("{k}={v}"))
|
||||
.join("\n");
|
||||
}
|
||||
}
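A self-contained sketch of the merging rule implemented by merge_spec above, under the assumption of simple `key=value` lines and without the itertools dependency; the sample settings are invented.

use std::collections::HashMap;

fn parse(s: &str) -> HashMap<&str, &str> {
    s.split_terminator('\n')
        .filter_map(|line| line.split_once('='))
        .collect()
}

fn merge_conf(new_conf: &str, existing_conf: &str) -> String {
    let mut merged = parse(new_conf);
    // Safekeepers are supplied separately (via ALTER SYSTEM above), so drop them here.
    merged.remove("neon.safekeepers");
    // Keep the port the secondary is already listening on.
    if let Some(port) = parse(existing_conf).get("port").copied() {
        merged.insert("port", port);
    }
    let mut lines: Vec<String> = merged.iter().map(|(k, v)| format!("{k}={v}")).collect();
    lines.sort();
    lines.join("\n")
}

fn main() {
    let merged = merge_conf(
        "port=5432\nneon.safekeepers=sk1:5454\nshared_buffers=128MB",
        "port=55432\nwork_mem=4MB",
    );
    println!("{merged}");
}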
@@ -7,11 +7,14 @@ use std::io::prelude::*;
|
||||
use std::path::Path;
|
||||
|
||||
use compute_api::responses::TlsConfig;
|
||||
use compute_api::spec::{ComputeAudit, ComputeMode, ComputeSpec, GenericOption};
|
||||
use compute_api::spec::{
|
||||
ComputeAudit, ComputeMode, ComputeSpec, DatabricksSettings, GenericOption,
|
||||
};
|
||||
|
||||
use crate::compute::ComputeNodeParams;
|
||||
use crate::pg_helpers::{
|
||||
GenericOptionExt, GenericOptionsSearch, PgOptionsSerialize, escape_conf_value,
|
||||
DatabricksSettingsExt as _, GenericOptionExt, GenericOptionsSearch, PgOptionsSerialize,
|
||||
escape_conf_value,
|
||||
};
|
||||
use crate::tls::{self, SERVER_CRT, SERVER_KEY};
|
||||
|
||||
@@ -40,12 +43,16 @@ pub fn line_in_file(path: &Path, line: &str) -> Result<bool> {
|
||||
}
|
||||
|
||||
/// Create or completely rewrite configuration file specified by `path`
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn write_postgres_conf(
|
||||
pgdata_path: &Path,
|
||||
params: &ComputeNodeParams,
|
||||
spec: &ComputeSpec,
|
||||
postgres_port: Option<u16>,
|
||||
extension_server_port: u16,
|
||||
tls_config: &Option<TlsConfig>,
|
||||
databricks_settings: Option<&DatabricksSettings>,
|
||||
lakebase_mode: bool,
|
||||
) -> Result<()> {
|
||||
let path = pgdata_path.join("postgresql.conf");
|
||||
// File::create() destroys the file content if it exists.
|
||||
@@ -285,6 +292,24 @@ pub fn write_postgres_conf(
|
||||
writeln!(file, "log_destination='stderr,syslog'")?;
|
||||
}
|
||||
|
||||
if lakebase_mode {
|
||||
// Explicitly set the port based on the connstr, overriding any previous port setting.
|
||||
// Note: It is important that we don't specify a different port again after this.
|
||||
let port = postgres_port.expect("port must be present in connstr");
|
||||
writeln!(file, "port = {port}")?;
|
||||
|
||||
// These are Databricks-specific settings.
|
||||
// This should be at the end of the file but before `compute_ctl_temp_override.conf` below
|
||||
// so that it can override any settings above.
|
||||
// `compute_ctl_temp_override.conf` is intended to override any settings above during specific operations.
|
||||
// To prevent potential breakage in the future, we keep it above `compute_ctl_temp_override.conf`.
|
||||
writeln!(file, "# Databricks settings start")?;
|
||||
if let Some(settings) = databricks_settings {
|
||||
writeln!(file, "{}", settings.as_pg_settings())?;
|
||||
}
|
||||
writeln!(file, "# Databricks settings end")?;
|
||||
}
|
||||
|
||||
// This is essential to keep this line at the end of the file,
|
||||
// because it is intended to override any settings above.
|
||||
writeln!(file, "include_if_exists = 'compute_ctl_temp_override.conf'")?;
@@ -1,23 +1,40 @@
|
||||
use std::sync::Arc;
|
||||
use std::fs::File;
|
||||
use std::thread;
|
||||
use std::{path::Path, sync::Arc};
|
||||
|
||||
use compute_api::responses::ComputeStatus;
|
||||
use anyhow::Result;
|
||||
use compute_api::responses::{ComputeConfig, ComputeStatus};
|
||||
use tracing::{error, info, instrument};
|
||||
|
||||
use crate::compute::ComputeNode;
|
||||
use crate::compute::{ComputeNode, ParsedSpec};
|
||||
use crate::spec::get_config_from_control_plane;
|
||||
|
||||
#[instrument(skip_all)]
|
||||
fn configurator_main_loop(compute: &Arc<ComputeNode>) {
|
||||
info!("waiting for reconfiguration requests");
|
||||
loop {
|
||||
let mut state = compute.state.lock().unwrap();
|
||||
/* BEGIN_HADRON */
|
||||
// RefreshConfiguration should only be used inside the loop
|
||||
assert_ne!(state.status, ComputeStatus::RefreshConfiguration);
|
||||
/* END_HADRON */
|
||||
|
||||
// We have to re-check the status after re-acquiring the lock because it could be that
|
||||
// the status has changed while we were waiting for the lock, and we might not need to
|
||||
// wait on the condition variable. Otherwise, we might end up in some soft-/deadlock, i.e.
|
||||
// we are waiting for a condition variable that will never be signaled.
|
||||
if state.status != ComputeStatus::ConfigurationPending {
|
||||
state = compute.state_changed.wait(state).unwrap();
|
||||
if compute.params.lakebase_mode {
|
||||
while state.status != ComputeStatus::ConfigurationPending
|
||||
&& state.status != ComputeStatus::RefreshConfigurationPending
|
||||
&& state.status != ComputeStatus::Failed
|
||||
{
|
||||
info!("configurator: compute status: {:?}, sleeping", state.status);
|
||||
state = compute.state_changed.wait(state).unwrap();
|
||||
}
|
||||
} else {
|
||||
// We have to re-check the status after re-acquiring the lock because it could be that
|
||||
// the status has changed while we were waiting for the lock, and we might not need to
|
||||
// wait on the condition variable. Otherwise, we might end up in some soft-/deadlock, i.e.
|
||||
// we are waiting for a condition variable that will never be signaled.
|
||||
if state.status != ComputeStatus::ConfigurationPending {
|
||||
state = compute.state_changed.wait(state).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
// Re-check the status after waking up
|
||||
@@ -37,6 +54,133 @@ fn configurator_main_loop(compute: &Arc<ComputeNode>) {
|
||||
// XXX: used to test that API is blocking
|
||||
// std::thread::sleep(std::time::Duration::from_millis(10000));
|
||||
|
||||
compute.set_status(new_status);
|
||||
} else if state.status == ComputeStatus::RefreshConfigurationPending {
|
||||
info!(
|
||||
"compute node suspects its configuration is out of date, now refreshing configuration"
|
||||
);
|
||||
state.set_status(ComputeStatus::RefreshConfiguration, &compute.state_changed);
|
||||
// Drop the lock guard here to avoid holding the lock while downloading config from the control plane / HCC.
|
||||
// This is the only thread that can move compute_ctl out of the `RefreshConfiguration` state, so it
|
||||
// is safe to drop the lock like this.
|
||||
drop(state);
|
||||
|
||||
let get_config_result: anyhow::Result<ComputeConfig> =
|
||||
if let Some(config_path) = &compute.params.config_path_test_only {
|
||||
// This path is only to make testing easier. In production we always get the config from the HCC.
|
||||
info!(
|
||||
"reloading config.json from path: {}",
|
||||
config_path.to_string_lossy()
|
||||
);
|
||||
let path = Path::new(config_path);
|
||||
if let Ok(file) = File::open(path) {
|
||||
match serde_json::from_reader::<File, ComputeConfig>(file) {
|
||||
Ok(config) => Ok(config),
|
||||
Err(e) => {
|
||||
error!("could not parse config file: {}", e);
|
||||
Err(anyhow::anyhow!("could not parse config file: {}", e))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
error!(
|
||||
"could not open config file at path: {:?}",
|
||||
config_path.to_string_lossy()
|
||||
);
|
||||
Err(anyhow::anyhow!(
|
||||
"could not open config file at path: {}",
|
||||
config_path.to_string_lossy()
|
||||
))
|
||||
}
|
||||
} else if let Some(control_plane_uri) = &compute.params.control_plane_uri {
|
||||
get_config_from_control_plane(control_plane_uri, &compute.params.compute_id)
|
||||
} else {
|
||||
Err(anyhow::anyhow!("config_path_test_only is not set"))
|
||||
};
|
||||
|
||||
// Parse any received ComputeSpec and transpose the result into a Result<Option<ParsedSpec>>.
|
||||
let parsed_spec_result: Result<Option<ParsedSpec>> =
|
||||
get_config_result.and_then(|config| {
|
||||
if let Some(spec) = config.spec {
|
||||
if let Ok(pspec) = ParsedSpec::try_from(spec) {
|
||||
Ok(Some(pspec))
|
||||
} else {
|
||||
Err(anyhow::anyhow!("could not parse spec"))
|
||||
}
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
});
|
||||
|
||||
let new_status: ComputeStatus;
|
||||
match parsed_spec_result {
|
||||
// Control plane (HCM) returned a spec and we were able to parse it.
|
||||
Ok(Some(pspec)) => {
|
||||
{
|
||||
let mut state = compute.state.lock().unwrap();
|
||||
// Defensive programming to make sure this thread is indeed the only one that can move the compute
|
||||
// node out of the `RefreshConfiguration` state. Would be nice if we can encode this invariant
|
||||
// into the type system.
|
||||
assert_eq!(state.status, ComputeStatus::RefreshConfiguration);
|
||||
|
||||
if state.pspec.as_ref().map(|ps| ps.pageserver_connstr.clone())
|
||||
== Some(pspec.pageserver_connstr.clone())
|
||||
{
|
||||
info!(
|
||||
"Refresh configuration: Retrieved spec is the same as the current spec. Waiting for control plane to update the spec before attempting reconfiguration."
|
||||
);
|
||||
state.status = ComputeStatus::Running;
|
||||
compute.state_changed.notify_all();
|
||||
drop(state);
|
||||
std::thread::sleep(std::time::Duration::from_secs(5));
|
||||
continue;
|
||||
}
|
||||
// state.pspec is consumed by compute.reconfigure() below. Note that compute.reconfigure() will acquire
|
||||
// the compute.state lock again so we need to have the lock guard go out of scope here. We could add a
|
||||
// "locked" variant of compute.reconfigure() that takes the lock guard as an argument to make this cleaner,
|
||||
// but it's not worth forking the codebase too much for this minor point alone right now.
|
||||
state.pspec = Some(pspec);
|
||||
}
|
||||
match compute.reconfigure() {
|
||||
Ok(_) => {
|
||||
info!("Refresh configuration: compute node configured");
|
||||
new_status = ComputeStatus::Running;
|
||||
}
|
||||
Err(e) => {
|
||||
error!(
|
||||
"Refresh configuration: could not configure compute node: {}",
|
||||
e
|
||||
);
|
||||
// Set the compute node back to the `RefreshConfigurationPending` state if the configuration
|
||||
// was not successful. It should be okay to treat this situation the same as if the loop
|
||||
// hasn't executed yet as long as the detection side keeps notifying.
|
||||
new_status = ComputeStatus::RefreshConfigurationPending;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Control plane (HCM)'s response does not contain a spec. This is the "Empty" attachment case.
|
||||
Ok(None) => {
|
||||
info!(
|
||||
"Compute Manager signaled that this compute is no longer attached to any storage. Exiting."
|
||||
);
|
||||
// We just immediately terminate the whole compute_ctl in this case. It's not necessary to attempt a
|
||||
// clean shutdown as Postgres is probably not responding anyway (which is why we are in this refresh
|
||||
// configuration state).
|
||||
std::process::exit(1);
|
||||
}
|
||||
// Various error cases:
|
||||
// - The request to the control plane (HCM) either failed or returned a malformed spec.
|
||||
// - compute_ctl itself is configured incorrectly (e.g., compute_id is not set).
|
||||
Err(e) => {
|
||||
error!(
|
||||
"Refresh configuration: error getting a parsed spec: {:?}",
|
||||
e
|
||||
);
|
||||
new_status = ComputeStatus::RefreshConfigurationPending;
|
||||
// We may be dealing with an overloaded HCM if we end up in this path. Backoff 5 seconds before
|
||||
// retrying to avoid hammering the HCM.
|
||||
std::thread::sleep(std::time::Duration::from_secs(5));
|
||||
}
|
||||
}
|
||||
compute.set_status(new_status);
|
||||
} else if state.status == ComputeStatus::Failed {
|
||||
info!("compute node is now in Failed state, exiting");
|
||||
|
||||
@@ -16,12 +16,16 @@ use crate::http::JsonResponse;
|
||||
#[derive(Clone, Debug)]
|
||||
pub(in crate::http) struct Authorize {
|
||||
compute_id: String,
|
||||
// BEGIN HADRON
|
||||
// Hadron instance ID. Only set if it's a Lakebase V1 a.k.a. Hadron instance.
|
||||
instance_id: Option<String>,
|
||||
// END HADRON
|
||||
jwks: JwkSet,
|
||||
validation: Validation,
|
||||
}
|
||||
|
||||
impl Authorize {
|
||||
pub fn new(compute_id: String, jwks: JwkSet) -> Self {
|
||||
pub fn new(compute_id: String, instance_id: Option<String>, jwks: JwkSet) -> Self {
|
||||
let mut validation = Validation::new(Algorithm::EdDSA);
|
||||
|
||||
// BEGIN HADRON
|
||||
@@ -46,6 +50,7 @@ impl Authorize {
|
||||
|
||||
Self {
|
||||
compute_id,
|
||||
instance_id,
|
||||
jwks,
|
||||
validation,
|
||||
}
|
||||
@@ -59,10 +64,20 @@ impl AsyncAuthorizeRequest<Body> for Authorize {
|
||||
|
||||
fn authorize(&mut self, mut request: Request<Body>) -> Self::Future {
|
||||
let compute_id = self.compute_id.clone();
|
||||
let is_hadron_instance = self.instance_id.is_some();
|
||||
let jwks = self.jwks.clone();
|
||||
let validation = self.validation.clone();
|
||||
|
||||
Box::pin(async move {
|
||||
// BEGIN HADRON
|
||||
// In Hadron deployments the "external" HTTP endpoint on compute_ctl can only be
|
||||
// accessed by trusted components (enforced by dblet network policy), so we can bypass
|
||||
// all auth here.
|
||||
if is_hadron_instance {
|
||||
return Ok(request);
|
||||
}
|
||||
// END HADRON
|
||||
|
||||
let TypedHeader(Authorization(bearer)) = request
|
||||
.extract_parts::<TypedHeader<Authorization<Bearer>>>()
|
||||
.await
|
||||
|
||||
@@ -96,7 +96,7 @@ paths:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/SafekeepersLsn"
|
||||
$ref: "#/components/schemas/ComputeSchemaWithLsn"
|
||||
responses:
|
||||
200:
|
||||
description: Promote succeeded or wasn't started
|
||||
@@ -297,14 +297,7 @@ paths:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
required:
|
||||
- spec
|
||||
properties:
|
||||
spec:
|
||||
# XXX: I don't want to explain current spec in the OpenAPI format,
|
||||
# as it could be changed really soon. Consider doing it later.
|
||||
type: object
|
||||
$ref: "#/components/schemas/ComputeSchema"
|
||||
responses:
|
||||
200:
|
||||
description: Compute configuration finished.
|
||||
@@ -591,18 +584,25 @@ components:
|
||||
type: string
|
||||
example: "1.0.0"
|
||||
|
||||
SafekeepersLsn:
|
||||
ComputeSchema:
|
||||
type: object
|
||||
required:
|
||||
- safekeepers
|
||||
- spec
|
||||
properties:
|
||||
spec:
|
||||
type: object
|
||||
ComputeSchemaWithLsn:
|
||||
type: object
|
||||
required:
|
||||
- spec
|
||||
- wal_flush_lsn
|
||||
properties:
|
||||
safekeepers:
|
||||
description: Primary replica safekeepers
|
||||
type: string
|
||||
spec:
|
||||
$ref: "#/components/schemas/ComputeState"
|
||||
wal_flush_lsn:
|
||||
description: Primary last WAL flush LSN
|
||||
type: string
|
||||
description: "last WAL flush LSN"
|
||||
example: "0/028F10D8"
|
||||
|
||||
LfcPrewarmState:
|
||||
type: object
|
||||
|
||||
@@ -43,7 +43,12 @@ pub(in crate::http) async fn configure(
|
||||
// configure request for tracing purposes.
|
||||
state.startup_span = Some(tracing::Span::current());
|
||||
|
||||
state.pspec = Some(pspec);
|
||||
if compute.params.lakebase_mode {
|
||||
ComputeNode::set_spec(&compute.params, &mut state, pspec);
|
||||
} else {
|
||||
state.pspec = Some(pspec);
|
||||
}
|
||||
|
||||
state.set_status(ComputeStatus::ConfigurationPending, &compute.state_changed);
|
||||
drop(state);
|
||||
}
|
||||
|
||||
34
compute_tools/src/http/routes/hadron_liveness_probe.rs
Normal file
@@ -0,0 +1,34 @@
|
||||
use crate::pg_isready::pg_isready;
|
||||
use crate::{compute::ComputeNode, http::JsonResponse};
|
||||
use axum::{extract::State, http::StatusCode, response::Response};
|
||||
use std::sync::Arc;
|
||||
|
||||
/// NOTE: NOT ENABLED YET
|
||||
/// Detect if the compute is alive.
|
||||
/// Called by the liveness probe of the compute container.
|
||||
pub(in crate::http) async fn hadron_liveness_probe(
|
||||
State(compute): State<Arc<ComputeNode>>,
|
||||
) -> Response {
|
||||
let port = match compute.params.connstr.port() {
|
||||
Some(port) => port,
|
||||
None => {
|
||||
return JsonResponse::error(
|
||||
StatusCode::INTERNAL_SERVER_ERROR,
|
||||
"Failed to get the port from the connection string",
|
||||
);
|
||||
}
|
||||
};
|
||||
match pg_isready(&compute.params.pg_isready_bin, port) {
|
||||
Ok(_) => {
|
||||
// The connection is successful, so the compute is alive.
|
||||
// Return a 200 OK response.
|
||||
JsonResponse::success(StatusCode::OK, "ok")
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!("Hadron liveness probe failed: {}", e);
|
||||
// The connection failed, so the compute is not alive.
|
||||
// Return a 500 Internal Server Error response.
|
||||
JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -13,6 +13,7 @@ use metrics::{Encoder, TextEncoder};
|
||||
|
||||
use crate::communicator_socket_client::connect_communicator_socket;
|
||||
use crate::compute::ComputeNode;
|
||||
use crate::hadron_metrics;
|
||||
use crate::http::JsonResponse;
|
||||
use crate::metrics::collect;
|
||||
|
||||
@@ -21,11 +22,18 @@ pub(in crate::http) async fn get_metrics() -> Response {
|
||||
// When we call TextEncoder::encode() below, it will immediately return an
|
||||
// error if a metric family has no metrics, so we need to preemptively
|
||||
// filter out metric families with no metrics.
|
||||
let metrics = collect()
|
||||
let mut metrics = collect()
|
||||
.into_iter()
|
||||
.filter(|m| !m.get_metric().is_empty())
|
||||
.collect::<Vec<MetricFamily>>();
|
||||
|
||||
// Add Hadron metrics.
|
||||
let hadron_metrics: Vec<MetricFamily> = hadron_metrics::collect()
|
||||
.into_iter()
|
||||
.filter(|m| !m.get_metric().is_empty())
|
||||
.collect();
|
||||
metrics.extend(hadron_metrics);
|
||||
|
||||
let encoder = TextEncoder::new();
|
||||
let mut buffer = vec![];
|
||||
|
||||
|
||||
@@ -10,11 +10,13 @@ pub(in crate::http) mod extension_server;
|
||||
pub(in crate::http) mod extensions;
|
||||
pub(in crate::http) mod failpoints;
|
||||
pub(in crate::http) mod grants;
|
||||
pub(in crate::http) mod hadron_liveness_probe;
|
||||
pub(in crate::http) mod insights;
|
||||
pub(in crate::http) mod lfc;
|
||||
pub(in crate::http) mod metrics;
|
||||
pub(in crate::http) mod metrics_json;
|
||||
pub(in crate::http) mod promote;
|
||||
pub(in crate::http) mod refresh_configuration;
|
||||
pub(in crate::http) mod status;
|
||||
pub(in crate::http) mod terminate;
|
||||
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
use crate::http::JsonResponse;
|
||||
use axum::Form;
|
||||
use axum::extract::Json;
|
||||
use http::StatusCode;
|
||||
|
||||
pub(in crate::http) async fn promote(
|
||||
compute: axum::extract::State<std::sync::Arc<crate::compute::ComputeNode>>,
|
||||
Form(safekeepers_lsn): Form<compute_api::responses::SafekeepersLsn>,
|
||||
Json(cfg): Json<compute_api::responses::PromoteConfig>,
|
||||
) -> axum::response::Response {
|
||||
let state = compute.promote(safekeepers_lsn).await;
|
||||
if let compute_api::responses::PromoteState::Failed { error } = state {
|
||||
return JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, error);
|
||||
let state = compute.promote(cfg).await;
|
||||
if let compute_api::responses::PromoteState::Failed { error: _ } = state {
|
||||
return JsonResponse::create_response(StatusCode::INTERNAL_SERVER_ERROR, state);
|
||||
}
|
||||
JsonResponse::success(StatusCode::OK, state)
|
||||
}
|
||||
|
||||
29
compute_tools/src/http/routes/refresh_configuration.rs
Normal file
@@ -0,0 +1,29 @@
|
||||
// This file is added by Hadron
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use axum::{
|
||||
extract::State,
|
||||
response::{IntoResponse, Response},
|
||||
};
|
||||
use http::StatusCode;
|
||||
|
||||
use crate::compute::ComputeNode;
|
||||
use crate::hadron_metrics::POSTGRES_PAGESTREAM_REQUEST_ERRORS;
|
||||
use crate::http::JsonResponse;
|
||||
|
||||
/// The /refresh_configuration POST method is used to nudge compute_ctl to pull a new spec
|
||||
/// from the HCC and attempt to reconfigure Postgres with the new spec. The method does not wait
|
||||
/// for the reconfiguration to complete. Rather, it simply delivers a signal that will cause
|
||||
/// configuration to be reloaded in a best effort manner. Invocation of this method does not
|
||||
/// guarantee that a reconfiguration will occur. The caller should keep sending this
|
||||
/// request while it believes that the compute configuration is out of date.
|
||||
pub(in crate::http) async fn refresh_configuration(
|
||||
State(compute): State<Arc<ComputeNode>>,
|
||||
) -> Response {
|
||||
POSTGRES_PAGESTREAM_REQUEST_ERRORS.inc();
|
||||
match compute.signal_refresh_configuration().await {
|
||||
Ok(_) => StatusCode::OK.into_response(),
|
||||
Err(e) => JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e),
|
||||
}
|
||||
}
|
||||
@@ -1,7 +1,7 @@
|
||||
use crate::compute::{ComputeNode, forward_termination_signal};
|
||||
use crate::http::JsonResponse;
|
||||
use axum::extract::State;
|
||||
use axum::response::Response;
|
||||
use axum::response::{IntoResponse, Response};
|
||||
use axum_extra::extract::OptionalQuery;
|
||||
use compute_api::responses::{ComputeStatus, TerminateMode, TerminateResponse};
|
||||
use http::StatusCode;
|
||||
@@ -33,7 +33,29 @@ pub(in crate::http) async fn terminate(
|
||||
if !matches!(state.status, ComputeStatus::Empty | ComputeStatus::Running) {
|
||||
return JsonResponse::invalid_status(state.status);
|
||||
}
|
||||
|
||||
// If compute is Empty, there's no Postgres to terminate. The regular compute_ctl termination path
|
||||
// assumes Postgres to be configured and running, so we just special-handle this case by exiting
|
||||
// the process directly.
|
||||
if compute.params.lakebase_mode && state.status == ComputeStatus::Empty {
|
||||
drop(state);
|
||||
info!("terminating empty compute - will exit process");
|
||||
|
||||
// Queue a task to exit the process after 5 seconds. The 5-second delay aims to
|
||||
// give enough time for the HTTP response to be sent so that HCM doesn't get an abrupt
|
||||
// connection termination.
|
||||
tokio::spawn(async {
|
||||
tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
|
||||
info!("exiting process after terminating empty compute");
|
||||
std::process::exit(0);
|
||||
});
|
||||
|
||||
return StatusCode::OK.into_response();
|
||||
}
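A hedged sketch of the delayed-exit pattern used above, assuming the tokio crate (rt, macros and time features); it responds first and exits after the same 5-second grace period.

#[tokio::main]
async fn main() {
    // Queue the exit so the HTTP response can be flushed first.
    tokio::spawn(async {
        tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
        println!("exiting process after terminating empty compute (sketch)");
        std::process::exit(0);
    });

    println!("200 OK sent (sketch)");
    // Keep the runtime alive long enough for the spawned task to run.
    tokio::time::sleep(tokio::time::Duration::from_secs(10)).await;
}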
// For Running status, proceed with normal termination
|
||||
state.set_status(mode.into(), &compute.state_changed);
|
||||
drop(state);
|
||||
}
|
||||
|
||||
forward_termination_signal(false);
|
||||
|
||||
@@ -23,7 +23,8 @@ use super::{
|
||||
middleware::authorize::Authorize,
|
||||
routes::{
|
||||
check_writability, configure, database_schema, dbs_and_roles, extension_server, extensions,
|
||||
grants, insights, lfc, metrics, metrics_json, promote, status, terminate,
|
||||
grants, hadron_liveness_probe, insights, lfc, metrics, metrics_json, promote,
|
||||
refresh_configuration, status, terminate,
|
||||
},
|
||||
};
|
||||
use crate::compute::ComputeNode;
|
||||
@@ -43,6 +44,7 @@ pub enum Server {
|
||||
port: u16,
|
||||
config: ComputeCtlConfig,
|
||||
compute_id: String,
|
||||
instance_id: Option<String>,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -67,7 +69,12 @@ impl From<&Server> for Router<Arc<ComputeNode>> {
|
||||
post(extension_server::download_extension),
|
||||
)
|
||||
.route("/extensions", post(extensions::install_extension))
|
||||
.route("/grants", post(grants::add_grant));
|
||||
.route("/grants", post(grants::add_grant))
|
||||
// Hadron: Compute-initiated configuration refresh
|
||||
.route(
|
||||
"/refresh_configuration",
|
||||
post(refresh_configuration::refresh_configuration),
|
||||
);
|
||||
|
||||
// Add in any testing support
|
||||
if cfg!(feature = "testing") {
|
||||
@@ -79,7 +86,10 @@ impl From<&Server> for Router<Arc<ComputeNode>> {
|
||||
router
|
||||
}
|
||||
Server::External {
|
||||
config, compute_id, ..
|
||||
config,
|
||||
compute_id,
|
||||
instance_id,
|
||||
..
|
||||
} => {
|
||||
let unauthenticated_router = Router::<Arc<ComputeNode>>::new()
|
||||
.route("/metrics", get(metrics::get_metrics))
|
||||
@@ -100,8 +110,13 @@ impl From<&Server> for Router<Arc<ComputeNode>> {
|
||||
.route("/metrics.json", get(metrics_json::get_metrics))
|
||||
.route("/status", get(status::get_status))
|
||||
.route("/terminate", post(terminate::terminate))
|
||||
.route(
|
||||
"/hadron_liveness_probe",
|
||||
get(hadron_liveness_probe::hadron_liveness_probe),
|
||||
)
|
||||
.layer(AsyncRequireAuthorizationLayer::new(Authorize::new(
|
||||
compute_id.clone(),
|
||||
instance_id.clone(),
|
||||
config.jwks.clone(),
|
||||
)));
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ use std::collections::HashMap;
|
||||
|
||||
use anyhow::Result;
|
||||
use compute_api::responses::{InstalledExtension, InstalledExtensions};
|
||||
use once_cell::sync::Lazy;
|
||||
use tokio_postgres::error::Error as PostgresError;
|
||||
use tokio_postgres::{Client, Config, NoTls};
|
||||
|
||||
@@ -119,3 +120,7 @@ pub async fn get_installed_extensions(
|
||||
extensions: extensions_map.into_values().collect(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn initialize_metrics() {
|
||||
Lazy::force(&INSTALLED_EXTENSIONS);
|
||||
}
|
||||
|
||||
@@ -25,6 +25,7 @@ mod migration;
|
||||
pub mod monitor;
|
||||
pub mod params;
|
||||
pub mod pg_helpers;
|
||||
pub mod pg_isready;
|
||||
pub mod pgbouncer;
|
||||
pub mod rsyslog;
|
||||
pub mod spec;
|
||||
|
||||
@@ -1,7 +1,10 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{LazyLock, RwLock};
|
||||
use tracing::Subscriber;
|
||||
use tracing::info;
|
||||
use tracing_subscriber::layer::SubscriberExt;
|
||||
use tracing_appender;
|
||||
use tracing_subscriber::prelude::*;
|
||||
use tracing_subscriber::{fmt, layer::SubscriberExt, registry::LookupSpan};
|
||||
|
||||
/// Initialize logging to stderr, and OpenTelemetry tracing and exporter.
|
||||
///
|
||||
@@ -15,16 +18,44 @@ use tracing_subscriber::prelude::*;
|
||||
///
|
||||
pub fn init_tracing_and_logging(
|
||||
default_log_level: &str,
|
||||
) -> anyhow::Result<Option<tracing_utils::Provider>> {
|
||||
log_dir_opt: &Option<String>,
|
||||
) -> anyhow::Result<(
|
||||
Option<tracing_utils::Provider>,
|
||||
Option<tracing_appender::non_blocking::WorkerGuard>,
|
||||
)> {
|
||||
// Initialize Logging
|
||||
let env_filter = tracing_subscriber::EnvFilter::try_from_default_env()
|
||||
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new(default_log_level));
|
||||
|
||||
// Standard output streams
|
||||
let fmt_layer = tracing_subscriber::fmt::layer()
|
||||
.with_ansi(false)
|
||||
.with_target(false)
|
||||
.with_writer(std::io::stderr);
|
||||
|
||||
// Logs with file rotation. Files in `$log_dir/pgcctl.yyyy-MM-dd`
|
||||
let (json_to_file_layer, _file_logs_guard) = if let Some(log_dir) = log_dir_opt {
|
||||
std::fs::create_dir_all(log_dir)?;
|
||||
let file_logs_appender = tracing_appender::rolling::RollingFileAppender::builder()
|
||||
.rotation(tracing_appender::rolling::Rotation::DAILY)
|
||||
.filename_prefix("pgcctl")
|
||||
// Lib appends to existing files, so we will keep files for up to 2 days even on restart loops.
|
||||
// At minimum, log-daemon will have 1 day to detect and upload a file (if created right before midnight).
|
||||
.max_log_files(2)
|
||||
.build(log_dir)
|
||||
.expect("Initializing rolling file appender should succeed");
|
||||
let (file_logs_writer, _file_logs_guard) =
|
||||
tracing_appender::non_blocking(file_logs_appender);
|
||||
let json_to_file_layer = tracing_subscriber::fmt::layer()
|
||||
.with_ansi(false)
|
||||
.with_target(false)
|
||||
.event_format(PgJsonLogShapeFormatter)
|
||||
.with_writer(file_logs_writer);
|
||||
(Some(json_to_file_layer), Some(_file_logs_guard))
|
||||
} else {
|
||||
(None, None)
|
||||
};
|
||||
|
||||
// Initialize OpenTelemetry
|
||||
let provider =
|
||||
tracing_utils::init_tracing("compute_ctl", tracing_utils::ExportConfig::default());
|
||||
@@ -35,12 +66,13 @@ pub fn init_tracing_and_logging(
|
||||
.with(env_filter)
|
||||
.with(otlp_layer)
|
||||
.with(fmt_layer)
|
||||
.with(json_to_file_layer)
|
||||
.init();
|
||||
tracing::info!("logging and tracing started");
|
||||
|
||||
utils::logging::replace_panic_hook_with_tracing_panic_hook().forget();
|
||||
|
||||
Ok(provider)
|
||||
Ok((provider, _file_logs_guard))
|
||||
}
|
||||
|
||||
/// Replace all newline characters with a special character to make it
|
||||
@@ -95,3 +127,157 @@ pub fn startup_context_from_env() -> Option<opentelemetry::Context> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Track relevant id's
|
||||
const UNKNOWN_IDS: &str = r#""pg_instance_id": "", "pg_compute_id": """#;
|
||||
static IDS: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new(UNKNOWN_IDS.to_string()));
|
||||
|
||||
pub fn update_ids(instance_id: &Option<String>, compute_id: &Option<String>) -> anyhow::Result<()> {
|
||||
let ids = format!(
|
||||
r#""pg_instance_id": "{}", "pg_compute_id": "{}""#,
|
||||
instance_id.as_ref().map(|s| s.as_str()).unwrap_or_default(),
|
||||
compute_id.as_ref().map(|s| s.as_str()).unwrap_or_default()
|
||||
);
|
||||
let mut guard = IDS
|
||||
.write()
|
||||
.map_err(|e| anyhow::anyhow!("Log set id's rwlock poisoned: {}", e))?;
|
||||
*guard = ids;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Massage compute_ctl logs into PG json log shape so we can use the same Lumberjack setup.
|
||||
struct PgJsonLogShapeFormatter;
|
||||
impl<S, N> fmt::format::FormatEvent<S, N> for PgJsonLogShapeFormatter
|
||||
where
|
||||
S: Subscriber + for<'a> LookupSpan<'a>,
|
||||
N: for<'a> fmt::format::FormatFields<'a> + 'static,
|
||||
{
|
||||
fn format_event(
|
||||
&self,
|
||||
ctx: &fmt::FmtContext<'_, S, N>,
|
||||
mut writer: fmt::format::Writer<'_>,
|
||||
event: &tracing::Event<'_>,
|
||||
) -> std::fmt::Result {
|
||||
// Format values from the event's metadata, and open message string
|
||||
let metadata = event.metadata();
|
||||
{
|
||||
let ids_guard = IDS.read();
|
||||
let ids = ids_guard
|
||||
.as_ref()
|
||||
.map(|guard| guard.as_str())
|
||||
// Suppress the error so that we don't lose all uploaded/file logs if something goes badly wrong. We would notice the missing ids.
|
||||
.unwrap_or(UNKNOWN_IDS);
|
||||
write!(
|
||||
&mut writer,
|
||||
r#"{{"timestamp": "{}", "error_severity": "{}", "file_name": "{}", "backend_type": "compute_ctl_self", {}, "message": "#,
|
||||
chrono::Utc::now().format("%Y-%m-%d %H:%M:%S%.3f GMT"),
|
||||
metadata.level(),
|
||||
metadata.target(),
|
||||
ids
|
||||
)?;
|
||||
}
|
||||
|
||||
let mut message = String::new();
|
||||
let message_writer = fmt::format::Writer::new(&mut message);
|
||||
|
||||
// Gather the message
|
||||
ctx.field_format().format_fields(message_writer, event)?;
|
||||
|
||||
// TODO: any better options than to copy-paste this OSS span formatter?
|
||||
// impl<S, N, T> FormatEvent<S, N> for Format<Full, T>
|
||||
// https://docs.rs/tracing-subscriber/latest/tracing_subscriber/fmt/trait.FormatEvent.html#impl-FormatEvent%3CS,+N%3E-for-Format%3CFull,+T%3E
|
||||
|
||||
// write message, close bracket, and new line
|
||||
writeln!(writer, "{}}}", serde_json::to_string(&message).unwrap())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "testing")]
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use std::{cell::RefCell, io};
|
||||
|
||||
// Use thread_local! instead of Mutex for test isolation
|
||||
thread_local! {
|
||||
static WRITER_OUTPUT: RefCell<String> = const { RefCell::new(String::new()) };
|
||||
}
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
struct StaticStringWriter;
|
||||
|
||||
impl io::Write for StaticStringWriter {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
let output = String::from_utf8(buf.to_vec()).expect("Invalid UTF-8 in test output");
|
||||
WRITER_OUTPUT.with(|s| s.borrow_mut().push_str(&output));
|
||||
Ok(buf.len())
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::MakeWriter<'_> for StaticStringWriter {
|
||||
type Writer = Self;
|
||||
|
||||
fn make_writer(&self) -> Self::Writer {
|
||||
Self
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_log_pg_json_shape_formatter() {
|
||||
// Use a scoped subscriber to prevent global state pollution
|
||||
let subscriber = tracing_subscriber::registry().with(
|
||||
tracing_subscriber::fmt::layer()
|
||||
.with_ansi(false)
|
||||
.with_target(false)
|
||||
.event_format(PgJsonLogShapeFormatter)
|
||||
.with_writer(StaticStringWriter),
|
||||
);
|
||||
|
||||
let _ = update_ids(&Some("000".to_string()), &Some("111".to_string()));
|
||||
|
||||
// Clear any previous test state
|
||||
WRITER_OUTPUT.with(|s| s.borrow_mut().clear());
|
||||
|
||||
let messages = [
|
||||
"test message",
|
||||
r#"json escape check: name="BatchSpanProcessor.Flush.ExportError" reason="Other(reqwest::Error { kind: Request, url: \"http://localhost:4318/v1/traces\", source: hyper_
|
||||
util::client::legacy::Error(Connect, ConnectError(\"tcp connect error\", Os { code: 111, kind: ConnectionRefused, message: \"Connection refused\" })) })" Failed during the export process"#,
|
||||
];
|
||||
|
||||
tracing::subscriber::with_default(subscriber, || {
|
||||
for message in messages {
|
||||
tracing::info!(message);
|
||||
}
|
||||
});
|
||||
tracing::info!("not test message");
|
||||
|
||||
// Get captured output
|
||||
let output = WRITER_OUTPUT.with(|s| s.borrow().clone());
|
||||
|
||||
let json_strings: Vec<&str> = output.lines().collect();
|
||||
assert_eq!(
|
||||
json_strings.len(),
|
||||
messages.len(),
|
||||
"Log didn't have the expected number of json strings."
|
||||
);
|
||||
|
||||
let json_string_shape_regex = regex::Regex::new(
|
||||
r#"\{"timestamp": "\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} GMT", "error_severity": "INFO", "file_name": ".+", "backend_type": "compute_ctl_self", "pg_instance_id": "000", "pg_compute_id": "111", "message": ".+"\}"#
|
||||
).unwrap();
|
||||
|
||||
for (i, expected_message) in messages.iter().enumerate() {
|
||||
let json_string = json_strings[i];
|
||||
assert!(
|
||||
json_string_shape_regex.is_match(json_string),
|
||||
"Json log didn't match expected pattern:\n{json_string}",
|
||||
);
|
||||
let parsed_json: serde_json::Value = serde_json::from_str(json_string).unwrap();
|
||||
let actual_message = parsed_json["message"].as_str().unwrap();
|
||||
assert_eq!(*expected_message, actual_message);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
30
compute_tools/src/pg_isready.rs
Normal file
@@ -0,0 +1,30 @@
use anyhow::{Context, anyhow};

// Run `/usr/local/bin/pg_isready -p {port}`
// Check the connectivity of PG
// Success means PG is listening on the port and accepting connections
// Note that PG does not need to authenticate the connection, nor reserve a connection quota for it.
// See https://www.postgresql.org/docs/current/app-pg-isready.html
pub fn pg_isready(bin: &str, port: u16) -> anyhow::Result<()> {
    let child_result = std::process::Command::new(bin)
        .arg("-p")
        .arg(port.to_string())
        .spawn();

    child_result
        .context("spawn() failed")
        .and_then(|mut child| child.wait().context("wait() failed"))
        .and_then(|status| match status.success() {
            true => Ok(()),
            false => Err(anyhow!("process exited with {status}")),
        })
        // wrap any prior error with the overall context that we couldn't run the command
        .with_context(|| format!("could not run `{bin} --port {port}`"))
}

// It's safe to assume pg_isready is under the same directory with postgres,
// because it is a PG util bin installed along with postgres
pub fn get_pg_isready_bin(pgbin: &str) -> String {
    let split = pgbin.split("/").collect::<Vec<&str>>();
    split[0..split.len() - 1].join("/") + "/pg_isready"
}
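A tiny standalone check of the sibling-path derivation done by get_pg_isready_bin above; the postgres path is hypothetical.

fn sibling_pg_isready(pgbin: &str) -> String {
    // Drop the last path component (the postgres binary) and append pg_isready.
    let split = pgbin.split('/').collect::<Vec<&str>>();
    split[..split.len() - 1].join("/") + "/pg_isready"
}

fn main() {
    assert_eq!(
        sibling_pg_isready("/usr/local/bin/postgres"),
        "/usr/local/bin/pg_isready"
    );
    println!("pg_isready path derivation looks right");
}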
@@ -142,7 +142,7 @@ pub fn update_pg_hba(pgdata_path: &Path, databricks_pg_hba: Option<&String>) ->
|
||||
// Update pg_hba to contain Databricks-specific settings before adding the neon settings
|
||||
// PG uses the first record that matches to perform authentication, so we need to have
|
||||
// our rules before the default ones from neon.
|
||||
// See https://www.postgresql.org/docs/16/auth-pg-hba-conf.html
|
||||
// See https://www.postgresql.org/docs/current/auth-pg-hba-conf.html
|
||||
if let Some(databricks_pg_hba) = databricks_pg_hba {
|
||||
if config::line_in_file(
|
||||
&pghba_path,
|
||||
|
||||
@@ -560,7 +560,9 @@ enum EndpointCmd {
|
||||
Create(EndpointCreateCmdArgs),
|
||||
Start(EndpointStartCmdArgs),
|
||||
Reconfigure(EndpointReconfigureCmdArgs),
|
||||
RefreshConfiguration(EndpointRefreshConfigurationArgs),
|
||||
Stop(EndpointStopCmdArgs),
|
||||
UpdatePageservers(EndpointUpdatePageserversCmdArgs),
|
||||
GenerateJwt(EndpointGenerateJwtCmdArgs),
|
||||
}
|
||||
|
||||
@@ -721,6 +723,13 @@ struct EndpointReconfigureCmdArgs {
|
||||
safekeepers: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(clap::Args)]
|
||||
#[clap(about = "Refresh the endpoint's configuration by forcing it reload it's spec")]
|
||||
struct EndpointRefreshConfigurationArgs {
|
||||
#[clap(help = "Postgres endpoint id")]
|
||||
endpoint_id: String,
|
||||
}
|
||||
|
||||
#[derive(clap::Args)]
|
||||
#[clap(about = "Stop an endpoint")]
|
||||
struct EndpointStopCmdArgs {
|
||||
@@ -738,6 +747,16 @@ struct EndpointStopCmdArgs {
|
||||
mode: EndpointTerminateMode,
|
||||
}
|
||||
|
||||
#[derive(clap::Args)]
|
||||
#[clap(about = "Update the pageservers in the spec file of the compute endpoint")]
|
||||
struct EndpointUpdatePageserversCmdArgs {
|
||||
#[clap(help = "Postgres endpoint id")]
|
||||
endpoint_id: String,
|
||||
|
||||
#[clap(short = 'p', long, help = "Specified pageserver id")]
|
||||
pageserver_id: Option<NodeId>,
|
||||
}
|
||||
|
||||
#[derive(clap::Args)]
|
||||
#[clap(about = "Generate a JWT for an endpoint")]
|
||||
struct EndpointGenerateJwtCmdArgs {
|
||||
@@ -1518,7 +1537,7 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res
|
||||
let endpoint = cplane
|
||||
.endpoints
|
||||
.get(endpoint_id.as_str())
|
||||
.ok_or_else(|| anyhow::anyhow!("endpoint {endpoint_id} not found"))?;
|
||||
.ok_or_else(|| anyhow!("endpoint {endpoint_id} not found"))?;
|
||||
|
||||
if !args.allow_multiple {
|
||||
cplane.check_conflicting_endpoints(
|
||||
@@ -1629,6 +1648,44 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res
|
||||
println!("Starting existing endpoint {endpoint_id}...");
|
||||
endpoint.start(args).await?;
|
||||
}
|
||||
EndpointCmd::UpdatePageservers(args) => {
|
||||
let endpoint_id = &args.endpoint_id;
|
||||
let endpoint = cplane
|
||||
.endpoints
|
||||
.get(endpoint_id.as_str())
|
||||
.with_context(|| format!("postgres endpoint {endpoint_id} is not found"))?;
|
||||
let pageservers = match args.pageserver_id {
|
||||
Some(pageserver_id) => {
|
||||
let pageserver =
|
||||
PageServerNode::from_env(env, env.get_pageserver_conf(pageserver_id)?);
|
||||
|
||||
vec![(
|
||||
PageserverProtocol::Libpq,
|
||||
pageserver.pg_connection_config.host().clone(),
|
||||
pageserver.pg_connection_config.port(),
|
||||
)]
|
||||
}
|
||||
None => {
|
||||
let storage_controller = StorageController::from_env(env);
|
||||
storage_controller
|
||||
.tenant_locate(endpoint.tenant_id)
|
||||
.await?
|
||||
.shards
|
||||
.into_iter()
|
||||
.map(|shard| {
|
||||
(
|
||||
PageserverProtocol::Libpq,
|
||||
Host::parse(&shard.listen_pg_addr)
|
||||
.expect("Storage controller reported malformed host"),
|
||||
shard.listen_pg_port,
|
||||
)
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
};
|
||||
|
||||
endpoint.update_pageservers_in_config(pageservers).await?;
|
||||
}
|
||||
EndpointCmd::Reconfigure(args) => {
|
||||
let endpoint_id = &args.endpoint_id;
|
||||
let endpoint = cplane
|
||||
@@ -1682,6 +1739,14 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res
|
||||
.reconfigure(Some(pageservers), None, safekeepers, None)
|
||||
.await?;
|
||||
}
|
||||
EndpointCmd::RefreshConfiguration(args) => {
|
||||
let endpoint_id = &args.endpoint_id;
|
||||
let endpoint = cplane
|
||||
.endpoints
|
||||
.get(endpoint_id.as_str())
|
||||
.with_context(|| format!("postgres endpoint {endpoint_id} is not found"))?;
|
||||
endpoint.refresh_configuration().await?;
|
||||
}
|
||||
EndpointCmd::Stop(args) => {
|
||||
let endpoint_id = &args.endpoint_id;
|
||||
let endpoint = cplane
|
||||
|
||||
@@ -846,6 +846,7 @@ impl Endpoint {
|
||||
autoprewarm: args.autoprewarm,
|
||||
offload_lfc_interval_seconds: args.offload_lfc_interval_seconds,
|
||||
suspend_timeout_seconds: -1, // Only used in neon_local.
|
||||
databricks_settings: None,
|
||||
};
|
||||
|
||||
// this strange code is needed to support respec() in tests
|
||||
@@ -990,7 +991,9 @@ impl Endpoint {
|
||||
| ComputeStatus::Configuration
|
||||
| ComputeStatus::TerminationPendingFast
|
||||
| ComputeStatus::TerminationPendingImmediate
|
||||
| ComputeStatus::Terminated => {
|
||||
| ComputeStatus::Terminated
|
||||
| ComputeStatus::RefreshConfigurationPending
|
||||
| ComputeStatus::RefreshConfiguration => {
|
||||
bail!("unexpected compute status: {:?}", state.status)
|
||||
}
|
||||
}
|
||||
@@ -1013,6 +1016,29 @@ impl Endpoint {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Update the pageservers in the spec file of the endpoint. This is useful to test the spec refresh scenario.
pub async fn update_pageservers_in_config(
&self,
pageservers: Vec<(PageserverProtocol, Host, u16)>,
) -> Result<()> {
let config_path = self.endpoint_path().join("config.json");
let mut config: ComputeConfig = {
let file = std::fs::File::open(&config_path)?;
serde_json::from_reader(file)?
};

let pageserver_connstring = Self::build_pageserver_connstr(&pageservers);
assert!(!pageserver_connstring.is_empty());
let mut spec = config.spec.unwrap();
spec.pageserver_connstring = Some(pageserver_connstring);
config.spec = Some(spec);

let file = std::fs::File::create(&config_path)?;
serde_json::to_writer_pretty(file, &config)?;

Ok(())
}
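Editor's note: the helper above is a plain read-patch-write cycle over the endpoint's config.json. A minimal stand-alone sketch of the same pattern, with serde_json::Value standing in for the typed ComputeConfig and an illustrative placeholder connstring (both assumptions, not the real spec layout):

use serde_json::{Value, json};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Stand-in for the on-disk config.json; only the field touched above is shown.
    let mut config: Value = json!({ "spec": { "pageserver_connstring": "old-connstring" } });

    // Patch the single field and write the whole document back, pretty-printed,
    // mirroring what update_pageservers_in_config does with the typed config.
    config["spec"]["pageserver_connstring"] = json!("new-connstring");
    println!("{}", serde_json::to_string_pretty(&config)?);
    Ok(())
}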
|
||||
|
||||
// Call the /status HTTP API
|
||||
pub async fn get_status(&self) -> Result<ComputeStatusResponse> {
|
||||
let client = reqwest::Client::new();
|
||||
@@ -1178,6 +1204,33 @@ impl Endpoint {
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
pub async fn refresh_configuration(&self) -> Result<()> {
|
||||
let client = reqwest::Client::builder()
|
||||
.timeout(Duration::from_secs(30))
|
||||
.build()
|
||||
.unwrap();
|
||||
let response = client
|
||||
.post(format!(
|
||||
"http://{}:{}/refresh_configuration",
|
||||
self.internal_http_address.ip(),
|
||||
self.internal_http_address.port()
|
||||
))
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
let status = response.status();
|
||||
if !(status.is_client_error() || status.is_server_error()) {
|
||||
Ok(())
|
||||
} else {
|
||||
let url = response.url().to_owned();
|
||||
let msg = match response.text().await {
|
||||
Ok(err_body) => format!("Error: {err_body}"),
|
||||
Err(_) => format!("Http error ({}) at {}.", status.as_u16(), url),
|
||||
};
|
||||
Err(anyhow::anyhow!(msg))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn connstr(&self, user: &str, db_name: &str) -> String {
|
||||
format!(
|
||||
"postgresql://{}@{}:{}/{}",
|
||||
|
||||
@@ -108,11 +108,10 @@ pub enum PromoteState {
|
||||
Failed { error: String },
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Default, Debug, Clone)]
|
||||
#[derive(Deserialize, Default, Debug)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
/// Result of /safekeepers_lsn
|
||||
pub struct SafekeepersLsn {
|
||||
pub safekeepers: String,
|
||||
pub struct PromoteConfig {
|
||||
pub spec: ComputeSpec,
|
||||
pub wal_flush_lsn: utils::lsn::Lsn,
|
||||
}
|
||||
|
||||
@@ -173,6 +172,11 @@ pub enum ComputeStatus {
|
||||
TerminationPendingImmediate,
|
||||
// Terminated Postgres
|
||||
Terminated,
|
||||
// A spec refresh is being requested
|
||||
RefreshConfigurationPending,
|
||||
// A spec refresh is being applied. We cannot refresh configuration again until the current
|
||||
// refresh is done, i.e., signal_refresh_configuration() will return 500 error.
|
||||
RefreshConfiguration,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize)]
|
||||
@@ -185,6 +189,10 @@ impl Display for ComputeStatus {
|
||||
match self {
|
||||
ComputeStatus::Empty => f.write_str("empty"),
|
||||
ComputeStatus::ConfigurationPending => f.write_str("configuration-pending"),
|
||||
ComputeStatus::RefreshConfiguration => f.write_str("refresh-configuration"),
|
||||
ComputeStatus::RefreshConfigurationPending => {
|
||||
f.write_str("refresh-configuration-pending")
|
||||
}
|
||||
ComputeStatus::Init => f.write_str("init"),
|
||||
ComputeStatus::Running => f.write_str("running"),
|
||||
ComputeStatus::Configuration => f.write_str("configuration"),
|
||||
|
||||
@@ -193,6 +193,9 @@ pub struct ComputeSpec {
|
||||
///
|
||||
/// We use this value to derive other values, such as the installed extensions metric.
|
||||
pub suspend_timeout_seconds: i64,
|
||||
|
||||
// Databricks specific options for compute instance.
|
||||
pub databricks_settings: Option<DatabricksSettings>,
|
||||
}
|
||||
|
||||
/// Feature flag to signal `compute_ctl` to enable certain experimental functionality.
|
||||
|
||||
@@ -558,11 +558,11 @@ async fn add_request_id_header_to_response(
|
||||
mut res: Response<Body>,
|
||||
req_info: RequestInfo,
|
||||
) -> Result<Response<Body>, ApiError> {
|
||||
if let Some(request_id) = req_info.context::<RequestId>() {
|
||||
if let Ok(request_header_value) = HeaderValue::from_str(&request_id.0) {
|
||||
res.headers_mut()
|
||||
.insert(&X_REQUEST_ID_HEADER, request_header_value);
|
||||
};
|
||||
if let Some(request_id) = req_info.context::<RequestId>()
|
||||
&& let Ok(request_header_value) = HeaderValue::from_str(&request_id.0)
|
||||
{
|
||||
res.headers_mut()
|
||||
.insert(&X_REQUEST_ID_HEADER, request_header_value);
|
||||
};
|
||||
|
||||
Ok(res)
|
||||
|
||||
@@ -72,10 +72,10 @@ impl Server {
if err.is_incomplete_message() || err.is_closed() || err.is_timeout() {
return true;
}
if let Some(inner) = err.source() {
if let Some(io) = inner.downcast_ref::<std::io::Error>() {
return suppress_io_error(io);
}
if let Some(inner) = err.source()
&& let Some(io) = inner.downcast_ref::<std::io::Error>()
{
return suppress_io_error(io);
}
false
}
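Editor's note: this hunk and the request-id header hunk above both collapse two nested `if let` blocks into one condition using let-chains, recently stabilized for the Rust 2024 edition. A minimal stand-alone illustration of the shape (unrelated to the IO-error or header logic itself):

fn main() {
    let raw_id: Option<&str> = Some("42");

    // One condition instead of two nested `if let`s; both bindings are only
    // in scope when every clause matches. Requires edition 2024.
    if let Some(raw) = raw_id
        && let Ok(n) = raw.parse::<u32>()
    {
        println!("request id: {n}");
    }
}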
|
||||
|
||||
@@ -129,6 +129,12 @@ impl<L: LabelGroup> InfoMetric<L> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<L: LabelGroup + Default> Default for InfoMetric<L, GaugeState> {
|
||||
fn default() -> Self {
|
||||
InfoMetric::new(L::default())
|
||||
}
|
||||
}
|
||||
|
||||
impl<L: LabelGroup, M: MetricType<Metadata = ()>> InfoMetric<L, M> {
|
||||
pub fn with_metric(label: L, metric: M) -> Self {
|
||||
Self {
|
||||
|
||||
@@ -363,7 +363,7 @@ where
|
||||
// TODO: An Iterator might be nicer. The communicator's clock algorithm needs to
|
||||
// _slowly_ iterate through all buckets with its clock hand, without holding a lock.
|
||||
// If we switch to an Iterator, it must not hold the lock.
|
||||
pub fn get_at_bucket(&self, pos: usize) -> Option<ValueReadGuard<(K, V)>> {
|
||||
pub fn get_at_bucket(&self, pos: usize) -> Option<ValueReadGuard<'_, (K, V)>> {
|
||||
let map = unsafe { self.shared_ptr.as_ref() }.unwrap().read();
|
||||
if pos >= map.buckets.len() {
|
||||
return None;
|
||||
|
||||
@@ -1500,6 +1500,7 @@ pub struct TimelineArchivalConfigRequest {
|
||||
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone)]
|
||||
pub struct TimelinePatchIndexPartRequest {
|
||||
pub rel_size_migration: Option<RelSizeMigration>,
|
||||
pub rel_size_migrated_at: Option<Lsn>,
|
||||
pub gc_compaction_last_completed_lsn: Option<Lsn>,
|
||||
pub applied_gc_cutoff_lsn: Option<Lsn>,
|
||||
#[serde(default)]
|
||||
@@ -1533,10 +1534,10 @@ pub enum RelSizeMigration {
|
||||
/// `None` is the same as `Some(RelSizeMigration::Legacy)`.
|
||||
Legacy,
|
||||
/// The tenant is migrating to the new rel_size format. Both old and new rel_size format are
|
||||
/// persisted in the index part. The read path will read both formats and merge them.
|
||||
/// persisted in the storage. The read path will read both formats and validate them.
|
||||
Migrating,
|
||||
/// The tenant has migrated to the new rel_size format. Only the new rel_size format is persisted
|
||||
/// in the index part, and the read path will not read the old format.
|
||||
/// in the storage, and the read path will not read the old format.
|
||||
Migrated,
|
||||
}
|
||||
|
||||
@@ -1619,6 +1620,7 @@ pub struct TimelineInfo {
|
||||
|
||||
/// The status of the rel_size migration.
|
||||
pub rel_size_migration: Option<RelSizeMigration>,
|
||||
pub rel_size_migrated_at: Option<Lsn>,
|
||||
|
||||
/// Whether the timeline is invisible in synthetic size calculations.
|
||||
pub is_invisible: Option<bool>,
|
||||
|
||||
@@ -12,7 +12,6 @@ crc32c.workspace = true
|
||||
criterion.workspace = true
|
||||
once_cell.workspace = true
|
||||
log.workspace = true
|
||||
memoffset.workspace = true
|
||||
pprof.workspace = true
|
||||
thiserror.workspace = true
|
||||
serde.workspace = true
|
||||
|
||||
@@ -34,9 +34,8 @@ const SIZEOF_CONTROLDATA: usize = size_of::<ControlFileData>();
impl ControlFileData {
/// Compute the offset of the `crc` field within the `ControlFileData` struct.
/// Equivalent to offsetof(ControlFileData, crc) in C.
// Someday this can be const when the right compiler features land.
fn pg_control_crc_offset() -> usize {
memoffset::offset_of!(ControlFileData, crc)
const fn pg_control_crc_offset() -> usize {
std::mem::offset_of!(ControlFileData, crc)
}
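Editor's note: the hunk above swaps the memoffset macro for std::mem::offset_of!, which is usable inside a const fn on current stable Rust, hence the dropped "someday this can be const" comment. A stand-alone sketch on a dummy repr(C) struct (an assumption for illustration, not the real ControlFileData layout):

#[repr(C)]
struct Header {
    tag: u8,  // offset 0
    len: u32, // offset 4, after 3 bytes of padding
    crc: u32, // offset 8
}

const fn crc_offset() -> usize {
    // Evaluated at compile time; this is what lets pg_control_crc_offset become const.
    std::mem::offset_of!(Header, crc)
}

fn main() {
    assert_eq!(crc_offset(), 8);
    println!("crc offset: {}", crc_offset());
}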
|
||||
|
||||
///
|
||||
|
||||
@@ -15,6 +15,7 @@ use tokio::sync::mpsc;
|
||||
use crate::cancel_token::RawCancelToken;
|
||||
use crate::codec::{BackendMessages, FrontendMessage, RecordNotices};
|
||||
use crate::config::{Host, SslMode};
|
||||
use crate::connection::gc_bytesmut;
|
||||
use crate::query::RowStream;
|
||||
use crate::simple_query::SimpleQueryStream;
|
||||
use crate::types::{Oid, Type};
|
||||
@@ -95,20 +96,13 @@ impl InnerClient {
|
||||
Ok(PartialQuery(Some(self)))
|
||||
}
|
||||
|
||||
// pub fn send_with_sync<F>(&mut self, f: F) -> Result<&mut Responses, Error>
|
||||
// where
|
||||
// F: FnOnce(&mut BytesMut) -> Result<(), Error>,
|
||||
// {
|
||||
// self.start()?.send_with_sync(f)
|
||||
// }
|
||||
|
||||
pub fn send_simple_query(&mut self, query: &str) -> Result<&mut Responses, Error> {
|
||||
self.responses.waiting += 1;
|
||||
|
||||
self.buffer.clear();
|
||||
// simple queries do not need sync.
|
||||
frontend::query(query, &mut self.buffer).map_err(Error::encode)?;
|
||||
let buf = self.buffer.split().freeze();
|
||||
let buf = self.buffer.split();
|
||||
self.send_message(FrontendMessage::Raw(buf))
|
||||
}
|
||||
|
||||
@@ -125,7 +119,7 @@ impl Drop for PartialQuery<'_> {
|
||||
if let Some(client) = self.0.take() {
|
||||
client.buffer.clear();
|
||||
frontend::sync(&mut client.buffer);
|
||||
let buf = client.buffer.split().freeze();
|
||||
let buf = client.buffer.split();
|
||||
let _ = client.send_message(FrontendMessage::Raw(buf));
|
||||
}
|
||||
}
|
||||
@@ -141,7 +135,7 @@ impl<'a> PartialQuery<'a> {
|
||||
client.buffer.clear();
|
||||
f(&mut client.buffer)?;
|
||||
frontend::flush(&mut client.buffer);
|
||||
let buf = client.buffer.split().freeze();
|
||||
let buf = client.buffer.split();
|
||||
client.send_message(FrontendMessage::Raw(buf))
|
||||
}
|
||||
|
||||
@@ -154,7 +148,7 @@ impl<'a> PartialQuery<'a> {
|
||||
client.buffer.clear();
|
||||
f(&mut client.buffer)?;
|
||||
frontend::sync(&mut client.buffer);
|
||||
let buf = client.buffer.split().freeze();
|
||||
let buf = client.buffer.split();
|
||||
let _ = client.send_message(FrontendMessage::Raw(buf));
|
||||
|
||||
Ok(&mut self.0.take().unwrap().responses)
|
||||
@@ -191,6 +185,7 @@ impl Client {
|
||||
ssl_mode: SslMode,
|
||||
process_id: i32,
|
||||
secret_key: i32,
|
||||
write_buf: BytesMut,
|
||||
) -> Client {
|
||||
Client {
|
||||
inner: InnerClient {
|
||||
@@ -201,7 +196,7 @@ impl Client {
|
||||
waiting: 0,
|
||||
received: 0,
|
||||
},
|
||||
buffer: Default::default(),
|
||||
buffer: write_buf,
|
||||
},
|
||||
cached_typeinfo: Default::default(),
|
||||
|
||||
@@ -292,8 +287,35 @@ impl Client {
|
||||
simple_query::batch_execute(self.inner_mut(), query).await
|
||||
}
|
||||
|
||||
pub async fn discard_all(&mut self) -> Result<ReadyForQueryStatus, Error> {
|
||||
self.batch_execute("discard all").await
|
||||
/// Similar to `discard_all`, but it does not clear any query plans
|
||||
///
|
||||
/// This runs in the background, so it can be executed without `await`ing.
|
||||
pub fn reset_session_background(&mut self) -> Result<(), Error> {
|
||||
// "CLOSE ALL": closes any cursors
|
||||
// "SET SESSION AUTHORIZATION DEFAULT": resets the current_user back to the session_user
|
||||
// "RESET ALL": resets any GUCs back to their session defaults.
|
||||
// "DEALLOCATE ALL": deallocates any prepared statements
|
||||
// "UNLISTEN *": stops listening on all channels
|
||||
// "SELECT pg_advisory_unlock_all();": unlocks all advisory locks
|
||||
// "DISCARD TEMP;": drops all temporary tables
|
||||
// "DISCARD SEQUENCES;": deallocates all cached sequence state
|
||||
|
||||
let _responses = self.inner_mut().send_simple_query(
|
||||
"ROLLBACK;
|
||||
CLOSE ALL;
|
||||
SET SESSION AUTHORIZATION DEFAULT;
|
||||
RESET ALL;
|
||||
DEALLOCATE ALL;
|
||||
UNLISTEN *;
|
||||
SELECT pg_advisory_unlock_all();
|
||||
DISCARD TEMP;
|
||||
DISCARD SEQUENCES;",
|
||||
)?;
|
||||
|
||||
// Clean up memory usage.
|
||||
gc_bytesmut(&mut self.inner_mut().buffer);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Begins a new database transaction.
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
use std::io;
|
||||
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use bytes::BytesMut;
|
||||
use fallible_iterator::FallibleIterator;
|
||||
use postgres_protocol2::message::backend;
|
||||
use tokio::sync::mpsc::UnboundedSender;
|
||||
use tokio_util::codec::{Decoder, Encoder};
|
||||
|
||||
pub enum FrontendMessage {
|
||||
Raw(Bytes),
|
||||
Raw(BytesMut),
|
||||
RecordNotices(RecordNotices),
|
||||
}
|
||||
|
||||
@@ -17,7 +17,10 @@ pub struct RecordNotices {
|
||||
}
|
||||
|
||||
pub enum BackendMessage {
|
||||
Normal { messages: BackendMessages },
|
||||
Normal {
|
||||
messages: BackendMessages,
|
||||
ready: bool,
|
||||
},
|
||||
Async(backend::Message),
|
||||
}
|
||||
|
||||
@@ -40,11 +43,11 @@ impl FallibleIterator for BackendMessages {

pub struct PostgresCodec;

impl Encoder<Bytes> for PostgresCodec {
impl Encoder<BytesMut> for PostgresCodec {
type Error = io::Error;

fn encode(&mut self, item: Bytes, dst: &mut BytesMut) -> io::Result<()> {
dst.extend_from_slice(&item);
fn encode(&mut self, item: BytesMut, dst: &mut BytesMut) -> io::Result<()> {
dst.unsplit(item);
Ok(())
}
}
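Editor's note: the encoder now accepts a BytesMut and appends it with unsplit rather than copying a frozen Bytes. A small sketch of the buffer handoff this enables; the copy-avoiding path when the destination is empty or contiguous is the intent here, and unsplit falls back to an ordinary append otherwise (behavior per the bytes crate):

use bytes::{BufMut, BytesMut};

fn main() {
    // Client side: encode a message into a reusable buffer, then split it off.
    let mut client_buf = BytesMut::with_capacity(64);
    client_buf.put_slice(b"Q\0\0\0\x05;"); // stand-in for an encoded frontend message
    let framed: BytesMut = client_buf.split();

    // Codec side: encode() hands the message to the connection's write buffer.
    // unsplit() re-uses the message's allocation when it can; otherwise it copies.
    let mut write_buf = BytesMut::new();
    write_buf.unsplit(framed);
    assert_eq!(&write_buf[..], b"Q\0\0\0\x05;");

    // The client buffer is left empty but keeps its spare capacity for the next message.
    assert!(client_buf.is_empty());
}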
|
||||
@@ -56,6 +59,7 @@ impl Decoder for PostgresCodec {
|
||||
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<BackendMessage>, io::Error> {
|
||||
let mut idx = 0;
|
||||
|
||||
let mut ready = false;
|
||||
while let Some(header) = backend::Header::parse(&src[idx..])? {
|
||||
let len = header.len() as usize + 1;
|
||||
if src[idx..].len() < len {
|
||||
@@ -79,6 +83,7 @@ impl Decoder for PostgresCodec {
|
||||
idx += len;
|
||||
|
||||
if header.tag() == backend::READY_FOR_QUERY_TAG {
|
||||
ready = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -88,6 +93,7 @@ impl Decoder for PostgresCodec {
|
||||
} else {
|
||||
Ok(Some(BackendMessage::Normal {
|
||||
messages: BackendMessages(src.split_to(idx)),
|
||||
ready,
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -250,19 +250,20 @@ impl Config {
|
||||
{
|
||||
let stream = connect_tls(stream, self.ssl_mode, tls).await?;
|
||||
let mut stream = StartupStream::new(stream);
|
||||
connect_raw::startup(&mut stream, self).await?;
|
||||
connect_raw::authenticate(&mut stream, self).await?;
|
||||
|
||||
Ok(stream)
|
||||
}
|
||||
|
||||
pub async fn authenticate<S, T>(&self, stream: &mut StartupStream<S, T>) -> Result<(), Error>
|
||||
pub fn authenticate<S, T>(
|
||||
&self,
|
||||
stream: &mut StartupStream<S, T>,
|
||||
) -> impl Future<Output = Result<(), Error>>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: TlsStream + Unpin,
|
||||
{
|
||||
connect_raw::startup(stream, self).await?;
|
||||
connect_raw::authenticate(stream, self).await
|
||||
connect_raw::authenticate(stream, self)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ use tokio::net::TcpStream;
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
use crate::client::SocketConfig;
|
||||
use crate::config::Host;
|
||||
use crate::config::{Host, SslMode};
|
||||
use crate::connect_raw::StartupStream;
|
||||
use crate::connect_socket::connect_socket;
|
||||
use crate::tls::{MakeTlsConnect, TlsConnect};
|
||||
@@ -45,28 +45,53 @@ where
|
||||
T: TlsConnect<TcpStream>,
|
||||
{
|
||||
let socket = connect_socket(host_addr, host, port, config.connect_timeout).await?;
|
||||
let mut stream = config.tls_and_authenticate(socket, tls).await?;
|
||||
let stream = config.tls_and_authenticate(socket, tls).await?;
|
||||
managed(
|
||||
stream,
|
||||
host_addr,
|
||||
host.clone(),
|
||||
port,
|
||||
config.ssl_mode,
|
||||
config.connect_timeout,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn managed<TlsStream>(
|
||||
mut stream: StartupStream<TcpStream, TlsStream>,
|
||||
host_addr: Option<IpAddr>,
|
||||
host: Host,
|
||||
port: u16,
|
||||
ssl_mode: SslMode,
|
||||
connect_timeout: Option<std::time::Duration>,
|
||||
) -> Result<(Client, Connection<TcpStream, TlsStream>), Error>
|
||||
where
|
||||
TlsStream: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
let (process_id, secret_key) = wait_until_ready(&mut stream).await?;
|
||||
|
||||
let socket_config = SocketConfig {
|
||||
host_addr,
|
||||
host: host.clone(),
|
||||
host,
|
||||
port,
|
||||
connect_timeout: config.connect_timeout,
|
||||
connect_timeout,
|
||||
};
|
||||
|
||||
let mut stream = stream.into_framed();
|
||||
let write_buf = std::mem::take(stream.write_buffer_mut());
|
||||
|
||||
let (client_tx, conn_rx) = mpsc::unbounded_channel();
|
||||
let (conn_tx, client_rx) = mpsc::channel(4);
|
||||
let client = Client::new(
|
||||
client_tx,
|
||||
client_rx,
|
||||
socket_config,
|
||||
config.ssl_mode,
|
||||
ssl_mode,
|
||||
process_id,
|
||||
secret_key,
|
||||
write_buf,
|
||||
);
|
||||
|
||||
let stream = stream.into_framed();
|
||||
let connection = Connection::new(stream, conn_tx, conn_rx);
|
||||
|
||||
Ok((client, connection))
|
||||
|
||||
@@ -2,51 +2,28 @@ use std::io;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll, ready};
|
||||
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use bytes::BytesMut;
|
||||
use fallible_iterator::FallibleIterator;
|
||||
use futures_util::{Sink, SinkExt, Stream, TryStreamExt};
|
||||
use futures_util::{SinkExt, Stream, TryStreamExt};
|
||||
use postgres_protocol2::authentication::sasl;
|
||||
use postgres_protocol2::authentication::sasl::ScramSha256;
|
||||
use postgres_protocol2::message::backend::{AuthenticationSaslBody, Message};
|
||||
use postgres_protocol2::message::frontend;
|
||||
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
|
||||
use tokio_util::codec::{Framed, FramedParts, FramedWrite};
|
||||
use tokio_util::codec::{Framed, FramedParts};
|
||||
|
||||
use crate::Error;
|
||||
use crate::codec::PostgresCodec;
|
||||
use crate::config::{self, AuthKeys, Config};
|
||||
use crate::connection::{GC_THRESHOLD, INITIAL_CAPACITY};
|
||||
use crate::maybe_tls_stream::MaybeTlsStream;
|
||||
use crate::tls::TlsStream;
|
||||
|
||||
pub struct StartupStream<S, T> {
|
||||
inner: FramedWrite<MaybeTlsStream<S, T>, PostgresCodec>,
|
||||
inner: Framed<MaybeTlsStream<S, T>, PostgresCodec>,
|
||||
read_buf: BytesMut,
|
||||
}
|
||||
|
||||
impl<S, T> Sink<Bytes> for StartupStream<S, T>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
type Error = io::Error;
|
||||
|
||||
fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
Pin::new(&mut self.inner).poll_ready(cx)
|
||||
}
|
||||
|
||||
fn start_send(mut self: Pin<&mut Self>, item: Bytes) -> io::Result<()> {
|
||||
Pin::new(&mut self.inner).start_send(item)
|
||||
}
|
||||
|
||||
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
Pin::new(&mut self.inner).poll_flush(cx)
|
||||
}
|
||||
|
||||
fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
Pin::new(&mut self.inner).poll_close(cx)
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, T> Stream for StartupStream<S, T>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
@@ -55,6 +32,8 @@ where
|
||||
type Item = io::Result<Message>;
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
// We don't use `self.inner.poll_next()` as that might over-read into the read buffer.
|
||||
|
||||
// read 1 byte tag, 4 bytes length.
|
||||
let header = ready!(self.as_mut().poll_fill_buf_exact(cx, 5)?);
|
||||
|
||||
@@ -121,36 +100,28 @@ where
|
||||
}
|
||||
|
||||
pub fn into_framed(mut self) -> Framed<MaybeTlsStream<S, T>, PostgresCodec> {
|
||||
let write_buf = std::mem::take(self.inner.write_buffer_mut());
|
||||
let io = self.inner.into_inner();
|
||||
let mut parts = FramedParts::new(io, PostgresCodec);
|
||||
parts.read_buf = self.read_buf;
|
||||
parts.write_buf = write_buf;
|
||||
Framed::from_parts(parts)
|
||||
*self.inner.read_buffer_mut() = self.read_buf;
|
||||
self.inner
|
||||
}
|
||||
|
||||
pub fn new(io: MaybeTlsStream<S, T>) -> Self {
|
||||
let mut parts = FramedParts::new(io, PostgresCodec);
|
||||
parts.write_buf = BytesMut::with_capacity(INITIAL_CAPACITY);
|
||||
|
||||
let mut inner = Framed::from_parts(parts);
|
||||
|
||||
// This is the default already, but nice to be explicit.
|
||||
// We divide by two because writes will overshoot the boundary.
|
||||
// We don't want constant overshoots to cause us to constantly re-shrink the buffer.
|
||||
inner.set_backpressure_boundary(GC_THRESHOLD / 2);
|
||||
|
||||
Self {
|
||||
inner: FramedWrite::new(io, PostgresCodec),
|
||||
read_buf: BytesMut::new(),
|
||||
inner,
|
||||
read_buf: BytesMut::with_capacity(INITIAL_CAPACITY),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn startup<S, T>(
|
||||
stream: &mut StartupStream<S, T>,
|
||||
config: &Config,
|
||||
) -> Result<(), Error>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
let mut buf = BytesMut::new();
|
||||
frontend::startup_message(&config.server_params, &mut buf).map_err(Error::encode)?;
|
||||
|
||||
stream.send(buf.freeze()).await.map_err(Error::io)
|
||||
}
|
||||
|
||||
pub(crate) async fn authenticate<S, T>(
|
||||
stream: &mut StartupStream<S, T>,
|
||||
config: &Config,
|
||||
@@ -159,6 +130,10 @@ where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: TlsStream + Unpin,
|
||||
{
|
||||
frontend::startup_message(&config.server_params, stream.inner.write_buffer_mut())
|
||||
.map_err(Error::encode)?;
|
||||
|
||||
stream.inner.flush().await.map_err(Error::io)?;
|
||||
match stream.try_next().await.map_err(Error::io)? {
|
||||
Some(Message::AuthenticationOk) => {
|
||||
can_skip_channel_binding(config)?;
|
||||
@@ -172,7 +147,8 @@ where
|
||||
.as_ref()
|
||||
.ok_or_else(|| Error::config("password missing".into()))?;
|
||||
|
||||
authenticate_password(stream, pass).await?;
|
||||
frontend::password_message(pass, stream.inner.write_buffer_mut())
|
||||
.map_err(Error::encode)?;
|
||||
}
|
||||
Some(Message::AuthenticationSasl(body)) => {
|
||||
authenticate_sasl(stream, body, config).await?;
|
||||
@@ -191,6 +167,7 @@ where
|
||||
None => return Err(Error::closed()),
|
||||
}
|
||||
|
||||
stream.inner.flush().await.map_err(Error::io)?;
|
||||
match stream.try_next().await.map_err(Error::io)? {
|
||||
Some(Message::AuthenticationOk) => Ok(()),
|
||||
Some(Message::ErrorResponse(body)) => Err(Error::db(body)),
|
||||
@@ -208,20 +185,6 @@ fn can_skip_channel_binding(config: &Config) -> Result<(), Error> {
|
||||
}
|
||||
}
|
||||
|
||||
async fn authenticate_password<S, T>(
|
||||
stream: &mut StartupStream<S, T>,
|
||||
password: &[u8],
|
||||
) -> Result<(), Error>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
let mut buf = BytesMut::new();
|
||||
frontend::password_message(password, &mut buf).map_err(Error::encode)?;
|
||||
|
||||
stream.send(buf.freeze()).await.map_err(Error::io)
|
||||
}
|
||||
|
||||
async fn authenticate_sasl<S, T>(
|
||||
stream: &mut StartupStream<S, T>,
|
||||
body: AuthenticationSaslBody,
|
||||
@@ -276,10 +239,10 @@ where
|
||||
return Err(Error::config("password or auth keys missing".into()));
|
||||
};
|
||||
|
||||
let mut buf = BytesMut::new();
|
||||
frontend::sasl_initial_response(mechanism, scram.message(), &mut buf).map_err(Error::encode)?;
|
||||
stream.send(buf.freeze()).await.map_err(Error::io)?;
|
||||
frontend::sasl_initial_response(mechanism, scram.message(), stream.inner.write_buffer_mut())
|
||||
.map_err(Error::encode)?;
|
||||
|
||||
stream.inner.flush().await.map_err(Error::io)?;
|
||||
let body = match stream.try_next().await.map_err(Error::io)? {
|
||||
Some(Message::AuthenticationSaslContinue(body)) => body,
|
||||
Some(Message::ErrorResponse(body)) => return Err(Error::db(body)),
|
||||
@@ -292,10 +255,10 @@ where
|
||||
.await
|
||||
.map_err(|e| Error::authentication(e.into()))?;
|
||||
|
||||
let mut buf = BytesMut::new();
|
||||
frontend::sasl_response(scram.message(), &mut buf).map_err(Error::encode)?;
|
||||
stream.send(buf.freeze()).await.map_err(Error::io)?;
|
||||
frontend::sasl_response(scram.message(), stream.inner.write_buffer_mut())
|
||||
.map_err(Error::encode)?;
|
||||
|
||||
stream.inner.flush().await.map_err(Error::io)?;
|
||||
let body = match stream.try_next().await.map_err(Error::io)? {
|
||||
Some(Message::AuthenticationSaslFinal(body)) => body,
|
||||
Some(Message::ErrorResponse(body)) => return Err(Error::db(body)),
|
||||
|
||||
@@ -44,6 +44,27 @@ pub struct Connection<S, T> {
|
||||
state: State,
|
||||
}
|
||||
|
||||
pub const INITIAL_CAPACITY: usize = 2 * 1024;
pub const GC_THRESHOLD: usize = 16 * 1024;

/// Garbage-collect the [`BytesMut`] if it has too much spare capacity.
pub fn gc_bytesmut(buf: &mut BytesMut) {
// We use a different mode to shrink the buf when above the threshold.
// When above the threshold, we only re-allocate when the buf has 2x spare capacity.
let reclaim = GC_THRESHOLD.checked_sub(buf.len()).unwrap_or(buf.len());

// `try_reclaim` tries to get the capacity from any shared `BytesMut`s,
// before then comparing the length against the capacity.
if buf.try_reclaim(reclaim) {
let capacity = usize::max(buf.len(), INITIAL_CAPACITY);

// Allocate a new `BytesMut` so that we deallocate the old version.
let mut new = BytesMut::with_capacity(capacity);
new.extend_from_slice(buf);
*buf = new;
}
}
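Editor's note: the shrink condition above is easiest to see with numbers. Below the 16 KiB threshold the function asks try_reclaim for the gap up to 16 KiB; above it, for the buffer's own length, i.e. a 2x over-allocation. A stand-alone sketch of just that arithmetic (constants copied from above, the rest is illustrative):

const INITIAL_CAPACITY: usize = 2 * 1024;
const GC_THRESHOLD: usize = 16 * 1024;

// Spare capacity that must be reclaimable before gc_bytesmut re-allocates.
fn reclaim_target(len: usize) -> usize {
    GC_THRESHOLD.checked_sub(len).unwrap_or(len)
}

fn main() {
    // 4 KiB of data: shrink once the allocation could give back 12 KiB (16 KiB total).
    assert_eq!(reclaim_target(4 * 1024), 12 * 1024);
    // 20 KiB of data: shrink only if at least another 20 KiB is spare (>= 2x capacity).
    assert_eq!(reclaim_target(20 * 1024), 20 * 1024);
    // A shrunk buffer never drops below the initial capacity.
    println!("floor: {INITIAL_CAPACITY} bytes");
}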
|
||||
|
||||
pub enum Never {}
|
||||
|
||||
impl<S, T> Connection<S, T>
|
||||
@@ -86,7 +107,14 @@ where
|
||||
continue;
|
||||
}
|
||||
BackendMessage::Async(_) => continue,
|
||||
BackendMessage::Normal { messages } => messages,
|
||||
BackendMessage::Normal { messages, ready } => {
|
||||
// if we read a ReadyForQuery from postgres, let's try GC the read buffer.
|
||||
if ready {
|
||||
gc_bytesmut(self.stream.read_buffer_mut());
|
||||
}
|
||||
|
||||
messages
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -177,12 +205,7 @@ where
|
||||
// Send a terminate message to postgres
|
||||
Poll::Ready(None) => {
|
||||
trace!("poll_write: at eof, terminating");
|
||||
let mut request = BytesMut::new();
|
||||
frontend::terminate(&mut request);
|
||||
|
||||
Pin::new(&mut self.stream)
|
||||
.start_send(request.freeze())
|
||||
.map_err(Error::io)?;
|
||||
frontend::terminate(self.stream.write_buffer_mut());
|
||||
|
||||
trace!("poll_write: sent eof, closing");
|
||||
trace!("poll_write: done");
|
||||
@@ -205,6 +228,13 @@ where
|
||||
{
|
||||
Poll::Ready(()) => {
|
||||
trace!("poll_flush: flushed");
|
||||
|
||||
// Since our codec prefers to share the buffer with the `Client`,
|
||||
// if we don't release our share, then the `Client` would have to re-alloc
|
||||
// the buffer when they next use it.
|
||||
debug_assert!(self.stream.write_buffer().is_empty());
|
||||
*self.stream.write_buffer_mut() = BytesMut::new();
|
||||
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
Poll::Pending => {
|
||||
|
||||
@@ -48,7 +48,7 @@ mod cancel_token;
|
||||
mod client;
|
||||
mod codec;
|
||||
pub mod config;
|
||||
mod connect;
|
||||
pub mod connect;
|
||||
pub mod connect_raw;
|
||||
mod connect_socket;
|
||||
mod connect_tls;
|
||||
|
||||
@@ -8,7 +8,7 @@ license.workspace = true
|
||||
hyper0.workspace = true
|
||||
opentelemetry = { workspace = true, features = ["trace"] }
|
||||
opentelemetry_sdk = { workspace = true, features = ["rt-tokio"] }
|
||||
opentelemetry-otlp = { workspace = true, default-features = false, features = ["http-proto", "trace", "http", "reqwest-client"] }
|
||||
opentelemetry-otlp = { workspace = true, default-features = false, features = ["http-proto", "trace", "http", "reqwest-blocking-client"] }
|
||||
opentelemetry-semantic-conventions.workspace = true
|
||||
tokio = { workspace = true, features = ["rt", "rt-multi-thread"] }
|
||||
tracing.workspace = true
|
||||
|
||||
@@ -49,7 +49,7 @@ impl PerfSpan {
}
}

pub fn enter(&self) -> PerfSpanEntered {
pub fn enter(&self) -> PerfSpanEntered<'_> {
if let Some(ref id) = self.inner.id() {
self.dispatch.enter(id);
}
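Editor's note: this change, like the ValueReadGuard<'_, (K, V)> one earlier in the diff, only spells out the elided lifetime in the return type, presumably to satisfy rustc's lifetime-elision lints; behavior is unchanged. A stand-alone illustration of the pattern (types here are invented for the example):

struct Entered<'a>(&'a str);

struct Span(String);

impl Span {
    // The `'_` makes it visible at the call site that the guard borrows from self.
    fn enter(&self) -> Entered<'_> {
        Entered(&self.0)
    }
}

fn main() {
    let span = Span("request".to_string());
    let guard = span.enter();
    println!("entered {}", guard.0);
}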
|
||||
|
||||
@@ -429,9 +429,11 @@ pub fn empty_shmem() -> crate::bindings::WalproposerShmemState {
|
||||
};
|
||||
|
||||
let empty_wal_rate_limiter = crate::bindings::WalRateLimiter {
|
||||
effective_max_wal_bytes_per_second: crate::bindings::pg_atomic_uint32 { value: 0 },
|
||||
should_limit: crate::bindings::pg_atomic_uint32 { value: 0 },
|
||||
sent_bytes: 0,
|
||||
last_recorded_time_us: crate::bindings::pg_atomic_uint64 { value: 0 },
|
||||
batch_start_time_us: crate::bindings::pg_atomic_uint64 { value: 0 },
|
||||
batch_end_time_us: crate::bindings::pg_atomic_uint64 { value: 0 },
|
||||
};
|
||||
|
||||
crate::bindings::WalproposerShmemState {
|
||||
|
||||
@@ -727,7 +727,7 @@ fn start_pageserver(
|
||||
disk_usage_eviction_state,
|
||||
deletion_queue.new_client(),
|
||||
secondary_controller,
|
||||
feature_resolver,
|
||||
feature_resolver.clone(),
|
||||
)
|
||||
.context("Failed to initialize router state")?,
|
||||
);
|
||||
@@ -853,14 +853,14 @@ fn start_pageserver(
|
||||
} else {
|
||||
None
|
||||
},
|
||||
feature_resolver.clone(),
|
||||
);
|
||||
|
||||
// Spawn a Pageserver gRPC server task. It will spawn separate tasks for
|
||||
// each stream/request.
|
||||
// Spawn a Pageserver gRPC server task. It will spawn separate tasks for each request/stream.
|
||||
// It uses a separate compute request Tokio runtime (COMPUTE_REQUEST_RUNTIME).
|
||||
//
|
||||
// TODO: this uses a separate Tokio runtime for the page service. If we want
|
||||
// other gRPC services, they will need their own port and runtime. Is this
|
||||
// necessary?
|
||||
// NB: this port is exposed to computes. It should only provide services that we're okay with
|
||||
// computes accessing. Internal services should use a separate port.
|
||||
let mut page_service_grpc = None;
|
||||
if let Some(grpc_listener) = grpc_listener {
|
||||
page_service_grpc = Some(GrpcPageServiceHandler::spawn(
|
||||
|
||||
@@ -486,6 +486,8 @@ async fn build_timeline_info_common(
|
||||
*timeline.get_applied_gc_cutoff_lsn(),
|
||||
);
|
||||
|
||||
let (rel_size_migration, rel_size_migrated_at) = timeline.get_rel_size_v2_status();
|
||||
|
||||
let info = TimelineInfo {
|
||||
tenant_id: timeline.tenant_shard_id,
|
||||
timeline_id: timeline.timeline_id,
|
||||
@@ -517,7 +519,8 @@ async fn build_timeline_info_common(
|
||||
|
||||
state,
|
||||
is_archived: Some(is_archived),
|
||||
rel_size_migration: Some(timeline.get_rel_size_v2_status()),
|
||||
rel_size_migration: Some(rel_size_migration),
|
||||
rel_size_migrated_at,
|
||||
is_invisible: Some(is_invisible),
|
||||
|
||||
walreceiver_status,
|
||||
@@ -941,9 +944,16 @@ async fn timeline_patch_index_part_handler(
|
||||
active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
|
||||
.await?;
|
||||
|
||||
if request_data.rel_size_migration.is_none() && request_data.rel_size_migrated_at.is_some()
|
||||
{
|
||||
return Err(ApiError::BadRequest(anyhow!(
|
||||
"updating rel_size_migrated_at without rel_size_migration is not allowed"
|
||||
)));
|
||||
}
|
||||
|
||||
if let Some(rel_size_migration) = request_data.rel_size_migration {
|
||||
timeline
|
||||
.update_rel_size_v2_status(rel_size_migration)
|
||||
.update_rel_size_v2_status(rel_size_migration, request_data.rel_size_migrated_at)
|
||||
.map_err(ApiError::InternalServerError)?;
|
||||
}
|
||||
|
||||
@@ -2006,6 +2016,10 @@ async fn put_tenant_location_config_handler(
|
||||
let state = get_state(&request);
|
||||
let conf = state.conf;
|
||||
|
||||
fail::fail_point!("put-location-conf-handler", |_| {
|
||||
Err(ApiError::ResourceUnavailable("failpoint".into()))
|
||||
});
|
||||
|
||||
// The `Detached` state is special, it doesn't upsert a tenant, it removes
|
||||
// its local disk content and drops it from memory.
|
||||
if let LocationConfigMode::Detached = request_data.config.mode {
|
||||
|
||||
@@ -57,7 +57,7 @@ pub async fn import_timeline_from_postgres_datadir(

// TODO this should be start_lsn, which is not necessarily equal to end_lsn (aka lsn)
// Then fishing out pg_control would be unnecessary
let mut modification = tline.begin_modification(pgdata_lsn);
let mut modification = tline.begin_modification_for_import(pgdata_lsn);
modification.init_empty()?;
|
||||
|
||||
// Import all but pg_wal
|
||||
@@ -309,7 +309,7 @@ async fn import_wal(
|
||||
waldecoder.feed_bytes(&buf);
|
||||
|
||||
let mut nrecords = 0;
|
||||
let mut modification = tline.begin_modification(last_lsn);
|
||||
let mut modification = tline.begin_modification_for_import(last_lsn);
|
||||
while last_lsn <= endpoint {
|
||||
if let Some((lsn, recdata)) = waldecoder.poll_decode()? {
|
||||
let interpreted = InterpretedWalRecord::from_bytes_filtered(
|
||||
@@ -357,7 +357,7 @@ pub async fn import_basebackup_from_tar(
|
||||
ctx: &RequestContext,
|
||||
) -> Result<()> {
|
||||
info!("importing base at {base_lsn}");
|
||||
let mut modification = tline.begin_modification(base_lsn);
|
||||
let mut modification = tline.begin_modification_for_import(base_lsn);
|
||||
modification.init_empty()?;
|
||||
|
||||
let mut pg_control: Option<ControlFileData> = None;
|
||||
@@ -457,7 +457,7 @@ pub async fn import_wal_from_tar(
|
||||
|
||||
waldecoder.feed_bytes(&bytes[offset..]);
|
||||
|
||||
let mut modification = tline.begin_modification(last_lsn);
|
||||
let mut modification = tline.begin_modification_for_import(last_lsn);
|
||||
while last_lsn <= end_lsn {
|
||||
if let Some((lsn, recdata)) = waldecoder.poll_decode()? {
|
||||
let interpreted = InterpretedWalRecord::from_bytes_filtered(
|
||||
|
||||
@@ -68,6 +68,7 @@ use crate::config::PageServerConf;
|
||||
use crate::context::{
|
||||
DownloadBehavior, PerfInstrumentFutureExt, RequestContext, RequestContextBuilder,
|
||||
};
|
||||
use crate::feature_resolver::FeatureResolver;
|
||||
use crate::metrics::{
|
||||
self, COMPUTE_COMMANDS_COUNTERS, ComputeCommandKind, GetPageBatchBreakReason, LIVE_CONNECTIONS,
|
||||
MISROUTED_PAGESTREAM_REQUESTS, PAGESTREAM_HANDLER_RESULTS_TOTAL, SmgrOpTimer, TimelineMetrics,
|
||||
@@ -139,6 +140,7 @@ pub fn spawn(
|
||||
perf_trace_dispatch: Option<Dispatch>,
|
||||
tcp_listener: tokio::net::TcpListener,
|
||||
tls_config: Option<Arc<rustls::ServerConfig>>,
|
||||
feature_resolver: FeatureResolver,
|
||||
) -> Listener {
|
||||
let cancel = CancellationToken::new();
|
||||
let libpq_ctx = RequestContext::todo_child(
|
||||
@@ -160,6 +162,7 @@ pub fn spawn(
|
||||
conf.pg_auth_type,
|
||||
tls_config,
|
||||
conf.page_service_pipelining.clone(),
|
||||
feature_resolver,
|
||||
libpq_ctx,
|
||||
cancel.clone(),
|
||||
)
|
||||
@@ -218,6 +221,7 @@ pub async fn libpq_listener_main(
|
||||
auth_type: AuthType,
|
||||
tls_config: Option<Arc<rustls::ServerConfig>>,
|
||||
pipelining_config: PageServicePipeliningConfig,
|
||||
feature_resolver: FeatureResolver,
|
||||
listener_ctx: RequestContext,
|
||||
listener_cancel: CancellationToken,
|
||||
) -> Connections {
|
||||
@@ -261,6 +265,7 @@ pub async fn libpq_listener_main(
|
||||
auth_type,
|
||||
tls_config.clone(),
|
||||
pipelining_config.clone(),
|
||||
feature_resolver.clone(),
|
||||
connection_ctx,
|
||||
connections_cancel.child_token(),
|
||||
gate_guard,
|
||||
@@ -303,6 +308,7 @@ async fn page_service_conn_main(
|
||||
auth_type: AuthType,
|
||||
tls_config: Option<Arc<rustls::ServerConfig>>,
|
||||
pipelining_config: PageServicePipeliningConfig,
|
||||
feature_resolver: FeatureResolver,
|
||||
connection_ctx: RequestContext,
|
||||
cancel: CancellationToken,
|
||||
gate_guard: GateGuard,
|
||||
@@ -370,6 +376,7 @@ async fn page_service_conn_main(
|
||||
perf_span_fields,
|
||||
connection_ctx,
|
||||
cancel.clone(),
|
||||
feature_resolver.clone(),
|
||||
gate_guard,
|
||||
);
|
||||
let pgbackend =
|
||||
@@ -421,6 +428,8 @@ struct PageServerHandler {
|
||||
pipelining_config: PageServicePipeliningConfig,
|
||||
get_vectored_concurrent_io: GetVectoredConcurrentIo,
|
||||
|
||||
feature_resolver: FeatureResolver,
|
||||
|
||||
gate_guard: GateGuard,
|
||||
}
|
||||
|
||||
@@ -535,6 +544,7 @@ impl timeline::handle::TenantManager<TenantManagerTypes> for TenantManagerWrappe
|
||||
match resolved {
|
||||
ShardResolveResult::Found(tenant_shard) => break tenant_shard,
|
||||
ShardResolveResult::NotFound => {
|
||||
MISROUTED_PAGESTREAM_REQUESTS.inc();
|
||||
return Err(GetActiveTimelineError::Tenant(
|
||||
GetActiveTenantError::NotFound(GetTenantError::NotFound(*tenant_id)),
|
||||
));
|
||||
@@ -586,6 +596,15 @@ impl timeline::handle::TenantManager<TenantManagerTypes> for TenantManagerWrappe
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether to hold the applied GC cutoff guard when processing GetPage requests.
|
||||
/// This is determined once at the start of pagestream subprotocol handling based on
|
||||
/// feature flags, configuration, and test conditions.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
enum HoldAppliedGcCutoffGuard {
|
||||
Yes,
|
||||
No,
|
||||
}
|
||||
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
enum PageStreamError {
|
||||
/// We encountered an error that should prompt the client to reconnect:
|
||||
@@ -729,6 +748,7 @@ enum BatchedFeMessage {
|
||||
GetPage {
|
||||
span: Span,
|
||||
shard: WeakHandle<TenantManagerTypes>,
|
||||
applied_gc_cutoff_guard: Option<RcuReadGuard<Lsn>>,
|
||||
pages: SmallVec<[BatchedGetPageRequest; 1]>,
|
||||
batch_break_reason: GetPageBatchBreakReason,
|
||||
},
|
||||
@@ -908,6 +928,7 @@ impl PageServerHandler {
|
||||
perf_span_fields: ConnectionPerfSpanFields,
|
||||
connection_ctx: RequestContext,
|
||||
cancel: CancellationToken,
|
||||
feature_resolver: FeatureResolver,
|
||||
gate_guard: GateGuard,
|
||||
) -> Self {
|
||||
PageServerHandler {
|
||||
@@ -919,6 +940,7 @@ impl PageServerHandler {
|
||||
cancel,
|
||||
pipelining_config,
|
||||
get_vectored_concurrent_io,
|
||||
feature_resolver,
|
||||
gate_guard,
|
||||
}
|
||||
}
|
||||
@@ -958,6 +980,7 @@ impl PageServerHandler {
|
||||
ctx: &RequestContext,
|
||||
protocol_version: PagestreamProtocolVersion,
|
||||
parent_span: Span,
|
||||
hold_gc_cutoff_guard: HoldAppliedGcCutoffGuard,
|
||||
) -> Result<Option<BatchedFeMessage>, QueryError>
|
||||
where
|
||||
IO: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static,
|
||||
@@ -1195,19 +1218,27 @@ impl PageServerHandler {
|
||||
})
|
||||
.await?;
|
||||
|
||||
let applied_gc_cutoff_guard = shard.get_applied_gc_cutoff_lsn(); // hold guard
|
||||
// We're holding the Handle
|
||||
let effective_lsn = match Self::effective_request_lsn(
|
||||
&shard,
|
||||
shard.get_last_record_lsn(),
|
||||
req.hdr.request_lsn,
|
||||
req.hdr.not_modified_since,
|
||||
&shard.get_applied_gc_cutoff_lsn(),
|
||||
&applied_gc_cutoff_guard,
|
||||
) {
|
||||
Ok(lsn) => lsn,
|
||||
Err(e) => {
|
||||
return respond_error!(span, e);
|
||||
}
|
||||
};
|
||||
let applied_gc_cutoff_guard = match hold_gc_cutoff_guard {
|
||||
HoldAppliedGcCutoffGuard::Yes => Some(applied_gc_cutoff_guard),
|
||||
HoldAppliedGcCutoffGuard::No => {
|
||||
drop(applied_gc_cutoff_guard);
|
||||
None
|
||||
}
|
||||
};
|
||||
|
||||
let batch_wait_ctx = if ctx.has_perf_span() {
|
||||
Some(
|
||||
@@ -1228,6 +1259,7 @@ impl PageServerHandler {
|
||||
BatchedFeMessage::GetPage {
|
||||
span,
|
||||
shard: shard.downgrade(),
|
||||
applied_gc_cutoff_guard,
|
||||
pages: smallvec![BatchedGetPageRequest {
|
||||
req,
|
||||
timer,
|
||||
@@ -1328,13 +1360,28 @@ impl PageServerHandler {
|
||||
match (eligible_batch, this_msg) {
|
||||
(
|
||||
BatchedFeMessage::GetPage {
|
||||
pages: accum_pages, ..
|
||||
pages: accum_pages,
|
||||
applied_gc_cutoff_guard: accum_applied_gc_cutoff_guard,
|
||||
..
|
||||
},
|
||||
BatchedFeMessage::GetPage {
|
||||
pages: this_pages, ..
|
||||
pages: this_pages,
|
||||
applied_gc_cutoff_guard: this_applied_gc_cutoff_guard,
|
||||
..
|
||||
},
|
||||
) => {
|
||||
accum_pages.extend(this_pages);
|
||||
// the minimum of the two guards will keep data for both alive
|
||||
match (&accum_applied_gc_cutoff_guard, this_applied_gc_cutoff_guard) {
|
||||
(None, None) => (),
|
||||
(None, Some(this)) => *accum_applied_gc_cutoff_guard = Some(this),
|
||||
(Some(_), None) => (),
|
||||
(Some(accum), Some(this)) => {
|
||||
if **accum > *this {
|
||||
*accum_applied_gc_cutoff_guard = Some(this);
|
||||
}
|
||||
}
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
#[cfg(feature = "testing")]
|
||||
@@ -1649,6 +1696,7 @@ impl PageServerHandler {
|
||||
BatchedFeMessage::GetPage {
|
||||
span,
|
||||
shard,
|
||||
applied_gc_cutoff_guard,
|
||||
pages,
|
||||
batch_break_reason,
|
||||
} => {
|
||||
@@ -1668,6 +1716,7 @@ impl PageServerHandler {
|
||||
.instrument(span.clone())
|
||||
.await;
|
||||
assert_eq!(res.len(), npages);
|
||||
drop(applied_gc_cutoff_guard);
|
||||
res
|
||||
},
|
||||
span,
|
||||
@@ -1749,7 +1798,7 @@ impl PageServerHandler {
|
||||
/// Coding discipline within this function: all interaction with the `pgb` connection
|
||||
/// needs to be sensitive to connection shutdown, currently signalled via [`Self::cancel`].
|
||||
/// This is so that we can shutdown page_service quickly.
|
||||
#[instrument(skip_all)]
|
||||
#[instrument(skip_all, fields(hold_gc_cutoff_guard))]
|
||||
async fn handle_pagerequests<IO>(
|
||||
&mut self,
|
||||
pgb: &mut PostgresBackend<IO>,
|
||||
@@ -1795,6 +1844,30 @@ impl PageServerHandler {
|
||||
.take()
|
||||
.expect("implementation error: timeline_handles should not be locked");
|
||||
|
||||
// Evaluate the expensive feature resolver check once per pagestream subprotocol handling
|
||||
// instead of once per GetPage request. This is shared between pipelined and serial paths.
|
||||
let hold_gc_cutoff_guard = if cfg!(test) || cfg!(feature = "testing") {
|
||||
HoldAppliedGcCutoffGuard::Yes
|
||||
} else {
|
||||
// Use the global feature resolver with the tenant ID directly, avoiding the need
|
||||
// to get a timeline/shard which might not be available on this pageserver node.
|
||||
let empty_properties = std::collections::HashMap::new();
|
||||
match self.feature_resolver.evaluate_boolean(
|
||||
"page-service-getpage-hold-applied-gc-cutoff-guard",
|
||||
tenant_id,
|
||||
&empty_properties,
|
||||
) {
|
||||
Ok(()) => HoldAppliedGcCutoffGuard::Yes,
|
||||
Err(_) => HoldAppliedGcCutoffGuard::No,
|
||||
}
|
||||
};
|
||||
// record it in the span of handle_pagerequests so that both the request_span
|
||||
// and the pipeline implementation spans contains the field.
|
||||
Span::current().record(
|
||||
"hold_gc_cutoff_guard",
|
||||
tracing::field::debug(&hold_gc_cutoff_guard),
|
||||
);
|
||||
|
||||
let request_span = info_span!("request");
|
||||
let ((pgb_reader, timeline_handles), result) = match self.pipelining_config.clone() {
|
||||
PageServicePipeliningConfig::Pipelined(pipelining_config) => {
|
||||
@@ -1808,6 +1881,7 @@ impl PageServerHandler {
|
||||
pipelining_config,
|
||||
protocol_version,
|
||||
io_concurrency,
|
||||
hold_gc_cutoff_guard,
|
||||
&ctx,
|
||||
)
|
||||
.await
|
||||
@@ -1822,6 +1896,7 @@ impl PageServerHandler {
|
||||
request_span,
|
||||
protocol_version,
|
||||
io_concurrency,
|
||||
hold_gc_cutoff_guard,
|
||||
&ctx,
|
||||
)
|
||||
.await
|
||||
@@ -1850,6 +1925,7 @@ impl PageServerHandler {
|
||||
request_span: Span,
|
||||
protocol_version: PagestreamProtocolVersion,
|
||||
io_concurrency: IoConcurrency,
|
||||
hold_gc_cutoff_guard: HoldAppliedGcCutoffGuard,
|
||||
ctx: &RequestContext,
|
||||
) -> (
|
||||
(PostgresBackendReader<IO>, TimelineHandles),
|
||||
@@ -1871,6 +1947,7 @@ impl PageServerHandler {
|
||||
ctx,
|
||||
protocol_version,
|
||||
request_span.clone(),
|
||||
hold_gc_cutoff_guard,
|
||||
)
|
||||
.await;
|
||||
let msg = match msg {
|
||||
@@ -1918,6 +1995,7 @@ impl PageServerHandler {
|
||||
pipelining_config: PageServicePipeliningConfigPipelined,
|
||||
protocol_version: PagestreamProtocolVersion,
|
||||
io_concurrency: IoConcurrency,
|
||||
hold_gc_cutoff_guard: HoldAppliedGcCutoffGuard,
|
||||
ctx: &RequestContext,
|
||||
) -> (
|
||||
(PostgresBackendReader<IO>, TimelineHandles),
|
||||
@@ -2021,6 +2099,7 @@ impl PageServerHandler {
|
||||
&ctx,
|
||||
protocol_version,
|
||||
request_span.clone(),
|
||||
hold_gc_cutoff_guard,
|
||||
)
|
||||
.await;
|
||||
let Some(read_res) = read_res.transpose() else {
|
||||
@@ -2067,6 +2146,7 @@ impl PageServerHandler {
|
||||
pages,
|
||||
span: _,
|
||||
shard: _,
|
||||
applied_gc_cutoff_guard: _,
|
||||
batch_break_reason: _,
|
||||
} = &mut batch
|
||||
{
|
||||
@@ -3428,8 +3508,6 @@ impl GrpcPageServiceHandler {
|
||||
/// NB: errors returned from here are intercepted in get_pages(), and may be converted to a
|
||||
/// GetPageResponse with an appropriate status code to avoid terminating the stream.
|
||||
///
|
||||
/// TODO: verify that the requested pages belong to this shard.
|
||||
///
|
||||
/// TODO: get_vectored() currently enforces a batch limit of 32. Postgres will typically send
|
||||
/// batches up to effective_io_concurrency = 100. Either we have to accept large batches, or
|
||||
/// split them up in the client or server.
|
||||
@@ -3455,6 +3533,19 @@ impl GrpcPageServiceHandler {
|
||||
lsn = %req.read_lsn,
|
||||
);
|
||||
|
||||
for &blkno in &req.block_numbers {
|
||||
let shard = timeline.get_shard_identity();
|
||||
let key = rel_block_to_key(req.rel, blkno);
|
||||
if !shard.is_key_local(&key) {
|
||||
return Err(tonic::Status::invalid_argument(format!(
|
||||
"block {blkno} of relation {} requested on wrong shard {} (is on {})",
|
||||
req.rel,
|
||||
timeline.get_shard_index(),
|
||||
ShardIndex::new(shard.get_shard_number(&key), shard.count),
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
||||
let latest_gc_cutoff_lsn = timeline.get_applied_gc_cutoff_lsn(); // hold guard
|
||||
let effective_lsn = PageServerHandler::effective_request_lsn(
|
||||
&timeline,
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
//! walingest.rs handles a few things like implicit relation creation and extension.
|
||||
//! Clarify that)
|
||||
//!
|
||||
use std::collections::{HashMap, HashSet, hash_map};
|
||||
use std::collections::{BTreeSet, HashMap, HashSet, hash_map};
|
||||
use std::ops::{ControlFlow, Range};
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -227,6 +227,25 @@ impl Timeline {
|
||||
pending_nblocks: 0,
|
||||
pending_directory_entries: Vec::new(),
|
||||
pending_metadata_bytes: 0,
|
||||
is_importing_pgdata: false,
|
||||
lsn,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn begin_modification_for_import(&self, lsn: Lsn) -> DatadirModification
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
DatadirModification {
|
||||
tline: self,
|
||||
pending_lsns: Vec::new(),
|
||||
pending_metadata_pages: HashMap::new(),
|
||||
pending_data_batch: None,
|
||||
pending_deletions: Vec::new(),
|
||||
pending_nblocks: 0,
|
||||
pending_directory_entries: Vec::new(),
|
||||
pending_metadata_bytes: 0,
|
||||
is_importing_pgdata: true,
|
||||
lsn,
|
||||
}
|
||||
}
|
||||
@@ -596,6 +615,50 @@ impl Timeline {
|
||||
self.get_rel_exists_in_reldir(tag, version, None, ctx).await
|
||||
}
|
||||
|
||||
async fn get_rel_exists_in_reldir_v1(
|
||||
&self,
|
||||
tag: RelTag,
|
||||
version: Version<'_>,
|
||||
deserialized_reldir_v1: Option<(Key, &RelDirectory)>,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<bool, PageReconstructError> {
|
||||
let key = rel_dir_to_key(tag.spcnode, tag.dbnode);
|
||||
if let Some((cached_key, dir)) = deserialized_reldir_v1 {
|
||||
if cached_key == key {
|
||||
return Ok(dir.rels.contains(&(tag.relnode, tag.forknum)));
|
||||
} else if cfg!(test) || cfg!(feature = "testing") {
|
||||
panic!("cached reldir key mismatch: {cached_key} != {key}");
|
||||
} else {
|
||||
warn!("cached reldir key mismatch: {cached_key} != {key}");
|
||||
}
|
||||
// Fallback to reading the directory from the datadir.
|
||||
}
|
||||
|
||||
let buf = version.get(self, key, ctx).await?;
|
||||
|
||||
let dir = RelDirectory::des(&buf)?;
|
||||
Ok(dir.rels.contains(&(tag.relnode, tag.forknum)))
|
||||
}
|
||||
|
||||
async fn get_rel_exists_in_reldir_v2(
|
||||
&self,
|
||||
tag: RelTag,
|
||||
version: Version<'_>,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<bool, PageReconstructError> {
|
||||
let key = rel_tag_sparse_key(tag.spcnode, tag.dbnode, tag.relnode, tag.forknum);
|
||||
let buf = RelDirExists::decode_option(version.sparse_get(self, key, ctx).await?).map_err(
|
||||
|_| {
|
||||
PageReconstructError::Other(anyhow::anyhow!(
|
||||
"invalid reldir key: decode failed, {}",
|
||||
key
|
||||
))
|
||||
},
|
||||
)?;
|
||||
let exists_v2 = buf == RelDirExists::Exists;
|
||||
Ok(exists_v2)
|
||||
}
|
||||
|
||||
/// Does the relation exist? With a cached deserialized `RelDirectory`.
|
||||
///
|
||||
/// There are some cases where the caller loops across all relations. In that specific case,
|
||||
@@ -627,45 +690,134 @@ impl Timeline {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
// Read path: first read the new reldir keyspace. Early return if the relation exists.
|
||||
// Otherwise, read the old reldir keyspace.
|
||||
// TODO: if IndexPart::rel_size_migration is `Migrated`, we only need to read from v2.
|
||||
let (v2_status, migrated_lsn) = self.get_rel_size_v2_status();
|
||||
|
||||
if let RelSizeMigration::Migrated | RelSizeMigration::Migrating =
|
||||
self.get_rel_size_v2_status()
|
||||
{
|
||||
// fetch directory listing (new)
|
||||
let key = rel_tag_sparse_key(tag.spcnode, tag.dbnode, tag.relnode, tag.forknum);
|
||||
let buf = RelDirExists::decode_option(version.sparse_get(self, key, ctx).await?)
|
||||
.map_err(|_| PageReconstructError::Other(anyhow::anyhow!("invalid reldir key")))?;
|
||||
let exists_v2 = buf == RelDirExists::Exists;
|
||||
// Fast path: if the relation exists in the new format, return true.
|
||||
// TODO: we should have a verification mode that checks both keyspaces
|
||||
// to ensure the relation only exists in one of them.
|
||||
if exists_v2 {
|
||||
return Ok(true);
|
||||
match v2_status {
|
||||
RelSizeMigration::Legacy => {
|
||||
let v1_exists = self
|
||||
.get_rel_exists_in_reldir_v1(tag, version, deserialized_reldir_v1, ctx)
|
||||
.await?;
|
||||
Ok(v1_exists)
|
||||
}
|
||||
RelSizeMigration::Migrating | RelSizeMigration::Migrated
|
||||
if version.get_lsn() < migrated_lsn.unwrap_or(Lsn(0)) =>
|
||||
{
|
||||
// For requests below the migrated LSN, we still use the v1 read path.
|
||||
let v1_exists = self
|
||||
.get_rel_exists_in_reldir_v1(tag, version, deserialized_reldir_v1, ctx)
|
||||
.await?;
|
||||
Ok(v1_exists)
|
||||
}
|
||||
RelSizeMigration::Migrating => {
|
||||
let v1_exists = self
|
||||
.get_rel_exists_in_reldir_v1(tag, version, deserialized_reldir_v1, ctx)
|
||||
.await?;
|
||||
let v2_exists_res = self.get_rel_exists_in_reldir_v2(tag, version, ctx).await;
|
||||
match v2_exists_res {
|
||||
Ok(v2_exists) if v1_exists == v2_exists => {}
|
||||
Ok(v2_exists) => {
|
||||
tracing::warn!(
|
||||
"inconsistent v1/v2 reldir keyspace for rel {}: v1_exists={}, v2_exists={}",
|
||||
tag,
|
||||
v1_exists,
|
||||
v2_exists
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::warn!("failed to get rel exists in v2: {e}");
|
||||
}
|
||||
}
|
||||
Ok(v1_exists)
|
||||
}
|
||||
RelSizeMigration::Migrated => {
let v2_exists = self.get_rel_exists_in_reldir_v2(tag, version, ctx).await?;
Ok(v2_exists)
}
}
}
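Editor's note: the match above gates the v2 reldir keyspace on both the migration status and the LSN at which the migration took effect; below that LSN the v1 keyspace stays authoritative, and while Migrating the v2 result is only cross-checked against v1. A small decision sketch with plain integers standing in for LSNs (the types are illustrative, not the pageserver's own):

#[derive(Clone, Copy)]
enum Status {
    Legacy,
    Migrating,
    Migrated,
}

// Which keyspace answers the request: true = v2, false = v1.
fn answer_from_v2(status: Status, request_lsn: u64, migrated_at: Option<u64>) -> bool {
    match status {
        Status::Legacy => false,
        // Requests below the migration LSN still go to the v1 read path.
        _ if request_lsn < migrated_at.unwrap_or(0) => false,
        // While migrating, v1 stays authoritative; v2 is only cross-checked.
        Status::Migrating => false,
        Status::Migrated => true,
    }
}

fn main() {
    assert!(!answer_from_v2(Status::Legacy, 300, None));
    assert!(!answer_from_v2(Status::Migrating, 100, Some(200)));
    assert!(!answer_from_v2(Status::Migrating, 300, Some(200)));
    assert!(answer_from_v2(Status::Migrated, 300, Some(200)));
}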
|
||||
|
||||
// fetch directory listing (old)
|
||||
|
||||
let key = rel_dir_to_key(tag.spcnode, tag.dbnode);
|
||||
|
||||
if let Some((cached_key, dir)) = deserialized_reldir_v1 {
|
||||
if cached_key == key {
|
||||
return Ok(dir.rels.contains(&(tag.relnode, tag.forknum)));
|
||||
} else if cfg!(test) || cfg!(feature = "testing") {
|
||||
panic!("cached reldir key mismatch: {cached_key} != {key}");
|
||||
} else {
|
||||
warn!("cached reldir key mismatch: {cached_key} != {key}");
|
||||
}
|
||||
// Fallback to reading the directory from the datadir.
|
||||
}
|
||||
async fn list_rels_v1(
|
||||
&self,
|
||||
spcnode: Oid,
|
||||
dbnode: Oid,
|
||||
version: Version<'_>,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<HashSet<RelTag>, PageReconstructError> {
|
||||
let key = rel_dir_to_key(spcnode, dbnode);
|
||||
let buf = version.get(self, key, ctx).await?;
|
||||
|
||||
let dir = RelDirectory::des(&buf)?;
|
||||
let exists_v1 = dir.rels.contains(&(tag.relnode, tag.forknum));
|
||||
Ok(exists_v1)
|
||||
let rels_v1: HashSet<RelTag> =
|
||||
HashSet::from_iter(dir.rels.iter().map(|(relnode, forknum)| RelTag {
|
||||
spcnode,
|
||||
dbnode,
|
||||
relnode: *relnode,
|
||||
forknum: *forknum,
|
||||
}));
|
||||
Ok(rels_v1)
|
||||
}
|
||||
|
||||
async fn list_rels_v2(
|
||||
&self,
|
||||
spcnode: Oid,
|
||||
dbnode: Oid,
|
||||
version: Version<'_>,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<HashSet<RelTag>, PageReconstructError> {
|
||||
let key_range = rel_tag_sparse_key_range(spcnode, dbnode);
|
||||
let io_concurrency = IoConcurrency::spawn_from_conf(
|
||||
self.conf.get_vectored_concurrent_io,
|
||||
self.gate
|
||||
.enter()
|
||||
.map_err(|_| PageReconstructError::Cancelled)?,
|
||||
);
|
||||
let results = self
|
||||
.scan(
|
||||
KeySpace::single(key_range),
|
||||
version.get_lsn(),
|
||||
ctx,
|
||||
io_concurrency,
|
||||
)
|
||||
.await?;
|
||||
let mut rels = HashSet::new();
|
||||
for (key, val) in results {
|
||||
let val = RelDirExists::decode(&val?).map_err(|_| {
|
||||
PageReconstructError::Other(anyhow::anyhow!(
|
||||
"invalid reldir key: decode failed, {}",
|
||||
key
|
||||
))
|
||||
})?;
|
||||
if key.field6 != 1 {
|
||||
return Err(PageReconstructError::Other(anyhow::anyhow!(
|
||||
"invalid reldir key: field6 != 1, {}",
|
||||
key
|
||||
)));
|
||||
}
|
||||
if key.field2 != spcnode {
|
||||
return Err(PageReconstructError::Other(anyhow::anyhow!(
|
||||
"invalid reldir key: field2 != spcnode, {}",
|
||||
key
|
||||
)));
|
||||
}
|
||||
if key.field3 != dbnode {
|
||||
return Err(PageReconstructError::Other(anyhow::anyhow!(
|
||||
"invalid reldir key: field3 != dbnode, {}",
|
||||
key
|
||||
)));
|
||||
}
|
||||
let tag = RelTag {
|
||||
spcnode,
|
||||
dbnode,
|
||||
relnode: key.field4,
|
||||
forknum: key.field5,
|
||||
};
|
||||
if val == RelDirExists::Removed {
|
||||
debug_assert!(!rels.contains(&tag), "removed reltag in v2");
|
||||
continue;
|
||||
}
|
||||
let did_not_contain = rels.insert(tag);
|
||||
debug_assert!(did_not_contain, "duplicate reltag in v2");
|
||||
}
|
||||
Ok(rels)
|
||||
}
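The v2 listing path scans the sparse reldir keyspace, validates the key fields, and must honour tombstones: an entry that decodes as `Removed` is skipped rather than returned. A minimal sketch of that filtering step, with simplified stand-ins for `RelTag` and `RelDirExists`:

// Sketch of tombstone handling in the v2 listing: entries marked Removed are
// skipped, everything else is collected exactly once. Simplified stand-ins
// for RelTag / RelDirExists.
use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct RelTag {
    spcnode: u32,
    dbnode: u32,
    relnode: u32,
    forknum: u8,
}

#[derive(Clone, Copy, PartialEq, Eq)]
enum RelDirExists {
    Exists,
    Removed,
}

fn collect_rels(entries: &[(RelTag, RelDirExists)]) -> HashSet<RelTag> {
    let mut rels = HashSet::new();
    for &(tag, state) in entries {
        if state == RelDirExists::Removed {
            // A tombstone marks a relation dropped after the migration
            // started; it must not reappear in the listing.
            continue;
        }
        // Every live relation fork is expected to appear exactly once.
        let inserted = rels.insert(tag);
        debug_assert!(inserted, "duplicate reltag in v2 listing");
    }
    rels
}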
|
||||
|
||||
/// Get a list of all existing relations in given tablespace and database.
|
||||
@@ -683,60 +835,45 @@ impl Timeline {
|
||||
version: Version<'_>,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<HashSet<RelTag>, PageReconstructError> {
|
||||
// fetch directory listing (old)
|
||||
let key = rel_dir_to_key(spcnode, dbnode);
|
||||
let buf = version.get(self, key, ctx).await?;
|
||||
let (v2_status, migrated_lsn) = self.get_rel_size_v2_status();
|
||||
|
||||
let dir = RelDirectory::des(&buf)?;
|
||||
let rels_v1: HashSet<RelTag> =
|
||||
HashSet::from_iter(dir.rels.iter().map(|(relnode, forknum)| RelTag {
|
||||
spcnode,
|
||||
dbnode,
|
||||
relnode: *relnode,
|
||||
forknum: *forknum,
|
||||
}));
|
||||
|
||||
if let RelSizeMigration::Legacy = self.get_rel_size_v2_status() {
|
||||
return Ok(rels_v1);
|
||||
}
|
||||
|
||||
// scan directory listing (new), merge with the old results
|
||||
let key_range = rel_tag_sparse_key_range(spcnode, dbnode);
|
||||
let io_concurrency = IoConcurrency::spawn_from_conf(
|
||||
self.conf.get_vectored_concurrent_io,
|
||||
self.gate
|
||||
.enter()
|
||||
.map_err(|_| PageReconstructError::Cancelled)?,
|
||||
);
|
||||
let results = self
|
||||
.scan(
|
||||
KeySpace::single(key_range),
|
||||
version.get_lsn(),
|
||||
ctx,
|
||||
io_concurrency,
|
||||
)
|
||||
.await?;
|
||||
let mut rels = rels_v1;
|
||||
for (key, val) in results {
|
||||
let val = RelDirExists::decode(&val?)
|
||||
.map_err(|_| PageReconstructError::Other(anyhow::anyhow!("invalid reldir key")))?;
|
||||
assert_eq!(key.field6, 1);
|
||||
assert_eq!(key.field2, spcnode);
|
||||
assert_eq!(key.field3, dbnode);
|
||||
let tag = RelTag {
|
||||
spcnode,
|
||||
dbnode,
|
||||
relnode: key.field4,
|
||||
forknum: key.field5,
|
||||
};
|
||||
if val == RelDirExists::Removed {
|
||||
debug_assert!(!rels.contains(&tag), "removed reltag in v2");
|
||||
continue;
|
||||
match v2_status {
|
||||
RelSizeMigration::Legacy => {
|
||||
let rels_v1 = self.list_rels_v1(spcnode, dbnode, version, ctx).await?;
|
||||
Ok(rels_v1)
|
||||
}
|
||||
RelSizeMigration::Migrating | RelSizeMigration::Migrated
|
||||
if version.get_lsn() < migrated_lsn.unwrap_or(Lsn(0)) =>
|
||||
{
|
||||
// For requests below the migrated LSN, we still use the v1 read path.
|
||||
let rels_v1 = self.list_rels_v1(spcnode, dbnode, version, ctx).await?;
|
||||
Ok(rels_v1)
|
||||
}
|
||||
RelSizeMigration::Migrating => {
|
||||
let rels_v1 = self.list_rels_v1(spcnode, dbnode, version, ctx).await?;
|
||||
let rels_v2_res = self.list_rels_v2(spcnode, dbnode, version, ctx).await;
|
||||
match rels_v2_res {
|
||||
Ok(rels_v2) if rels_v1 == rels_v2 => {}
|
||||
Ok(rels_v2) => {
|
||||
tracing::warn!(
|
||||
"inconsistent v1/v2 reldir keyspace for db {} {}: v1_rels.len()={}, v2_rels.len()={}",
|
||||
spcnode,
|
||||
dbnode,
|
||||
rels_v1.len(),
|
||||
rels_v2.len()
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::warn!("failed to list rels in v2: {e}");
|
||||
}
|
||||
}
|
||||
Ok(rels_v1)
|
||||
}
|
||||
RelSizeMigration::Migrated => {
|
||||
let rels_v2 = self.list_rels_v2(spcnode, dbnode, version, ctx).await?;
|
||||
Ok(rels_v2)
|
||||
}
|
||||
let did_not_contain = rels.insert(tag);
|
||||
debug_assert!(did_not_contain, "duplicate reltag in v2");
|
||||
}
|
||||
Ok(rels)
|
||||
}
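As in the exists check, the Migrating arm of the listing path computes both results, logs any divergence, and still returns the v1 listing unchanged. A compact sketch of that compare-and-warn policy, generic over any hashable relation id (this is not the real signature):

// Sketch: while Migrating, both listings are computed; a mismatch is only
// logged and the v1 result is returned unchanged.
use std::collections::HashSet;
use std::hash::Hash;

fn reconcile_listings<T: Eq + Hash>(
    rels_v1: HashSet<T>,
    rels_v2: Result<HashSet<T>, String>,
) -> HashSet<T> {
    match rels_v2 {
        Ok(v2) if v2 == rels_v1 => {}
        Ok(v2) => eprintln!(
            "inconsistent v1/v2 reldir listing: v1.len()={}, v2.len()={}",
            rels_v1.len(),
            v2.len()
        ),
        Err(e) => eprintln!("failed to list rels in v2: {e}"),
    }
    rels_v1
}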
|
||||
|
||||
/// Get the whole SLRU segment
|
||||
@@ -1258,10 +1395,10 @@ impl Timeline {
|
||||
let mut dbdir_cnt = 0;
|
||||
let mut rel_cnt = 0;
|
||||
|
||||
for (spcnode, dbnode) in dbdir.dbdirs.keys() {
|
||||
for &(spcnode, dbnode) in dbdir.dbdirs.keys() {
|
||||
dbdir_cnt += 1;
|
||||
for rel in self
|
||||
.list_rels(*spcnode, *dbnode, Version::at(lsn), ctx)
|
||||
.list_rels(spcnode, dbnode, Version::at(lsn), ctx)
|
||||
.await?
|
||||
{
|
||||
rel_cnt += 1;
|
||||
@@ -1566,6 +1703,9 @@ pub struct DatadirModification<'a> {
|
||||
|
||||
/// An **approximation** of how many metadata bytes will be written to the EphemeralFile.
|
||||
pending_metadata_bytes: usize,
|
||||
|
||||
/// Whether we are importing a pgdata directory.
|
||||
is_importing_pgdata: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
@@ -1578,6 +1718,14 @@ pub enum MetricsUpdate {
|
||||
Sub(u64),
|
||||
}
|
||||
|
||||
/// Controls the behavior of the reldir keyspace.
|
||||
pub struct RelDirMode {
|
||||
// Whether we can read the v2 keyspace or not.
|
||||
current_status: RelSizeMigration,
|
||||
// Whether we should initialize the v2 keyspace or not.
|
||||
initialize: bool,
|
||||
}
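A sketch of how a write-path caller is expected to consume `RelDirMode`: run the backfill first when `initialize` is set, keep writing v1 until the migration is complete, and treat a v2 write failure as fatal only once v2 is the sole source of truth. The closures below (`backfill`, `put_v1`, `put_v2`) are hypothetical placeholders for the real `DatadirModification` helpers, not actual APIs:

// Sketch of the write-path policy driven by RelDirMode. The closures are
// placeholders for the real v1/v2 write helpers and the backfill pass.
#[derive(Clone, Copy, PartialEq, Eq)]
enum RelSizeMigration {
    Legacy,
    Migrating,
    Migrated,
}

struct RelDirMode {
    current_status: RelSizeMigration,
    initialize: bool,
}

fn apply_creation(
    mut mode: RelDirMode,
    backfill: impl Fn() -> Result<(), String>,
    put_v1: impl Fn() -> Result<(), String>,
    put_v2: impl Fn() -> Result<(), String>,
) -> Result<(), String> {
    // First-time enablement: backfill the v2 keyspace, then treat it as Migrating.
    if mode.initialize {
        match backfill() {
            Ok(()) => mode.current_status = RelSizeMigration::Migrating,
            Err(e) => eprintln!("error initializing rel_size_v2 keyspace: {e}"),
        }
    }
    // v1 stays authoritative until the migration is complete.
    if mode.current_status != RelSizeMigration::Migrated {
        put_v1()?;
    }
    // v2 is written while Migrating and Migrated; a v2 failure is fatal only
    // once v2 is the sole source of truth.
    if mode.current_status != RelSizeMigration::Legacy {
        if let Err(e) = put_v2() {
            if mode.current_status == RelSizeMigration::Migrated {
                return Err(e);
            }
            eprintln!("error writing rel_size_v2 keyspace: {e}");
        }
    }
    Ok(())
}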
|
||||
|
||||
impl DatadirModification<'_> {
|
||||
// When a DatadirModification is committed, we do a monolithic serialization of all its contents. WAL records can
|
||||
// contain multiple pages, so the pageserver's record-based batch size isn't sufficient to bound this allocation: we
|
||||
@@ -1933,30 +2081,49 @@ impl DatadirModification<'_> {
|
||||
}
|
||||
|
||||
/// Returns `true` if the rel_size_v2 write path is enabled. If it is the first time that
|
||||
/// we enable it, we also need to persist it in `index_part.json`.
|
||||
pub fn maybe_enable_rel_size_v2(&mut self) -> anyhow::Result<bool> {
|
||||
let status = self.tline.get_rel_size_v2_status();
|
||||
/// we enable it, we also need to persist it in `index_part.json` (initialize is true).
|
||||
///
|
||||
/// As this function is only used on the write path, we do not need to read the migrated_at
|
||||
/// field.
|
||||
pub fn maybe_enable_rel_size_v2(&mut self, is_create: bool) -> anyhow::Result<RelDirMode> {
|
||||
// TODO: define the behavior of the tenant-level config flag and use feature flag to enable this feature
|
||||
|
||||
let (status, _) = self.tline.get_rel_size_v2_status();
|
||||
let config = self.tline.get_rel_size_v2_enabled();
|
||||
match (config, status) {
|
||||
(false, RelSizeMigration::Legacy) => {
|
||||
// tenant config didn't enable it and we didn't write any reldir_v2 key yet
|
||||
Ok(false)
|
||||
Ok(RelDirMode {
|
||||
current_status: RelSizeMigration::Legacy,
|
||||
initialize: false,
|
||||
})
|
||||
}
|
||||
(false, RelSizeMigration::Migrating | RelSizeMigration::Migrated) => {
|
||||
(false, status @ RelSizeMigration::Migrating | status @ RelSizeMigration::Migrated) => {
|
||||
// index_part already persisted that the timeline has enabled rel_size_v2
|
||||
Ok(true)
|
||||
Ok(RelDirMode {
|
||||
current_status: status,
|
||||
initialize: false,
|
||||
})
|
||||
}
|
||||
(true, RelSizeMigration::Legacy) => {
|
||||
// The first time we enable it, we need to persist it in `index_part.json`
|
||||
self.tline
|
||||
.update_rel_size_v2_status(RelSizeMigration::Migrating)?;
|
||||
tracing::info!("enabled rel_size_v2");
|
||||
Ok(true)
|
||||
// The caller should update the reldir status once the initialization is done.
|
||||
//
|
||||
// Only initialize the v2 keyspace on new relation creation. No initialization
|
||||
// during `timeline_create` (TODO: fix this; we should allow it, but currently it
|
||||
// hits consistency issues).
|
||||
Ok(RelDirMode {
|
||||
current_status: RelSizeMigration::Legacy,
|
||||
initialize: is_create && !self.is_importing_pgdata,
|
||||
})
|
||||
}
|
||||
(true, RelSizeMigration::Migrating | RelSizeMigration::Migrated) => {
|
||||
(true, status @ RelSizeMigration::Migrating | status @ RelSizeMigration::Migrated) => {
|
||||
// index_part already persisted that the timeline has enabled rel_size_v2
|
||||
// and we don't need to do anything
|
||||
Ok(true)
|
||||
Ok(RelDirMode {
|
||||
current_status: status,
|
||||
initialize: false,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1969,8 +2136,8 @@ impl DatadirModification<'_> {
|
||||
img: Bytes,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<(), WalIngestError> {
|
||||
let v2_enabled = self
|
||||
.maybe_enable_rel_size_v2()
|
||||
let v2_mode = self
|
||||
.maybe_enable_rel_size_v2(false)
|
||||
.map_err(WalIngestErrorKind::MaybeRelSizeV2Error)?;
|
||||
|
||||
// Add it to the directory (if it doesn't exist already)
|
||||
@@ -1986,17 +2153,19 @@ impl DatadirModification<'_> {
|
||||
self.put(DBDIR_KEY, Value::Image(buf.into()));
|
||||
}
|
||||
if r.is_none() {
|
||||
// Create RelDirectory
|
||||
// TODO: if we have fully migrated to v2, no need to create this directory
|
||||
if v2_mode.current_status != RelSizeMigration::Legacy {
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::RelV2, MetricsUpdate::Set(0)));
|
||||
}
|
||||
|
||||
// Create RelDirectory in v1 keyspace. TODO: if we have fully migrated to v2, no need to create this directory.
|
||||
// Some code paths rely on this directory being present. We should remove it once we start to set tenants to
|
||||
// `RelSizeMigration::Migrated` state (currently we don't, all tenants will have `RelSizeMigration::Migrating`).
|
||||
let buf = RelDirectory::ser(&RelDirectory {
|
||||
rels: HashSet::new(),
|
||||
})?;
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::Rel, MetricsUpdate::Set(0)));
|
||||
if v2_enabled {
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::RelV2, MetricsUpdate::Set(0)));
|
||||
}
|
||||
self.put(
|
||||
rel_dir_to_key(spcnode, dbnode),
|
||||
Value::Image(Bytes::from(buf)),
|
||||
@@ -2103,6 +2272,109 @@ impl DatadirModification<'_> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn initialize_rel_size_v2_keyspace(
|
||||
&mut self,
|
||||
ctx: &RequestContext,
|
||||
dbdir: &DbDirectory,
|
||||
) -> Result<(), WalIngestError> {
|
||||
// Copy everything from relv1 to relv2; TODO: check if there's any key in the v2 keyspace, if so, abort.
|
||||
tracing::info!("initializing rel_size_v2 keyspace");
|
||||
let mut rel_cnt = 0;
|
||||
// relmap_exists (the value of the dbdirs hashmap) does not affect the migration: we need to copy things over anyway
|
||||
for &(spcnode, dbnode) in dbdir.dbdirs.keys() {
|
||||
let rel_dir_key = rel_dir_to_key(spcnode, dbnode);
|
||||
let rel_dir = RelDirectory::des(&self.get(rel_dir_key, ctx).await?)?;
|
||||
for (relnode, forknum) in rel_dir.rels {
|
||||
let sparse_rel_dir_key = rel_tag_sparse_key(spcnode, dbnode, relnode, forknum);
|
||||
self.put(
|
||||
sparse_rel_dir_key,
|
||||
Value::Image(RelDirExists::Exists.encode()),
|
||||
);
|
||||
tracing::info!(
|
||||
"migrated rel_size_v2: {}",
|
||||
RelTag {
|
||||
spcnode,
|
||||
dbnode,
|
||||
relnode,
|
||||
forknum
|
||||
}
|
||||
);
|
||||
rel_cnt += 1;
|
||||
}
|
||||
}
|
||||
tracing::info!(
|
||||
"initialized rel_size_v2 keyspace at lsn {}: migrated {} relations",
|
||||
self.lsn,
|
||||
rel_cnt
|
||||
);
|
||||
self.tline
|
||||
.update_rel_size_v2_status(RelSizeMigration::Migrating, Some(self.lsn))
|
||||
.map_err(WalIngestErrorKind::MaybeRelSizeV2Error)?;
|
||||
Ok::<_, WalIngestError>(())
|
||||
}
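Conceptually, the initialization pass above copies every `(relnode, forknum)` pair from the per-database v1 `RelDirectory` values into one sparse v2 key per relation fork. A standalone sketch of that transformation with simplified key and value types (the real code writes `RelDirExists::Exists` images through `self.put`):

// Sketch: flatten the per-database v1 directories into one sparse v2 entry
// per relation fork. Key and value types are simplified stand-ins.
use std::collections::{BTreeMap, HashSet};

type RelDirectoryV1 = HashSet<(u32 /* relnode */, u8 /* forknum */)>;
type SparseKey = (u32 /* spcnode */, u32 /* dbnode */, u32 /* relnode */, u8 /* forknum */);

fn backfill_v2(dbdirs: &BTreeMap<(u32, u32), RelDirectoryV1>) -> BTreeMap<SparseKey, &'static str> {
    let mut v2 = BTreeMap::new();
    for (&(spcnode, dbnode), rels) in dbdirs {
        for &(relnode, forknum) in rels {
            // One sparse key per relation fork, valued "exists".
            v2.insert((spcnode, dbnode, relnode, forknum), "exists");
        }
    }
    v2
}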
|
||||
|
||||
async fn put_rel_creation_v1(
|
||||
&mut self,
|
||||
rel: RelTag,
|
||||
dbdir_exists: bool,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<(), WalIngestError> {
|
||||
// Reldir v1 write path
|
||||
let rel_dir_key = rel_dir_to_key(rel.spcnode, rel.dbnode);
|
||||
let mut rel_dir = if !dbdir_exists {
|
||||
// Create the RelDirectory
|
||||
RelDirectory::default()
|
||||
} else {
|
||||
// reldir already exists, fetch it
|
||||
RelDirectory::des(&self.get(rel_dir_key, ctx).await?)?
|
||||
};
|
||||
|
||||
// Add the new relation to the rel directory entry, and write it back
|
||||
if !rel_dir.rels.insert((rel.relnode, rel.forknum)) {
|
||||
Err(WalIngestErrorKind::RelationAlreadyExists(rel))?;
|
||||
}
|
||||
if !dbdir_exists {
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::Rel, MetricsUpdate::Set(0)))
|
||||
}
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::Rel, MetricsUpdate::Add(1)));
|
||||
self.put(
|
||||
rel_dir_key,
|
||||
Value::Image(Bytes::from(RelDirectory::ser(&rel_dir)?)),
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn put_rel_creation_v2(
|
||||
&mut self,
|
||||
rel: RelTag,
|
||||
dbdir_exists: bool,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<(), WalIngestError> {
|
||||
// Reldir v2 write path
|
||||
let sparse_rel_dir_key =
|
||||
rel_tag_sparse_key(rel.spcnode, rel.dbnode, rel.relnode, rel.forknum);
|
||||
// check if the rel_dir_key exists in v2
|
||||
let val = self.sparse_get(sparse_rel_dir_key, ctx).await?;
|
||||
let val = RelDirExists::decode_option(val)
|
||||
.map_err(|_| WalIngestErrorKind::InvalidRelDirKey(sparse_rel_dir_key))?;
|
||||
if val == RelDirExists::Exists {
|
||||
Err(WalIngestErrorKind::RelationAlreadyExists(rel))?;
|
||||
}
|
||||
self.put(
|
||||
sparse_rel_dir_key,
|
||||
Value::Image(RelDirExists::Exists.encode()),
|
||||
);
|
||||
if !dbdir_exists {
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::RelV2, MetricsUpdate::Set(0)));
|
||||
}
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::RelV2, MetricsUpdate::Add(1)));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Create a relation fork.
|
||||
///
|
||||
/// 'nblocks' is the initial size.
|
||||
@@ -2136,66 +2408,31 @@ impl DatadirModification<'_> {
|
||||
true
|
||||
};
|
||||
|
||||
let rel_dir_key = rel_dir_to_key(rel.spcnode, rel.dbnode);
|
||||
let mut rel_dir = if !dbdir_exists {
|
||||
// Create the RelDirectory
|
||||
RelDirectory::default()
|
||||
} else {
|
||||
// reldir already exists, fetch it
|
||||
RelDirectory::des(&self.get(rel_dir_key, ctx).await?)?
|
||||
};
|
||||
|
||||
let v2_enabled = self
|
||||
.maybe_enable_rel_size_v2()
|
||||
let mut v2_mode = self
|
||||
.maybe_enable_rel_size_v2(true)
|
||||
.map_err(WalIngestErrorKind::MaybeRelSizeV2Error)?;
|
||||
|
||||
if v2_enabled {
|
||||
if rel_dir.rels.contains(&(rel.relnode, rel.forknum)) {
|
||||
Err(WalIngestErrorKind::RelationAlreadyExists(rel))?;
|
||||
if v2_mode.initialize {
|
||||
if let Err(e) = self.initialize_rel_size_v2_keyspace(ctx, &dbdir).await {
|
||||
tracing::warn!("error initializing rel_size_v2 keyspace: {}", e);
|
||||
// TODO: circuit breaker so that it won't retry forever
|
||||
} else {
|
||||
v2_mode.current_status = RelSizeMigration::Migrating;
|
||||
}
|
||||
let sparse_rel_dir_key =
|
||||
rel_tag_sparse_key(rel.spcnode, rel.dbnode, rel.relnode, rel.forknum);
|
||||
// check if the rel_dir_key exists in v2
|
||||
let val = self.sparse_get(sparse_rel_dir_key, ctx).await?;
|
||||
let val = RelDirExists::decode_option(val)
|
||||
.map_err(|_| WalIngestErrorKind::InvalidRelDirKey(sparse_rel_dir_key))?;
|
||||
if val == RelDirExists::Exists {
|
||||
Err(WalIngestErrorKind::RelationAlreadyExists(rel))?;
|
||||
}
|
||||
|
||||
if v2_mode.current_status != RelSizeMigration::Migrated {
|
||||
self.put_rel_creation_v1(rel, dbdir_exists, ctx).await?;
|
||||
}
|
||||
|
||||
if v2_mode.current_status != RelSizeMigration::Legacy {
|
||||
let write_v2_res = self.put_rel_creation_v2(rel, dbdir_exists, ctx).await;
|
||||
if let Err(e) = write_v2_res {
|
||||
if v2_mode.current_status == RelSizeMigration::Migrated {
|
||||
return Err(e);
|
||||
}
|
||||
tracing::warn!("error writing rel_size_v2 keyspace: {}", e);
|
||||
}
|
||||
self.put(
|
||||
sparse_rel_dir_key,
|
||||
Value::Image(RelDirExists::Exists.encode()),
|
||||
);
|
||||
if !dbdir_exists {
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::Rel, MetricsUpdate::Set(0)));
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::RelV2, MetricsUpdate::Set(0)));
|
||||
// We don't write `rel_dir_key -> rel_dir.rels` back to the storage in the v2 path unless it's the initial creation.
|
||||
// TODO: if we have fully migrated to v2, no need to create this directory. Otherwise, there
|
||||
// will be key not found errors if we don't create an empty one for rel_size_v2.
|
||||
self.put(
|
||||
rel_dir_key,
|
||||
Value::Image(Bytes::from(RelDirectory::ser(&RelDirectory::default())?)),
|
||||
);
|
||||
}
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::RelV2, MetricsUpdate::Add(1)));
|
||||
} else {
|
||||
// Add the new relation to the rel directory entry, and write it back
|
||||
if !rel_dir.rels.insert((rel.relnode, rel.forknum)) {
|
||||
Err(WalIngestErrorKind::RelationAlreadyExists(rel))?;
|
||||
}
|
||||
if !dbdir_exists {
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::Rel, MetricsUpdate::Set(0)))
|
||||
}
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::Rel, MetricsUpdate::Add(1)));
|
||||
self.put(
|
||||
rel_dir_key,
|
||||
Value::Image(Bytes::from(RelDirectory::ser(&rel_dir)?)),
|
||||
);
|
||||
}
|
||||
|
||||
// Put size
|
||||
@@ -2270,15 +2507,12 @@ impl DatadirModification<'_> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Drop some relations
|
||||
pub(crate) async fn put_rel_drops(
|
||||
async fn put_rel_drop_v1(
|
||||
&mut self,
|
||||
drop_relations: HashMap<(u32, u32), Vec<RelTag>>,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<(), WalIngestError> {
|
||||
let v2_enabled = self
|
||||
.maybe_enable_rel_size_v2()
|
||||
.map_err(WalIngestErrorKind::MaybeRelSizeV2Error)?;
|
||||
) -> Result<BTreeSet<RelTag>, WalIngestError> {
|
||||
let mut dropped_rels = BTreeSet::new();
|
||||
for ((spc_node, db_node), rel_tags) in drop_relations {
|
||||
let dir_key = rel_dir_to_key(spc_node, db_node);
|
||||
let buf = self.get(dir_key, ctx).await?;
|
||||
@@ -2290,25 +2524,8 @@ impl DatadirModification<'_> {
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::Rel, MetricsUpdate::Sub(1)));
|
||||
dirty = true;
|
||||
dropped_rels.insert(rel_tag);
|
||||
true
|
||||
} else if v2_enabled {
|
||||
// The rel is not found in the old reldir key, so we need to check the new sparse keyspace.
|
||||
// Note that a relation can only exist in one of the two keyspaces (guaranteed by the ingestion
|
||||
// logic).
|
||||
let key =
|
||||
rel_tag_sparse_key(spc_node, db_node, rel_tag.relnode, rel_tag.forknum);
|
||||
let val = RelDirExists::decode_option(self.sparse_get(key, ctx).await?)
|
||||
.map_err(|_| WalIngestErrorKind::InvalidKey(key, self.lsn))?;
|
||||
if val == RelDirExists::Exists {
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::RelV2, MetricsUpdate::Sub(1)));
|
||||
// put tombstone
|
||||
self.put(key, Value::Image(RelDirExists::Removed.encode()));
|
||||
// no need to set dirty to true
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
} else {
|
||||
false
|
||||
};
|
||||
@@ -2331,7 +2548,67 @@ impl DatadirModification<'_> {
|
||||
self.put(dir_key, Value::Image(Bytes::from(RelDirectory::ser(&dir)?)));
|
||||
}
|
||||
}
|
||||
Ok(dropped_rels)
|
||||
}
|
||||
|
||||
async fn put_rel_drop_v2(
|
||||
&mut self,
|
||||
drop_relations: HashMap<(u32, u32), Vec<RelTag>>,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<BTreeSet<RelTag>, WalIngestError> {
|
||||
let mut dropped_rels = BTreeSet::new();
|
||||
for ((spc_node, db_node), rel_tags) in drop_relations {
|
||||
for rel_tag in rel_tags {
|
||||
let key = rel_tag_sparse_key(spc_node, db_node, rel_tag.relnode, rel_tag.forknum);
|
||||
let val = RelDirExists::decode_option(self.sparse_get(key, ctx).await?)
|
||||
.map_err(|_| WalIngestErrorKind::InvalidKey(key, self.lsn))?;
|
||||
if val == RelDirExists::Exists {
|
||||
dropped_rels.insert(rel_tag);
|
||||
self.pending_directory_entries
|
||||
.push((DirectoryKind::RelV2, MetricsUpdate::Sub(1)));
|
||||
// put tombstone
|
||||
self.put(key, Value::Image(RelDirExists::Removed.encode()));
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(dropped_rels)
|
||||
}
|
||||
|
||||
/// Drop some relations
|
||||
pub(crate) async fn put_rel_drops(
|
||||
&mut self,
|
||||
drop_relations: HashMap<(u32, u32), Vec<RelTag>>,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<(), WalIngestError> {
|
||||
let v2_mode = self
|
||||
.maybe_enable_rel_size_v2(false)
|
||||
.map_err(WalIngestErrorKind::MaybeRelSizeV2Error)?;
|
||||
match v2_mode.current_status {
|
||||
RelSizeMigration::Legacy => {
|
||||
self.put_rel_drop_v1(drop_relations, ctx).await?;
|
||||
}
|
||||
RelSizeMigration::Migrating => {
|
||||
let dropped_rels_v1 = self.put_rel_drop_v1(drop_relations.clone(), ctx).await?;
|
||||
let dropped_rels_v2_res = self.put_rel_drop_v2(drop_relations, ctx).await;
|
||||
match dropped_rels_v2_res {
|
||||
Ok(dropped_rels_v2) => {
|
||||
if dropped_rels_v1 != dropped_rels_v2 {
|
||||
tracing::warn!(
|
||||
"inconsistent v1/v2 rel drop: dropped_rels_v1.len()={}, dropped_rels_v2.len()={}",
|
||||
dropped_rels_v1.len(),
|
||||
dropped_rels_v2.len()
|
||||
);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::warn!("error dropping rels: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
RelSizeMigration::Migrated => {
|
||||
self.put_rel_drop_v2(drop_relations, ctx).await?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
@@ -1205,6 +1205,7 @@ impl TenantShard {
|
||||
idempotency.clone(),
|
||||
index_part.gc_compaction.clone(),
|
||||
index_part.rel_size_migration.clone(),
|
||||
index_part.rel_size_migrated_at,
|
||||
ctx,
|
||||
)?;
|
||||
let disk_consistent_lsn = timeline.get_disk_consistent_lsn();
|
||||
@@ -2584,6 +2585,7 @@ impl TenantShard {
|
||||
initdb_lsn,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
ctx,
|
||||
)
|
||||
.await
|
||||
@@ -2913,6 +2915,7 @@ impl TenantShard {
|
||||
initdb_lsn,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
ctx,
|
||||
)
|
||||
.await
|
||||
@@ -4342,6 +4345,7 @@ impl TenantShard {
|
||||
create_idempotency: CreateTimelineIdempotency,
|
||||
gc_compaction_state: Option<GcCompactionState>,
|
||||
rel_size_v2_status: Option<RelSizeMigration>,
|
||||
rel_size_migrated_at: Option<Lsn>,
|
||||
ctx: &RequestContext,
|
||||
) -> anyhow::Result<(Arc<Timeline>, RequestContext)> {
|
||||
let state = match cause {
|
||||
@@ -4376,6 +4380,7 @@ impl TenantShard {
|
||||
create_idempotency,
|
||||
gc_compaction_state,
|
||||
rel_size_v2_status,
|
||||
rel_size_migrated_at,
|
||||
self.cancel.child_token(),
|
||||
);
|
||||
|
||||
@@ -5085,6 +5090,7 @@ impl TenantShard {
|
||||
src_timeline.pg_version,
|
||||
);
|
||||
|
||||
let (rel_size_v2_status, rel_size_migrated_at) = src_timeline.get_rel_size_v2_status();
|
||||
let (uninitialized_timeline, _timeline_ctx) = self
|
||||
.prepare_new_timeline(
|
||||
dst_id,
|
||||
@@ -5092,7 +5098,8 @@ impl TenantShard {
|
||||
timeline_create_guard,
|
||||
start_lsn + 1,
|
||||
Some(Arc::clone(src_timeline)),
|
||||
Some(src_timeline.get_rel_size_v2_status()),
|
||||
Some(rel_size_v2_status),
|
||||
rel_size_migrated_at,
|
||||
ctx,
|
||||
)
|
||||
.await?;
|
||||
@@ -5379,6 +5386,7 @@ impl TenantShard {
|
||||
pgdata_lsn,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
ctx,
|
||||
)
|
||||
.await?;
|
||||
@@ -5462,14 +5470,17 @@ impl TenantShard {
|
||||
start_lsn: Lsn,
|
||||
ancestor: Option<Arc<Timeline>>,
|
||||
rel_size_v2_status: Option<RelSizeMigration>,
|
||||
rel_size_migrated_at: Option<Lsn>,
|
||||
ctx: &RequestContext,
|
||||
) -> anyhow::Result<(UninitializedTimeline<'a>, RequestContext)> {
|
||||
let tenant_shard_id = self.tenant_shard_id;
|
||||
|
||||
let resources = self.build_timeline_resources(new_timeline_id);
|
||||
resources
|
||||
.remote_client
|
||||
.init_upload_queue_for_empty_remote(new_metadata, rel_size_v2_status.clone())?;
|
||||
resources.remote_client.init_upload_queue_for_empty_remote(
|
||||
new_metadata,
|
||||
rel_size_v2_status.clone(),
|
||||
rel_size_migrated_at,
|
||||
)?;
|
||||
|
||||
let (timeline_struct, timeline_ctx) = self
|
||||
.create_timeline_struct(
|
||||
@@ -5482,6 +5493,7 @@ impl TenantShard {
|
||||
create_guard.idempotency.clone(),
|
||||
None,
|
||||
rel_size_v2_status,
|
||||
rel_size_migrated_at,
|
||||
ctx,
|
||||
)
|
||||
.context("Failed to create timeline data structure")?;
|
||||
|
||||
@@ -443,7 +443,8 @@ impl RemoteTimelineClient {
|
||||
pub fn init_upload_queue_for_empty_remote(
|
||||
&self,
|
||||
local_metadata: &TimelineMetadata,
|
||||
rel_size_v2_status: Option<RelSizeMigration>,
|
||||
rel_size_v2_migration: Option<RelSizeMigration>,
|
||||
rel_size_migrated_at: Option<Lsn>,
|
||||
) -> anyhow::Result<()> {
|
||||
// Set the maximum number of inprogress tasks to the remote storage concurrency. There's
|
||||
// certainly no point in starting more upload tasks than this.
|
||||
@@ -455,7 +456,8 @@ impl RemoteTimelineClient {
|
||||
let mut upload_queue = self.upload_queue.lock().unwrap();
|
||||
let initialized_queue =
|
||||
upload_queue.initialize_empty_remote(local_metadata, inprogress_limit)?;
|
||||
initialized_queue.dirty.rel_size_migration = rel_size_v2_status;
|
||||
initialized_queue.dirty.rel_size_migration = rel_size_v2_migration;
|
||||
initialized_queue.dirty.rel_size_migrated_at = rel_size_migrated_at;
|
||||
self.update_remote_physical_size_gauge(None);
|
||||
info!("initialized upload queue as empty");
|
||||
Ok(())
|
||||
@@ -994,10 +996,12 @@ impl RemoteTimelineClient {
|
||||
pub(crate) fn schedule_index_upload_for_rel_size_v2_status_update(
|
||||
self: &Arc<Self>,
|
||||
rel_size_v2_status: RelSizeMigration,
|
||||
rel_size_migrated_at: Option<Lsn>,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut guard = self.upload_queue.lock().unwrap();
|
||||
let upload_queue = guard.initialized_mut()?;
|
||||
upload_queue.dirty.rel_size_migration = Some(rel_size_v2_status);
|
||||
upload_queue.dirty.rel_size_migrated_at = rel_size_migrated_at;
|
||||
// TODO: allow this operation to bypass the validation check because we might upload the index part
|
||||
// with no layers but the flag updated. For now, we just modify the index part in memory and the next
|
||||
// upload will include the flag.
|
||||
|
||||
@@ -114,6 +114,11 @@ pub struct IndexPart {
|
||||
/// The timestamp when the timeline was marked invisible in synthetic size calculations.
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub(crate) marked_invisible_at: Option<NaiveDateTime>,
|
||||
|
||||
/// The LSN at which we started the rel size migration. Accesses below this LSN should be
|
||||
/// processed with the v1 read path. Usually this LSN should be set together with `rel_size_migration`.
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub(crate) rel_size_migrated_at: Option<Lsn>,
|
||||
}
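Like the other optional `IndexPart` fields, `rel_size_migrated_at` is skipped during serialization when `None` and defaults to `None` when older index parts (format version 14 and below) are read back. A hypothetical, trimmed-down serde model of that pattern (requires the `serde` and `serde_json` crates; the LSN is modelled as a plain string here, not the real `Lsn` type):

// Hypothetical, trimmed-down model of the new optional field's serialization.
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct IndexPartSketch {
    version: usize,
    // Omitted from index_part.json when None, and defaults to None when the
    // field is missing (index parts written before format version 15).
    #[serde(skip_serializing_if = "Option::is_none", default)]
    rel_size_migrated_at: Option<String>,
}

fn main() {
    // Older index parts without the field still deserialize cleanly.
    let old: IndexPartSketch = serde_json::from_str(r#"{"version":14}"#).unwrap();
    assert_eq!(old.rel_size_migrated_at, None);

    // New index parts carry the migration LSN when it is set.
    let new = IndexPartSketch {
        version: 15,
        rel_size_migrated_at: Some("0/16960E8".to_string()),
    };
    println!("{}", serde_json::to_string(&new).unwrap());
}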
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
|
||||
@@ -142,10 +147,12 @@ impl IndexPart {
|
||||
/// - 12: +l2_lsn
|
||||
/// - 13: +gc_compaction
|
||||
/// - 14: +marked_invisible_at
|
||||
const LATEST_VERSION: usize = 14;
|
||||
/// - 15: +rel_size_migrated_at
|
||||
const LATEST_VERSION: usize = 15;
|
||||
|
||||
// Versions we may see when reading from a bucket.
|
||||
pub const KNOWN_VERSIONS: &'static [usize] = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14];
|
||||
pub const KNOWN_VERSIONS: &'static [usize] =
|
||||
&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
|
||||
|
||||
pub const FILE_NAME: &'static str = "index_part.json";
|
||||
|
||||
@@ -165,6 +172,7 @@ impl IndexPart {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -475,6 +483,7 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -524,6 +533,7 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -574,6 +584,7 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -627,6 +638,7 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let empty_layers_parsed = IndexPart::from_json_bytes(empty_layers_json.as_bytes()).unwrap();
|
||||
@@ -675,6 +687,7 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -726,6 +739,7 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -782,6 +796,7 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -843,6 +858,7 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -905,6 +921,7 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -972,6 +989,7 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -1052,6 +1070,7 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -1133,6 +1152,7 @@ mod tests {
|
||||
l2_lsn: None,
|
||||
gc_compaction: None,
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -1220,6 +1240,7 @@ mod tests {
|
||||
last_completed_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
|
||||
}),
|
||||
marked_invisible_at: None,
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
@@ -1308,6 +1329,97 @@ mod tests {
|
||||
last_completed_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
|
||||
}),
|
||||
marked_invisible_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")),
|
||||
rel_size_migrated_at: None,
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
assert_eq!(part, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn v15_rel_size_migrated_at_is_parsed() {
|
||||
let example = r#"{
|
||||
"version": 15,
|
||||
"layer_metadata":{
|
||||
"000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9": { "file_size": 25600000 },
|
||||
"000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51": { "file_size": 9007199254741001 }
|
||||
},
|
||||
"disk_consistent_lsn":"0/16960E8",
|
||||
"metadata": {
|
||||
"disk_consistent_lsn": "0/16960E8",
|
||||
"prev_record_lsn": "0/1696070",
|
||||
"ancestor_timeline": "e45a7f37d3ee2ff17dc14bf4f4e3f52e",
|
||||
"ancestor_lsn": "0/0",
|
||||
"latest_gc_cutoff_lsn": "0/1696070",
|
||||
"initdb_lsn": "0/1696070",
|
||||
"pg_version": 14
|
||||
},
|
||||
"gc_blocking": {
|
||||
"started_at": "2024-07-19T09:00:00.123",
|
||||
"reasons": ["DetachAncestor"]
|
||||
},
|
||||
"import_pgdata": {
|
||||
"V1": {
|
||||
"Done": {
|
||||
"idempotency_key": "specified-by-client-218a5213-5044-4562-a28d-d024c5f057f5",
|
||||
"started_at": "2024-11-13T09:23:42.123",
|
||||
"finished_at": "2024-11-13T09:42:23.123"
|
||||
}
|
||||
}
|
||||
},
|
||||
"rel_size_migration": "legacy",
|
||||
"l2_lsn": "0/16960E8",
|
||||
"gc_compaction": {
|
||||
"last_completed_lsn": "0/16960E8"
|
||||
},
|
||||
"marked_invisible_at": "2023-07-31T09:00:00.123",
|
||||
"rel_size_migrated_at": "0/16960E8"
|
||||
}"#;
|
||||
|
||||
let expected = IndexPart {
|
||||
version: 15,
|
||||
layer_metadata: HashMap::from([
|
||||
("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), LayerFileMetadata {
|
||||
file_size: 25600000,
|
||||
generation: Generation::none(),
|
||||
shard: ShardIndex::unsharded()
|
||||
}),
|
||||
("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), LayerFileMetadata {
|
||||
file_size: 9007199254741001,
|
||||
generation: Generation::none(),
|
||||
shard: ShardIndex::unsharded()
|
||||
})
|
||||
]),
|
||||
disk_consistent_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
|
||||
metadata: TimelineMetadata::new(
|
||||
Lsn::from_str("0/16960E8").unwrap(),
|
||||
Some(Lsn::from_str("0/1696070").unwrap()),
|
||||
Some(TimelineId::from_str("e45a7f37d3ee2ff17dc14bf4f4e3f52e").unwrap()),
|
||||
Lsn::INVALID,
|
||||
Lsn::from_str("0/1696070").unwrap(),
|
||||
Lsn::from_str("0/1696070").unwrap(),
|
||||
PgMajorVersion::PG14,
|
||||
).with_recalculated_checksum().unwrap(),
|
||||
deleted_at: None,
|
||||
lineage: Default::default(),
|
||||
gc_blocking: Some(GcBlocking {
|
||||
started_at: parse_naive_datetime("2024-07-19T09:00:00.123000000"),
|
||||
reasons: enumset::EnumSet::from_iter([GcBlockingReason::DetachAncestor]),
|
||||
}),
|
||||
last_aux_file_policy: Default::default(),
|
||||
archived_at: None,
|
||||
import_pgdata: Some(import_pgdata::index_part_format::Root::V1(import_pgdata::index_part_format::V1::Done(import_pgdata::index_part_format::Done{
|
||||
started_at: parse_naive_datetime("2024-11-13T09:23:42.123000000"),
|
||||
finished_at: parse_naive_datetime("2024-11-13T09:42:23.123000000"),
|
||||
idempotency_key: import_pgdata::index_part_format::IdempotencyKey::new("specified-by-client-218a5213-5044-4562-a28d-d024c5f057f5".to_string()),
|
||||
}))),
|
||||
rel_size_migration: Some(RelSizeMigration::Legacy),
|
||||
l2_lsn: Some("0/16960E8".parse::<Lsn>().unwrap()),
|
||||
gc_compaction: Some(GcCompactionState {
|
||||
last_completed_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
|
||||
}),
|
||||
marked_invisible_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")),
|
||||
rel_size_migrated_at: Some("0/16960E8".parse::<Lsn>().unwrap()),
|
||||
};
|
||||
|
||||
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
|
||||
|
||||
@@ -70,7 +70,7 @@ use tracing::*;
|
||||
use utils::generation::Generation;
|
||||
use utils::guard_arc_swap::GuardArcSwap;
|
||||
use utils::id::TimelineId;
|
||||
use utils::logging::{MonitorSlowFutureCallback, monitor_slow_future};
|
||||
use utils::logging::{MonitorSlowFutureCallback, log_slow, monitor_slow_future};
|
||||
use utils::lsn::{AtomicLsn, Lsn, RecordLsn};
|
||||
use utils::postgres_client::PostgresClientProtocol;
|
||||
use utils::rate_limit::RateLimit;
|
||||
@@ -441,7 +441,7 @@ pub struct Timeline {
|
||||
/// heatmap on demand.
|
||||
heatmap_layers_downloader: Mutex<Option<heatmap_layers_downloader::HeatmapLayersDownloader>>,
|
||||
|
||||
pub(crate) rel_size_v2_status: ArcSwapOption<RelSizeMigration>,
|
||||
pub(crate) rel_size_v2_status: ArcSwap<(Option<RelSizeMigration>, Option<Lsn>)>,
|
||||
|
||||
wait_lsn_log_slow: tokio::sync::Semaphore,
|
||||
|
||||
@@ -2894,12 +2894,9 @@ impl Timeline {
|
||||
.unwrap_or(self.conf.default_tenant_conf.rel_size_v2_enabled)
|
||||
}
|
||||
|
||||
pub(crate) fn get_rel_size_v2_status(&self) -> RelSizeMigration {
|
||||
self.rel_size_v2_status
|
||||
.load()
|
||||
.as_ref()
|
||||
.map(|s| s.as_ref().clone())
|
||||
.unwrap_or(RelSizeMigration::Legacy)
|
||||
pub(crate) fn get_rel_size_v2_status(&self) -> (RelSizeMigration, Option<Lsn>) {
|
||||
let (status, migrated_at) = self.rel_size_v2_status.load().as_ref().clone();
|
||||
(status.unwrap_or(RelSizeMigration::Legacy), migrated_at)
|
||||
}
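Storing the status and the migration LSN behind a single `ArcSwap` means readers always observe both values from the same snapshot, and writers replace the pair atomically. A minimal sketch of that pattern using the `arc-swap` crate (with simplified `RelSizeMigration` and `Lsn` stand-ins, not the real `Timeline` struct):

// Sketch of keeping the (status, migrated_at) pair behind one ArcSwap so a
// single load yields a consistent snapshot of both fields.
use std::sync::Arc;

use arc_swap::ArcSwap;

#[derive(Clone, Copy, PartialEq, Eq)]
enum RelSizeMigration {
    Legacy,
    Migrating,
    Migrated,
}

type Lsn = u64;

struct TimelineSketch {
    rel_size_v2_status: ArcSwap<(Option<RelSizeMigration>, Option<Lsn>)>,
}

impl TimelineSketch {
    fn get_status(&self) -> (RelSizeMigration, Option<Lsn>) {
        // One load returns both fields from the same snapshot.
        let (status, migrated_at) = self.rel_size_v2_status.load().as_ref().clone();
        (status.unwrap_or(RelSizeMigration::Legacy), migrated_at)
    }

    fn update_status(&self, status: RelSizeMigration, migrated_at: Option<Lsn>) {
        // Writers replace the whole pair atomically.
        self.rel_size_v2_status
            .store(Arc::new((Some(status), migrated_at)));
    }
}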
|
||||
|
||||
fn get_compaction_upper_limit(&self) -> usize {
|
||||
@@ -3174,6 +3171,7 @@ impl Timeline {
|
||||
create_idempotency: crate::tenant::CreateTimelineIdempotency,
|
||||
gc_compaction_state: Option<GcCompactionState>,
|
||||
rel_size_v2_status: Option<RelSizeMigration>,
|
||||
rel_size_migrated_at: Option<Lsn>,
|
||||
cancel: CancellationToken,
|
||||
) -> Arc<Self> {
|
||||
let disk_consistent_lsn = metadata.disk_consistent_lsn();
|
||||
@@ -3338,7 +3336,10 @@ impl Timeline {
|
||||
|
||||
heatmap_layers_downloader: Mutex::new(None),
|
||||
|
||||
rel_size_v2_status: ArcSwapOption::from_pointee(rel_size_v2_status),
|
||||
rel_size_v2_status: ArcSwap::from_pointee((
|
||||
rel_size_v2_status,
|
||||
rel_size_migrated_at,
|
||||
)),
|
||||
|
||||
wait_lsn_log_slow: tokio::sync::Semaphore::new(1),
|
||||
|
||||
@@ -3426,11 +3427,17 @@ impl Timeline {
|
||||
pub(crate) fn update_rel_size_v2_status(
|
||||
&self,
|
||||
rel_size_v2_status: RelSizeMigration,
|
||||
rel_size_migrated_at: Option<Lsn>,
|
||||
) -> anyhow::Result<()> {
|
||||
self.rel_size_v2_status
|
||||
.store(Some(Arc::new(rel_size_v2_status.clone())));
|
||||
self.rel_size_v2_status.store(Arc::new((
|
||||
Some(rel_size_v2_status.clone()),
|
||||
rel_size_migrated_at,
|
||||
)));
|
||||
self.remote_client
|
||||
.schedule_index_upload_for_rel_size_v2_status_update(rel_size_v2_status)
|
||||
.schedule_index_upload_for_rel_size_v2_status_update(
|
||||
rel_size_v2_status,
|
||||
rel_size_migrated_at,
|
||||
)
|
||||
}
|
||||
|
||||
pub(crate) fn get_gc_compaction_state(&self) -> Option<GcCompactionState> {
|
||||
@@ -6891,7 +6898,13 @@ impl Timeline {
|
||||
|
||||
write_guard.store_and_unlock(new_gc_cutoff)
|
||||
};
|
||||
waitlist.wait().await;
|
||||
let waitlist_wait_fut = std::pin::pin!(waitlist.wait());
|
||||
log_slow(
|
||||
"applied_gc_cutoff waitlist wait",
|
||||
Duration::from_secs(30),
|
||||
waitlist_wait_fut,
|
||||
)
|
||||
.await;
|
||||
|
||||
info!("GC starting");
|
||||
|
||||
|
||||
@@ -332,6 +332,7 @@ impl DeleteTimelineFlow {
|
||||
crate::tenant::CreateTimelineIdempotency::FailWithConflict, // doesn't matter what we put here
|
||||
None, // doesn't matter what we put here
|
||||
None, // doesn't matter what we put here
|
||||
None, // doesn't matter what we put here
|
||||
ctx,
|
||||
)
|
||||
.context("create_timeline_struct")?;
|
||||
|
||||
@@ -52,7 +52,7 @@ pub(crate) fn regenerate(
|
||||
};
|
||||
|
||||
// Express a static value for how many shards we may schedule on one node
|
||||
const MAX_SHARDS: u32 = 5000;
|
||||
const MAX_SHARDS: u32 = 2500;
|
||||
|
||||
let mut doc = PageserverUtilization {
|
||||
disk_usage_bytes: used,
|
||||
|
||||
@@ -33,6 +33,10 @@ SHLIB_LINK = -lcurl
|
||||
UNAME_S := $(shell uname -s)
|
||||
ifeq ($(UNAME_S), Darwin)
|
||||
SHLIB_LINK += -framework Security -framework CoreFoundation -framework SystemConfiguration
|
||||
|
||||
# Link against object files for the current macOS version, to avoid spurious linker warnings.
|
||||
MACOSX_DEPLOYMENT_TARGET := $(shell xcrun --sdk macosx --show-sdk-version)
|
||||
export MACOSX_DEPLOYMENT_TARGET
|
||||
endif
|
||||
|
||||
EXTENSION = neon
|
||||
|
||||
@@ -79,10 +79,6 @@
|
||||
#include "access/xlogrecovery.h"
|
||||
#endif
|
||||
|
||||
#if PG_VERSION_NUM < 160000
|
||||
typedef PGAlignedBlock PGIOAlignedBlock;
|
||||
#endif
|
||||
|
||||
#define NEON_PANIC_CONNECTION_STATE(shard_no, elvl, message, ...) \
|
||||
neon_shard_log(shard_no, elvl, "Broken connection state: " message, \
|
||||
##__VA_ARGS__)
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
#include "extension_server.h"
|
||||
#include "neon_utils.h"
|
||||
|
||||
static int extension_server_port = 0;
|
||||
int hadron_extension_server_port = 0;
|
||||
static int extension_server_request_timeout = 60;
|
||||
static int extension_server_connect_timeout = 60;
|
||||
|
||||
@@ -47,7 +47,7 @@ neon_download_extension_file_http(const char *filename, bool is_library)
|
||||
curl_easy_setopt(handle, CURLOPT_CONNECTTIMEOUT, (long)extension_server_connect_timeout /* seconds */ );
|
||||
|
||||
compute_ctl_url = psprintf("http://localhost:%d/extension_server/%s%s",
|
||||
extension_server_port, filename, is_library ? "?is_library=true" : "");
|
||||
hadron_extension_server_port, filename, is_library ? "?is_library=true" : "");
|
||||
|
||||
elog(LOG, "Sending request to compute_ctl: %s", compute_ctl_url);
|
||||
|
||||
@@ -82,7 +82,7 @@ pg_init_extension_server()
|
||||
DefineCustomIntVariable("neon.extension_server_port",
|
||||
"connection string to the compute_ctl",
|
||||
NULL,
|
||||
&extension_server_port,
|
||||
&hadron_extension_server_port,
|
||||
0, 0, INT_MAX,
|
||||
PGC_POSTMASTER,
|
||||
0, /* no flags required */
|
||||
|
||||
@@ -635,6 +635,11 @@ lfc_init(void)
|
||||
NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
* Dump a list of pages that are currently in the LFC
|
||||
*
|
||||
* This is used to get a snapshot that can be used to prewarm the LFC later.
|
||||
*/
|
||||
FileCacheState*
|
||||
lfc_get_state(size_t max_entries)
|
||||
{
|
||||
@@ -2267,4 +2272,3 @@ get_prewarm_info(PG_FUNCTION_ARGS)
|
||||
|
||||
PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls)));
|
||||
}
|
||||
|
||||
|
||||
@@ -13,6 +13,8 @@
|
||||
#include <math.h>
|
||||
#include <sys/socket.h>
|
||||
|
||||
#include <curl/curl.h>
|
||||
|
||||
#include "libpq-int.h"
|
||||
|
||||
#include "access/xlog.h"
|
||||
@@ -86,6 +88,10 @@ static int pageserver_response_log_timeout = 10000;
|
||||
/* 2.5 minutes. A bit higher than highest default TCP retransmission timeout */
|
||||
static int pageserver_response_disconnect_timeout = 150000;
|
||||
|
||||
static int conf_refresh_reconnect_attempt_threshold = 16;
|
||||
// Hadron: timeout for refresh errors (1 minute)
|
||||
static uint64 kRefreshErrorTimeoutUSec = 1 * USECS_PER_MINUTE;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
char connstring[MAX_SHARDS][MAX_PAGESERVER_CONNSTRING_SIZE];
|
||||
@@ -130,7 +136,7 @@ static uint64 pagestore_local_counter = 0;
|
||||
typedef enum PSConnectionState {
|
||||
PS_Disconnected, /* no connection yet */
|
||||
PS_Connecting_Startup, /* connection starting up */
|
||||
PS_Connecting_PageStream, /* negotiating pagestream */
|
||||
PS_Connecting_PageStream, /* negotiating pagestream */
|
||||
PS_Connected, /* connected, pagestream established */
|
||||
} PSConnectionState;
|
||||
|
||||
@@ -401,7 +407,7 @@ get_shard_number(BufferTag *tag)
|
||||
}
|
||||
|
||||
static inline void
|
||||
CLEANUP_AND_DISCONNECT(PageServer *shard)
|
||||
CLEANUP_AND_DISCONNECT(PageServer *shard)
|
||||
{
|
||||
if (shard->wes_read)
|
||||
{
|
||||
@@ -423,7 +429,7 @@ CLEANUP_AND_DISCONNECT(PageServer *shard)
|
||||
* complete the connection (e.g. due to receiving an earlier cancellation
|
||||
* during connection start).
|
||||
* Returns true if successfully connected; false if the connection failed.
|
||||
*
|
||||
*
|
||||
* Throws errors in unrecoverable situations, or when this backend's query
|
||||
* is canceled.
|
||||
*/
|
||||
@@ -1030,6 +1036,101 @@ pageserver_disconnect_shard(shardno_t shard_no)
|
||||
shard->state = PS_Disconnected;
|
||||
}
|
||||
|
||||
// BEGIN HADRON
|
||||
/*
|
||||
* Nudge compute_ctl to refresh our configuration. Called when we suspect we may be
|
||||
* connecting to the wrong pageservers due to a stale configuration.
|
||||
*
|
||||
* This is a best-effort operation. If we couldn't send the local loopback HTTP request
|
||||
* to compute_ctl or if the request fails for any reason, we just log the error and move
|
||||
* on.
|
||||
*/
|
||||
|
||||
extern int hadron_extension_server_port;
|
||||
|
||||
// The timestamp (usec) of the first error that occurred while trying to refresh the configuration.
|
||||
// Will be reset to 0 after a successful refresh.
|
||||
static uint64 first_recorded_refresh_error_usec = 0;
|
||||
|
||||
// Request compute_ctl to refresh the configuration. This operation may fail, e.g., if the compute_ctl
|
||||
// is already in the configuration state. The function returns true if the caller needs to cancel the
|
||||
// current query to avoid a deadlock or livelock.
|
||||
static bool
|
||||
hadron_request_configuration_refresh() {
|
||||
static CURL *handle = NULL;
|
||||
CURLcode res;
|
||||
char *compute_ctl_url;
|
||||
bool cancel_query = false;
|
||||
|
||||
if (!lakebase_mode)
|
||||
return false;
|
||||
|
||||
if (handle == NULL)
|
||||
{
|
||||
handle = alloc_curl_handle();
|
||||
|
||||
curl_easy_setopt(handle, CURLOPT_CUSTOMREQUEST, "POST");
|
||||
curl_easy_setopt(handle, CURLOPT_TIMEOUT, 3L /* seconds */ );
|
||||
curl_easy_setopt(handle, CURLOPT_POSTFIELDS, "");
|
||||
}
|
||||
|
||||
// Set the URL
|
||||
compute_ctl_url = psprintf("http://localhost:%d/refresh_configuration", hadron_extension_server_port);
|
||||
|
||||
|
||||
elog(LOG, "Sending refresh configuration request to compute_ctl: %s", compute_ctl_url);
|
||||
|
||||
curl_easy_setopt(handle, CURLOPT_URL, compute_ctl_url);
|
||||
|
||||
res = curl_easy_perform(handle);
|
||||
if (res != CURLE_OK )
|
||||
{
|
||||
elog(WARNING, "refresh_configuration request failed: %s\n", curl_easy_strerror(res));
|
||||
}
|
||||
else
|
||||
{
|
||||
long http_code = 0;
|
||||
curl_easy_getinfo(handle, CURLINFO_RESPONSE_CODE, &http_code);
|
||||
if ( res != CURLE_OK )
|
||||
{
|
||||
elog(WARNING, "compute_ctl refresh_configuration request getinfo failed: %s\n", curl_easy_strerror(res));
|
||||
}
|
||||
else
|
||||
{
|
||||
elog(LOG, "compute_ctl refresh_configuration got HTTP response: %ld\n", http_code);
|
||||
if( http_code == 200 )
|
||||
{
|
||||
first_recorded_refresh_error_usec = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (first_recorded_refresh_error_usec == 0)
|
||||
{
|
||||
first_recorded_refresh_error_usec = GetCurrentTimestamp();
|
||||
}
|
||||
else if(GetCurrentTimestamp() - first_recorded_refresh_error_usec > kRefreshErrorTimeoutUSec)
|
||||
{
|
||||
{
|
||||
first_recorded_refresh_error_usec = 0;
|
||||
cancel_query = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// In regular Postgres usage, it is not necessary to manually free memory allocated by palloc (psprintf) because
|
||||
// it will be cleaned up after the "memory context" is reset (e.g. after the query or the transaction is finished).
|
||||
// However, the number of times this function gets called during a single query/transaction can be unbounded due to
|
||||
// the various retry loops around calls to pageservers. Therefore, we need to manually free this memory here.
|
||||
if (compute_ctl_url != NULL)
|
||||
{
|
||||
pfree(compute_ctl_url);
|
||||
}
|
||||
return cancel_query;
|
||||
}
|
||||
// END HADRON
|
||||
|
||||
static bool
|
||||
pageserver_send(shardno_t shard_no, NeonRequest *request)
|
||||
{
|
||||
@@ -1064,6 +1165,11 @@ pageserver_send(shardno_t shard_no, NeonRequest *request)
|
||||
while (!pageserver_connect(shard_no, shard->n_reconnect_attempts < max_reconnect_attempts ? LOG : ERROR))
|
||||
{
|
||||
shard->n_reconnect_attempts += 1;
|
||||
if (shard->n_reconnect_attempts > conf_refresh_reconnect_attempt_threshold
|
||||
&& hadron_request_configuration_refresh() )
|
||||
{
|
||||
neon_shard_log(shard_no, ERROR, "request failed too many times, cancelling query");
|
||||
}
|
||||
}
|
||||
shard->n_reconnect_attempts = 0;
|
||||
} else {
|
||||
@@ -1171,17 +1277,26 @@ pageserver_receive(shardno_t shard_no)
|
||||
pfree(msg);
|
||||
pageserver_disconnect(shard_no);
|
||||
resp = NULL;
|
||||
|
||||
/*
|
||||
* Always poke compute_ctl to request a configuration refresh if we have issues receiving data from pageservers after
|
||||
* successfully connecting to it. It could be an indication that we are connecting to the wrong pageservers (e.g. PS
|
||||
* is in secondary mode or otherwise refuses to respond our request).
|
||||
*/
|
||||
hadron_request_configuration_refresh();
|
||||
}
|
||||
else if (rc == -2)
|
||||
{
|
||||
char *msg = pchomp(PQerrorMessage(pageserver_conn));
|
||||
|
||||
pageserver_disconnect(shard_no);
|
||||
hadron_request_configuration_refresh();
|
||||
neon_shard_log(shard_no, ERROR, "pageserver_receive disconnect: could not read COPY data: %s", msg);
|
||||
}
|
||||
else
|
||||
{
|
||||
pageserver_disconnect(shard_no);
|
||||
hadron_request_configuration_refresh();
|
||||
neon_shard_log(shard_no, ERROR, "pageserver_receive disconnect: unexpected PQgetCopyData return value: %d", rc);
|
||||
}
|
||||
|
||||
@@ -1249,21 +1364,34 @@ pageserver_try_receive(shardno_t shard_no)
|
||||
neon_shard_log(shard_no, LOG, "pageserver_receive disconnect: psql end of copy data: %s", pchomp(PQerrorMessage(pageserver_conn)));
|
||||
pageserver_disconnect(shard_no);
|
||||
resp = NULL;
|
||||
hadron_request_configuration_refresh();
|
||||
}
|
||||
else if (rc == -2)
|
||||
{
|
||||
char *msg = pchomp(PQerrorMessage(pageserver_conn));
|
||||
|
||||
pageserver_disconnect(shard_no);
|
||||
hadron_request_configuration_refresh();
|
||||
neon_shard_log(shard_no, LOG, "pageserver_receive disconnect: could not read COPY data: %s", msg);
|
||||
resp = NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
pageserver_disconnect(shard_no);
|
||||
hadron_request_configuration_refresh();
|
||||
neon_shard_log(shard_no, ERROR, "pageserver_receive disconnect: unexpected PQgetCopyData return value: %d", rc);
|
||||
}
|
||||
|
||||
/*
|
||||
* Always poke compute_ctl to request a configuration refresh if we have issues receiving data from pageservers after
|
||||
* successfully connecting to it. It could be an indication that we are connecting to the wrong pageservers (e.g. PS
|
||||
* is in secondary mode or otherwise refuses to respond to our request).
|
||||
*/
|
||||
if ( rc < 0 && hadron_request_configuration_refresh() )
|
||||
{
|
||||
neon_shard_log(shard_no, ERROR, "refresh_configuration request failed, cancelling query");
|
||||
}
|
||||
|
||||
shard->nresponses_received++;
|
||||
return (NeonResponse *) resp;
|
||||
}
|
||||
@@ -1460,6 +1588,16 @@ pg_init_libpagestore(void)
|
||||
PGC_SU_BACKEND,
|
||||
0, /* no flags required */
|
||||
NULL, NULL, NULL);
|
||||
DefineCustomIntVariable("hadron.conf_refresh_reconnect_attempt_threshold",
|
||||
"Threshold of the number of consecutive failed pageserver "
|
||||
"connection attempts (per shard) before signaling "
|
||||
"compute_ctl for a configuration refresh.",
|
||||
NULL,
|
||||
&conf_refresh_reconnect_attempt_threshold,
|
||||
16, 0, INT_MAX,
|
||||
PGC_USERSET,
|
||||
0,
|
||||
NULL, NULL, NULL);
|
||||
|
||||
DefineCustomIntVariable("neon.pageserver_response_log_timeout",
|
||||
"pageserver response log timeout",
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*-------------------------------------------------------------------------
|
||||
*
|
||||
* neon.c
|
||||
* Main entry point into the neon exension
|
||||
* Main entry point into the neon extension
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -48,6 +48,7 @@
|
||||
PG_MODULE_MAGIC;
|
||||
void _PG_init(void);
|
||||
|
||||
bool lakebase_mode = false;
|
||||
|
||||
static int running_xacts_overflow_policy;
|
||||
static bool monitor_query_exec_time = false;
|
||||
@@ -507,7 +508,7 @@ _PG_init(void)
|
||||
|
||||
DefineCustomBoolVariable(
|
||||
"neon.disable_logical_replication_subscribers",
|
||||
"Disables incomming logical replication",
|
||||
"Disable incoming logical replication",
|
||||
NULL,
|
||||
&disable_logical_replication_subscribers,
|
||||
false,
|
||||
@@ -566,7 +567,7 @@ _PG_init(void)
|
||||
|
||||
DefineCustomEnumVariable(
|
||||
"neon.debug_compare_local",
|
||||
"Debug mode for compaing content of pages in prefetch ring/LFC/PS and local disk",
|
||||
"Debug mode for comparing content of pages in prefetch ring/LFC/PS and local disk",
|
||||
NULL,
|
||||
&debug_compare_local,
|
||||
DEBUG_COMPARE_LOCAL_NONE,
|
||||
@@ -583,6 +584,16 @@ _PG_init(void)
|
||||
"neon_superuser",
|
||||
PGC_POSTMASTER, 0, NULL, NULL, NULL);
|
||||
|
||||
DefineCustomBoolVariable(
|
||||
"neon.lakebase_mode",
|
||||
"Is neon running in Lakebase?",
|
||||
NULL,
|
||||
&lakebase_mode,
|
||||
false,
|
||||
PGC_POSTMASTER,
|
||||
0,
|
||||
NULL, NULL, NULL);
|
||||
|
||||
/*
|
||||
* Important: This must happen after other parts of the extension are
|
||||
* loaded, otherwise any settings to GUCs that were set before the
|
||||
@@ -724,7 +735,6 @@ neon_shmem_request_hook(void)
|
||||
static void
|
||||
neon_shmem_startup_hook(void)
|
||||
{
|
||||
/* Initialize */
|
||||
if (prev_shmem_startup_hook)
|
||||
prev_shmem_startup_hook();
|
||||
|
||||
|
||||
@@ -21,6 +21,7 @@ extern int wal_acceptor_reconnect_timeout;
|
||||
extern int wal_acceptor_connection_timeout;
|
||||
extern int readahead_getpage_pull_timeout_ms;
|
||||
extern bool disable_wal_prev_lsn_checks;
|
||||
extern bool lakebase_mode;
|
||||
|
||||
extern bool AmPrewarmWorker;
|
||||
|
||||
|
||||
@@ -167,11 +167,7 @@ extern neon_per_backend_counters *neon_per_backend_counters_shared;
|
||||
*/
|
||||
#define NUM_NEON_PERF_COUNTER_SLOTS (MaxBackends + NUM_AUXILIARY_PROCS)
|
||||
|
||||
#if PG_VERSION_NUM >= 170000
|
||||
#define MyNeonCounters (&neon_per_backend_counters_shared[MyProcNumber])
|
||||
#else
|
||||
#define MyNeonCounters (&neon_per_backend_counters_shared[MyProc->pgprocno])
|
||||
#endif
|
||||
|
||||
extern void inc_getpage_wait(uint64 latency);
|
||||
extern void inc_page_cache_read_wait(uint64 latency);
|
||||
|
||||
@@ -9,6 +9,10 @@
|
||||
#include "fmgr.h"
|
||||
#include "storage/buf_internals.h"
|
||||
|
||||
#if PG_MAJORVERSION_NUM < 16
|
||||
typedef PGAlignedBlock PGIOAlignedBlock;
|
||||
#endif
|
||||
|
||||
#if PG_MAJORVERSION_NUM < 17
|
||||
#define NRelFileInfoBackendIsTemp(rinfo) (rinfo.backend != InvalidBackendId)
|
||||
#else
|
||||
@@ -158,6 +162,10 @@ InitBufferTag(BufferTag *tag, const RelFileNode *rnode,
|
||||
#define AmAutoVacuumWorkerProcess() (IsAutoVacuumWorkerProcess())
|
||||
#endif
|
||||
|
||||
#if PG_MAJORVERSION_NUM < 17
|
||||
#define MyProcNumber (MyProc - &ProcGlobal->allProcs[0])
|
||||
#endif
|
||||
|
||||
#if PG_MAJORVERSION_NUM < 15
|
||||
extern void InitMaterializedSRF(FunctionCallInfo fcinfo, bits32 flags);
|
||||
extern TimeLineID GetWALInsertionTimeLine(void);
|
||||
|
||||
@@ -72,10 +72,6 @@
|
||||
#include "access/xlogrecovery.h"
|
||||
#endif
|
||||
|
||||
#if PG_VERSION_NUM < 160000
|
||||
typedef PGAlignedBlock PGIOAlignedBlock;
|
||||
#endif
|
||||
|
||||
#include "access/nbtree.h"
|
||||
#include "storage/bufpage.h"
|
||||
#include "access/xlog_internal.h"
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
#include "neon.h"
|
||||
#include "neon_pgversioncompat.h"
|
||||
|
||||
#include "miscadmin.h"
|
||||
#include "pagestore_client.h"
|
||||
#include RELFILEINFO_HDR
|
||||
#include "storage/smgr.h"
|
||||
@@ -23,10 +24,6 @@
|
||||
#include "utils/dynahash.h"
|
||||
#include "utils/guc.h"
|
||||
|
||||
#if PG_VERSION_NUM >= 150000
|
||||
#include "miscadmin.h"
|
||||
#endif
|
||||
|
||||
typedef struct
|
||||
{
|
||||
NRelFileInfo rinfo;
|
||||
|
||||
@@ -389,12 +389,21 @@ typedef struct PageserverFeedback
|
||||
*/
|
||||
typedef struct WalRateLimiter
|
||||
{
|
||||
/* If the value is 1, PG backends will hit backpressure. */
|
||||
/* The effective wal write rate. Could be changed dynamically
|
||||
based on whether PG has backpressure or not.*/
|
||||
pg_atomic_uint32 effective_max_wal_bytes_per_second;
|
||||
/* If the value is 1, PG backends will hit backpressure until the time has passed batch_end_time_us. */
|
||||
pg_atomic_uint32 should_limit;
|
||||
/* The number of bytes sent in the current second. */
|
||||
uint64 sent_bytes;
|
||||
/* The last recorded time in microsecond. */
|
||||
pg_atomic_uint64 last_recorded_time_us;
|
||||
/* The timestamp when the write starts in the current batch. A batch is the time interval over which we
|
||||
track and throttle writes. Most times a batch is 1s, but it can become larger if PG overshoots the limit,
|
||||
and we will adjust the batch accordingly to compensate (e.g., if PG writes 10MB at once and max WAL write rate
|
||||
is 1MB/s, then the current batch will become 10s). */
|
||||
pg_atomic_uint64 batch_start_time_us;
|
||||
/* The timestamp (in the future) that the current batch should end and accept more writes
|
||||
(after should_limit is set to 1). */
|
||||
pg_atomic_uint64 batch_end_time_us;
|
||||
} WalRateLimiter;
|
||||
/* END_HADRON */
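The struct comment above describes batches that stretch when a backend overshoots the per-second budget. As a rough, standalone illustration (a Rust sketch with made-up names, not the extension's C code), the batch length is simply the bytes actually sent divided by the effective rate, with a one-second floor:

```rust
const USECS_PER_SEC: u64 = 1_000_000;

/// How long a batch must last before more WAL is accepted, given what was already sent.
fn batch_length_usecs(sent_bytes: u64, max_bytes_per_second: u64) -> u64 {
    (USECS_PER_SEC * sent_bytes / max_bytes_per_second.max(1)).max(USECS_PER_SEC)
}

fn main() {
    // The example from the comment: 10MB written at once against a 1MB/s limit => a 10s batch.
    let batch = batch_length_usecs(10 * 1024 * 1024, 1024 * 1024);
    assert_eq!(batch, 10 * USECS_PER_SEC);
}
```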
|
||||
|
||||
|
||||
@@ -68,6 +68,14 @@ int safekeeper_proto_version = 3;
|
||||
char *safekeeper_conninfo_options = "";
|
||||
/* BEGIN_HADRON */
|
||||
int databricks_max_wal_mb_per_second = -1;
|
||||
// During throttling, we limit the effective WAL write rate to 10KB/s.
|
||||
// PG can still push some WAL to SK, but at a very low rate.
|
||||
int databricks_throttled_max_wal_bytes_per_second = 10 * 1024;
|
||||
// The max sleep time of a batch. This is to make sure the rate limiter does not
|
||||
// overshoot too much and block PG for a very long time.
|
||||
// This is set to 5 minutes for now. PG can send as much as 10MB of WAL to the SK in one batch,
// so this effectively caps the write rate to ~30KB/s in the worst case.
|
||||
static uint64 kRateLimitMaxBatchUSecs = 300 * USECS_PER_SEC;
|
||||
/* END_HADRON */
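A quick check of the "~30KB/s in the worst case" figure mentioned above: with the batch length capped at 300 seconds and roughly 10MB of WAL admitted per batch, the long-run rate works out to about 34KiB/s (hypothetical Rust arithmetic, not part of the patch):

```rust
fn main() {
    let max_batch_secs: u64 = 300;               // kRateLimitMaxBatchUSecs in seconds
    let bytes_per_batch: u64 = 10 * 1024 * 1024; // ~10MB pushed to the safekeeper in one batch
    let worst_case_kib_per_s = bytes_per_batch / 1024 / max_batch_secs;
    println!("worst-case rate ~= {worst_case_kib_per_s} KiB/s"); // ~= 34 KiB/s
}
```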
|
||||
|
||||
/* Set to true in the walproposer bgw. */
|
||||
@@ -86,6 +94,7 @@ static HotStandbyFeedback agg_hs_feedback;
|
||||
static void nwp_register_gucs(void);
|
||||
static void assign_neon_safekeepers(const char *newval, void *extra);
|
||||
static uint64 backpressure_lag_impl(void);
|
||||
static uint64 hadron_backpressure_lag_impl(void);
|
||||
static uint64 startup_backpressure_wrap(void);
|
||||
static bool backpressure_throttling_impl(void);
|
||||
static void walprop_register_bgworker(void);
|
||||
@@ -110,9 +119,22 @@ static void rm_safekeeper_event_set(Safekeeper *to_remove, bool is_sk);
|
||||
|
||||
static void CheckGracefulShutdown(WalProposer *wp);
|
||||
|
||||
// HADRON
|
||||
/* BEGIN_HADRON */
|
||||
shardno_t get_num_shards(void);
|
||||
|
||||
static int positive_mb_to_bytes(int mb)
|
||||
{
|
||||
if (mb <= 0)
|
||||
{
|
||||
return mb;
|
||||
}
|
||||
else
|
||||
{
|
||||
return mb * 1024 * 1024;
|
||||
}
|
||||
}
|
||||
/* END_HADRON */
|
||||
|
||||
static void
|
||||
init_walprop_config(bool syncSafekeepers)
|
||||
{
|
||||
@@ -260,6 +282,16 @@ nwp_register_gucs(void)
|
||||
PGC_SUSET,
|
||||
GUC_UNIT_MB,
|
||||
NULL, NULL, NULL);
|
||||
|
||||
DefineCustomIntVariable(
|
||||
"databricks.throttled_max_wal_bytes_per_second",
|
||||
"The maximum WAL bytes per second when PG is being throttled.",
|
||||
NULL,
|
||||
&databricks_throttled_max_wal_bytes_per_second,
|
||||
10 * 1024, 0, INT_MAX,
|
||||
PGC_SUSET,
|
||||
GUC_UNIT_BYTE,
|
||||
NULL, NULL, NULL);
|
||||
/* END_HADRON */
|
||||
}
|
||||
|
||||
@@ -398,19 +430,65 @@ assign_neon_safekeepers(const char *newval, void *extra)
|
||||
pfree(oldval);
|
||||
}
|
||||
|
||||
/* Check if we need to suspend inserts because of lagging replication. */
|
||||
static uint64
|
||||
backpressure_lag_impl(void)
|
||||
/* BEGIN_HADRON */
|
||||
static uint64 hadron_backpressure_lag_impl(void)
|
||||
{
|
||||
struct WalproposerShmemState* state = NULL;
|
||||
uint64 lag = 0;
|
||||
|
||||
/* BEGIN_HADRON */
|
||||
if (max_cluster_size < 0) {
// if max cluster size is not set, then we don't apply backpressure because we're reconfiguring PG
|
||||
return 0;
|
||||
}
|
||||
/* END_HADRON */
|
||||
|
||||
lag = backpressure_lag_impl();
|
||||
state = GetWalpropShmemState();
|
||||
if ( state != NULL && databricks_max_wal_mb_per_second != -1 )
|
||||
{
|
||||
int old_limit = pg_atomic_read_u32(&state->wal_rate_limiter.effective_max_wal_bytes_per_second);
|
||||
int new_limit = (lag == 0)? positive_mb_to_bytes(databricks_max_wal_mb_per_second) : databricks_throttled_max_wal_bytes_per_second;
|
||||
if( old_limit != new_limit )
|
||||
{
|
||||
uint64 batch_start_time = pg_atomic_read_u64(&state->wal_rate_limiter.batch_start_time_us);
|
||||
uint64 batch_end_time = pg_atomic_read_u64(&state->wal_rate_limiter.batch_end_time_us);
|
||||
// the rate limit has changed, we need to reset the rate limiter's batch end time
|
||||
pg_atomic_write_u32(&state->wal_rate_limiter.effective_max_wal_bytes_per_second, new_limit);
|
||||
pg_atomic_write_u64(&state->wal_rate_limiter.batch_end_time_us, Min(batch_start_time + USECS_PER_SEC, batch_end_time));
|
||||
}
|
||||
if( new_limit == -1 )
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (pg_atomic_read_u32(&state->wal_rate_limiter.should_limit) == true)
|
||||
{
|
||||
TimestampTz now = GetCurrentTimestamp();
|
||||
struct WalRateLimiter *limiter = &state->wal_rate_limiter;
|
||||
uint64 batch_end_time = pg_atomic_read_u64(&limiter->batch_end_time_us);
|
||||
if ( now >= batch_end_time )
|
||||
{
|
||||
/*
|
||||
* The backend has passed the batch end time and it's time to push more WAL.
* If the backends are pushing WAL too fast, the WAL proposer will rate limit them again.
|
||||
*/
|
||||
uint32 expected = true;
|
||||
pg_atomic_compare_exchange_u32(&state->wal_rate_limiter.should_limit, &expected, false);
|
||||
return 0;
|
||||
}
|
||||
return Max(lag, 1);
|
||||
}
|
||||
// The rate limiter decided not to throttle; return 0.
|
||||
return 0;
|
||||
}
|
||||
|
||||
return lag;
|
||||
}
|
||||
/* END_HADRON */
|
||||
|
||||
/* Check if we need to suspend inserts because of lagging replication. */
|
||||
static uint64
|
||||
backpressure_lag_impl(void)
|
||||
{
|
||||
if (max_replication_apply_lag > 0 || max_replication_flush_lag > 0 || max_replication_write_lag > 0)
|
||||
{
|
||||
XLogRecPtr writePtr;
|
||||
@@ -429,45 +507,47 @@ backpressure_lag_impl(void)
|
||||
LSN_FORMAT_ARGS(flushPtr),
|
||||
LSN_FORMAT_ARGS(applyPtr));
|
||||
|
||||
if ((writePtr != InvalidXLogRecPtr && max_replication_write_lag > 0 && myFlushLsn > writePtr + max_replication_write_lag * MB))
|
||||
if (lakebase_mode)
|
||||
{
|
||||
return (myFlushLsn - writePtr - max_replication_write_lag * MB);
|
||||
}
|
||||
// In case PG does not have the shard map initialized, we assume PG always has at least 1 shard.
|
||||
shardno_t num_shards = Max(1, get_num_shards());
|
||||
int tenant_max_replication_apply_lag = num_shards * max_replication_apply_lag;
|
||||
int tenant_max_replication_flush_lag = num_shards * max_replication_flush_lag;
|
||||
int tenant_max_replication_write_lag = num_shards * max_replication_write_lag;
|
||||
|
||||
if ((flushPtr != InvalidXLogRecPtr && max_replication_flush_lag > 0 && myFlushLsn > flushPtr + max_replication_flush_lag * MB))
|
||||
{
|
||||
return (myFlushLsn - flushPtr - max_replication_flush_lag * MB);
|
||||
}
|
||||
if ((writePtr != InvalidXLogRecPtr && tenant_max_replication_write_lag > 0 && myFlushLsn > writePtr + tenant_max_replication_write_lag * MB))
|
||||
{
|
||||
return (myFlushLsn - writePtr - tenant_max_replication_write_lag * MB);
|
||||
}
|
||||
|
||||
if ((applyPtr != InvalidXLogRecPtr && max_replication_apply_lag > 0 && myFlushLsn > applyPtr + max_replication_apply_lag * MB))
|
||||
if ((flushPtr != InvalidXLogRecPtr && tenant_max_replication_flush_lag > 0 && myFlushLsn > flushPtr + tenant_max_replication_flush_lag * MB))
|
||||
{
|
||||
return (myFlushLsn - flushPtr - tenant_max_replication_flush_lag * MB);
|
||||
}
|
||||
|
||||
if ((applyPtr != InvalidXLogRecPtr && tenant_max_replication_apply_lag > 0 && myFlushLsn > applyPtr + tenant_max_replication_apply_lag * MB))
|
||||
{
|
||||
return (myFlushLsn - applyPtr - tenant_max_replication_apply_lag * MB);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
return (myFlushLsn - applyPtr - max_replication_apply_lag * MB);
|
||||
if ((writePtr != InvalidXLogRecPtr && max_replication_write_lag > 0 && myFlushLsn > writePtr + max_replication_write_lag * MB))
|
||||
{
|
||||
return (myFlushLsn - writePtr - max_replication_write_lag * MB);
|
||||
}
|
||||
|
||||
if ((flushPtr != InvalidXLogRecPtr && max_replication_flush_lag > 0 && myFlushLsn > flushPtr + max_replication_flush_lag * MB))
|
||||
{
|
||||
return (myFlushLsn - flushPtr - max_replication_flush_lag * MB);
|
||||
}
|
||||
|
||||
if ((applyPtr != InvalidXLogRecPtr && max_replication_apply_lag > 0 && myFlushLsn > applyPtr + max_replication_apply_lag * MB))
|
||||
{
|
||||
return (myFlushLsn - applyPtr - max_replication_apply_lag * MB);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* BEGIN_HADRON */
|
||||
if (databricks_max_wal_mb_per_second == -1) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
state = GetWalpropShmemState();
|
||||
if (state != NULL && !!pg_atomic_read_u32(&state->wal_rate_limiter.should_limit))
|
||||
{
|
||||
TimestampTz now = GetCurrentTimestamp();
|
||||
struct WalRateLimiter *limiter = &state->wal_rate_limiter;
|
||||
uint64 last_recorded_time = pg_atomic_read_u64(&limiter->last_recorded_time_us);
|
||||
if (now - last_recorded_time > USECS_PER_SEC)
|
||||
{
|
||||
/*
|
||||
* The backend has passed 1 second since the last recorded time and it's time to push more WAL.
* If the backends are pushing WAL too fast, the WAL proposer will rate limit them again.
|
||||
*/
|
||||
uint32 expected = true;
|
||||
pg_atomic_compare_exchange_u32(&state->wal_rate_limiter.should_limit, &expected, false);
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
/* END_HADRON */
|
||||
return 0;
|
||||
}
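In lakebase_mode the per-shard replication lag limits are multiplied by the shard count before being compared against how far the local flush LSN runs ahead of the slowest reported pageserver position. A simplified sketch of that threshold check (standalone Rust with made-up names; the real code works on XLogRecPtr values in C and checks write, flush and apply lag separately):

```rust
const MB: u64 = 1024 * 1024;

/// Bytes by which the local flush LSN exceeds the allowed lag, or 0 if within bounds.
fn lag_over_limit(my_flush_lsn: u64, remote_lsn: u64, per_shard_limit_mb: u64, num_shards: u64) -> u64 {
    // Each shard reports its own position, so the tenant-wide budget scales with the shard count.
    let tenant_limit = num_shards.max(1) * per_shard_limit_mb * MB;
    my_flush_lsn.saturating_sub(remote_lsn + tenant_limit)
}

fn main() {
    // With 4 shards and a 15MB per-shard write-lag limit, backpressure starts once the compute
    // is more than 60MB ahead of the slowest reported write position.
    assert_eq!(lag_over_limit(100 * MB, 30 * MB, 15, 4), 10 * MB);
    assert_eq!(lag_over_limit(80 * MB, 30 * MB, 15, 4), 0);
}
```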
|
||||
|
||||
@@ -482,9 +562,9 @@ startup_backpressure_wrap(void)
|
||||
if (AmStartupProcess() || !IsUnderPostmaster)
|
||||
return 0;
|
||||
|
||||
delay_backend_us = &backpressure_lag_impl;
|
||||
delay_backend_us = &hadron_backpressure_lag_impl;
|
||||
|
||||
return backpressure_lag_impl();
|
||||
return hadron_backpressure_lag_impl();
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -514,8 +594,10 @@ WalproposerShmemInit(void)
|
||||
pg_atomic_init_u64(&walprop_shared->backpressureThrottlingTime, 0);
|
||||
pg_atomic_init_u64(&walprop_shared->currentClusterSize, 0);
|
||||
/* BEGIN_HADRON */
|
||||
pg_atomic_init_u32(&walprop_shared->wal_rate_limiter.effective_max_wal_bytes_per_second, -1);
|
||||
pg_atomic_init_u32(&walprop_shared->wal_rate_limiter.should_limit, 0);
|
||||
pg_atomic_init_u64(&walprop_shared->wal_rate_limiter.last_recorded_time_us, 0);
|
||||
pg_atomic_init_u64(&walprop_shared->wal_rate_limiter.batch_start_time_us, 0);
|
||||
pg_atomic_init_u64(&walprop_shared->wal_rate_limiter.batch_end_time_us, 0);
|
||||
/* END_HADRON */
|
||||
}
|
||||
}
|
||||
@@ -530,8 +612,10 @@ WalproposerShmemInit_SyncSafekeeper(void)
|
||||
pg_atomic_init_u64(&walprop_shared->mineLastElectedTerm, 0);
|
||||
pg_atomic_init_u64(&walprop_shared->backpressureThrottlingTime, 0);
|
||||
/* BEGIN_HADRON */
|
||||
pg_atomic_init_u32(&walprop_shared->wal_rate_limiter.effective_max_wal_bytes_per_second, -1);
|
||||
pg_atomic_init_u32(&walprop_shared->wal_rate_limiter.should_limit, 0);
|
||||
pg_atomic_init_u64(&walprop_shared->wal_rate_limiter.last_recorded_time_us, 0);
|
||||
pg_atomic_init_u64(&walprop_shared->wal_rate_limiter.batch_start_time_us, 0);
|
||||
pg_atomic_init_u64(&walprop_shared->wal_rate_limiter.batch_end_time_us, 0);
|
||||
/* END_HADRON */
|
||||
}
|
||||
|
||||
@@ -563,7 +647,7 @@ backpressure_throttling_impl(void)
|
||||
return retry;
|
||||
|
||||
/* Calculate replicas lag */
|
||||
lag = backpressure_lag_impl();
|
||||
lag = hadron_backpressure_lag_impl();
|
||||
if (lag == 0)
|
||||
return retry;
|
||||
|
||||
@@ -659,7 +743,7 @@ record_pageserver_feedback(PageserverFeedback *ps_feedback, shardno_t num_shards
|
||||
|
||||
SpinLockAcquire(&walprop_shared->mutex);
|
||||
|
||||
// Hadron: Update the num_shards from the source-of-truth (shard map) lazily when we receive
// a new pageserver feedback.
|
||||
walprop_shared->num_shards = Max(walprop_shared->num_shards, num_shards);
|
||||
|
||||
@@ -1479,6 +1563,7 @@ XLogBroadcastWalProposer(WalProposer *wp)
|
||||
XLogRecPtr endptr;
|
||||
struct WalproposerShmemState *state = NULL;
|
||||
TimestampTz now = 0;
|
||||
int effective_max_wal_bytes_per_second = 0;
|
||||
|
||||
/* Start from the last sent position */
|
||||
startptr = sentPtr;
|
||||
@@ -1533,22 +1618,36 @@ XLogBroadcastWalProposer(WalProposer *wp)
|
||||
|
||||
/* BEGIN_HADRON */
|
||||
state = GetWalpropShmemState();
|
||||
if (databricks_max_wal_mb_per_second != -1 && state != NULL)
|
||||
effective_max_wal_bytes_per_second = pg_atomic_read_u32(&state->wal_rate_limiter.effective_max_wal_bytes_per_second);
|
||||
if (effective_max_wal_bytes_per_second != -1 && state != NULL)
|
||||
{
|
||||
uint64 max_wal_bytes = (uint64) databricks_max_wal_mb_per_second * 1024 * 1024;
|
||||
struct WalRateLimiter *limiter = &state->wal_rate_limiter;
|
||||
uint64 last_recorded_time = pg_atomic_read_u64(&limiter->last_recorded_time_us);
|
||||
if (now - last_recorded_time > USECS_PER_SEC)
|
||||
uint64 batch_end_time = pg_atomic_read_u64(&limiter->batch_end_time_us);
|
||||
if ( now >= batch_end_time )
|
||||
{
|
||||
/* Reset the rate limiter */
|
||||
// Reset the rate limiter to start a new batch
|
||||
limiter->sent_bytes = 0;
|
||||
pg_atomic_write_u64(&limiter->last_recorded_time_us, now);
|
||||
pg_atomic_write_u32(&limiter->should_limit, false);
|
||||
pg_atomic_write_u64(&limiter->batch_start_time_us, now);
|
||||
/* tentatively assign the batch end time as 1s from now. This could result in one of the following cases:
|
||||
1. If sent_bytes does not reach effective_max_wal_bytes_per_second in 1s,
|
||||
then we will reset the current batch and clear sent_bytes. No throttling happens.
|
||||
2. Otherwise, we will recompute the end time (below) based on how many bytes are actually written,
|
||||
and throttle PG until the batch end time. */
|
||||
pg_atomic_write_u64(&limiter->batch_end_time_us, now + USECS_PER_SEC);
|
||||
}
|
||||
limiter->sent_bytes += (endptr - startptr);
|
||||
if (limiter->sent_bytes > max_wal_bytes)
|
||||
if (limiter->sent_bytes > effective_max_wal_bytes_per_second)
|
||||
{
|
||||
uint64_t batch_start_time = pg_atomic_read_u64(&limiter->batch_start_time_us);
|
||||
uint64 throttle_usecs = USECS_PER_SEC * limiter->sent_bytes / Max(effective_max_wal_bytes_per_second, 1);
|
||||
if (throttle_usecs > kRateLimitMaxBatchUSecs){
|
||||
elog(LOG, "throttle_usecs %lu is too large, limiting to %lu", throttle_usecs, kRateLimitMaxBatchUSecs);
|
||||
throttle_usecs = kRateLimitMaxBatchUSecs;
|
||||
}
|
||||
|
||||
pg_atomic_write_u32(&limiter->should_limit, true);
|
||||
pg_atomic_write_u64(&limiter->batch_end_time_us, batch_start_time + throttle_usecs);
|
||||
}
|
||||
}
|
||||
/* END_HADRON */
|
||||
@@ -2052,7 +2151,7 @@ walprop_pg_process_safekeeper_feedback(WalProposer *wp, Safekeeper *sk)
|
||||
/* Only one main shard sends non-zero currentClusterSize */
|
||||
if (sk->appendResponse.ps_feedback.currentClusterSize > 0)
|
||||
SetNeonCurrentClusterSize(sk->appendResponse.ps_feedback.currentClusterSize);
|
||||
|
||||
|
||||
if (min_feedback.disk_consistent_lsn != standby_apply_lsn)
|
||||
{
|
||||
standby_apply_lsn = min_feedback.disk_consistent_lsn;
|
||||
|
||||
@@ -33,7 +33,6 @@ env_logger.workspace = true
|
||||
framed-websockets.workspace = true
|
||||
futures.workspace = true
|
||||
hashbrown.workspace = true
|
||||
hashlink.workspace = true
|
||||
hex.workspace = true
|
||||
hmac.workspace = true
|
||||
hostname.workspace = true
|
||||
@@ -54,6 +53,7 @@ json = { path = "../libs/proxy/json" }
|
||||
lasso = { workspace = true, features = ["multi-threaded"] }
|
||||
measured = { workspace = true, features = ["lasso"] }
|
||||
metrics.workspace = true
|
||||
moka.workspace = true
|
||||
once_cell.workspace = true
|
||||
opentelemetry = { workspace = true, features = ["trace"] }
|
||||
papaya = "0.2.0"
|
||||
@@ -107,10 +107,11 @@ uuid.workspace = true
|
||||
x509-cert.workspace = true
|
||||
redis.workspace = true
|
||||
zerocopy.workspace = true
|
||||
zeroize.workspace = true
|
||||
# uncomment this to use the real subzero-core crate
|
||||
# subzero-core = { git = "https://github.com/neondatabase/subzero", rev = "396264617e78e8be428682f87469bb25429af88a", features = ["postgresql"], optional = true }
|
||||
# this is a stub for the subzero-core crate
|
||||
subzero-core = { path = "./subzero_core", features = ["postgresql"], optional = true}
|
||||
subzero-core = { path = "../libs/proxy/subzero_core", features = ["postgresql"], optional = true}
|
||||
ouroboros = { version = "0.18", optional = true }
|
||||
|
||||
# jwt stuff
|
||||
|
||||
@@ -8,11 +8,12 @@ use tracing::{info, info_span};
|
||||
|
||||
use crate::auth::backend::ComputeUserInfo;
|
||||
use crate::cache::Cached;
|
||||
use crate::cache::node_info::CachedNodeInfo;
|
||||
use crate::compute::AuthInfo;
|
||||
use crate::config::AuthenticationConfig;
|
||||
use crate::context::RequestContext;
|
||||
use crate::control_plane::client::cplane_proxy_v1;
|
||||
use crate::control_plane::{self, CachedNodeInfo, NodeInfo};
|
||||
use crate::control_plane::{self, NodeInfo};
|
||||
use crate::error::{ReportableError, UserFacingError};
|
||||
use crate::pqproto::BeMessage;
|
||||
use crate::proxy::NeonOptions;
|
||||
|
||||
@@ -6,7 +6,7 @@ use crate::auth::{self, AuthFlow};
|
||||
use crate::config::AuthenticationConfig;
|
||||
use crate::context::RequestContext;
|
||||
use crate::control_plane::AuthSecret;
|
||||
use crate::intern::EndpointIdInt;
|
||||
use crate::intern::{EndpointIdInt, RoleNameInt};
|
||||
use crate::sasl;
|
||||
use crate::stream::{self, Stream};
|
||||
|
||||
@@ -25,13 +25,15 @@ pub(crate) async fn authenticate_cleartext(
|
||||
ctx.set_auth_method(crate::context::AuthMethod::Cleartext);
|
||||
|
||||
let ep = EndpointIdInt::from(&info.endpoint);
|
||||
let role = RoleNameInt::from(&info.user);
|
||||
|
||||
let auth_flow = AuthFlow::new(
|
||||
client,
|
||||
auth::CleartextPassword {
|
||||
secret,
|
||||
endpoint: ep,
|
||||
pool: config.thread_pool.clone(),
|
||||
role,
|
||||
pool: config.scram_thread_pool.clone(),
|
||||
},
|
||||
);
|
||||
let auth_outcome = {
|
||||
|
||||
@@ -16,16 +16,16 @@ use tracing::{debug, info};
|
||||
|
||||
use crate::auth::{self, ComputeUserInfoMaybeEndpoint, validate_password_and_exchange};
|
||||
use crate::cache::Cached;
|
||||
use crate::cache::node_info::CachedNodeInfo;
|
||||
use crate::config::AuthenticationConfig;
|
||||
use crate::context::RequestContext;
|
||||
use crate::control_plane::client::ControlPlaneClient;
|
||||
use crate::control_plane::errors::GetAuthInfoError;
|
||||
use crate::control_plane::messages::EndpointRateLimitConfig;
|
||||
use crate::control_plane::{
|
||||
self, AccessBlockerFlags, AuthSecret, CachedNodeInfo, ControlPlaneApi, EndpointAccessControl,
|
||||
RoleAccessControl,
|
||||
self, AccessBlockerFlags, AuthSecret, ControlPlaneApi, EndpointAccessControl, RoleAccessControl,
|
||||
};
|
||||
use crate::intern::EndpointIdInt;
|
||||
use crate::intern::{EndpointIdInt, RoleNameInt};
|
||||
use crate::pqproto::BeMessage;
|
||||
use crate::proxy::NeonOptions;
|
||||
use crate::proxy::wake_compute::WakeComputeBackend;
|
||||
@@ -273,9 +273,11 @@ async fn authenticate_with_secret(
|
||||
) -> auth::Result<ComputeCredentials> {
|
||||
if let Some(password) = unauthenticated_password {
|
||||
let ep = EndpointIdInt::from(&info.endpoint);
|
||||
let role = RoleNameInt::from(&info.user);
|
||||
|
||||
let auth_outcome =
|
||||
validate_password_and_exchange(&config.thread_pool, ep, &password, secret).await?;
|
||||
validate_password_and_exchange(&config.scram_thread_pool, ep, role, &password, secret)
|
||||
.await?;
|
||||
let keys = match auth_outcome {
|
||||
crate::sasl::Outcome::Success(key) => key,
|
||||
crate::sasl::Outcome::Failure(reason) => {
|
||||
@@ -433,11 +435,12 @@ mod tests {
|
||||
use super::auth_quirks;
|
||||
use super::jwt::JwkCache;
|
||||
use crate::auth::{ComputeUserInfoMaybeEndpoint, IpPattern};
|
||||
use crate::cache::node_info::CachedNodeInfo;
|
||||
use crate::config::AuthenticationConfig;
|
||||
use crate::context::RequestContext;
|
||||
use crate::control_plane::messages::EndpointRateLimitConfig;
|
||||
use crate::control_plane::{
|
||||
self, AccessBlockerFlags, CachedNodeInfo, EndpointAccessControl, RoleAccessControl,
|
||||
self, AccessBlockerFlags, EndpointAccessControl, RoleAccessControl,
|
||||
};
|
||||
use crate::proxy::NeonOptions;
|
||||
use crate::rate_limiter::EndpointRateLimiter;
|
||||
@@ -498,7 +501,7 @@ mod tests {
|
||||
|
||||
static CONFIG: Lazy<AuthenticationConfig> = Lazy::new(|| AuthenticationConfig {
|
||||
jwks_cache: JwkCache::default(),
|
||||
thread_pool: ThreadPool::new(1),
|
||||
scram_thread_pool: ThreadPool::new(1),
|
||||
scram_protocol_timeout: std::time::Duration::from_secs(5),
|
||||
ip_allowlist_check_enabled: true,
|
||||
is_vpc_acccess_proxy: false,
|
||||
|
||||
@@ -10,7 +10,7 @@ use super::backend::ComputeCredentialKeys;
|
||||
use super::{AuthError, PasswordHackPayload};
|
||||
use crate::context::RequestContext;
|
||||
use crate::control_plane::AuthSecret;
|
||||
use crate::intern::EndpointIdInt;
|
||||
use crate::intern::{EndpointIdInt, RoleNameInt};
|
||||
use crate::pqproto::{BeAuthenticationSaslMessage, BeMessage};
|
||||
use crate::sasl;
|
||||
use crate::scram::threadpool::ThreadPool;
|
||||
@@ -46,6 +46,7 @@ pub(crate) struct PasswordHack;
|
||||
pub(crate) struct CleartextPassword {
|
||||
pub(crate) pool: Arc<ThreadPool>,
|
||||
pub(crate) endpoint: EndpointIdInt,
|
||||
pub(crate) role: RoleNameInt,
|
||||
pub(crate) secret: AuthSecret,
|
||||
}
|
||||
|
||||
@@ -111,6 +112,7 @@ impl<S: AsyncRead + AsyncWrite + Unpin> AuthFlow<'_, S, CleartextPassword> {
|
||||
let outcome = validate_password_and_exchange(
|
||||
&self.state.pool,
|
||||
self.state.endpoint,
|
||||
self.state.role,
|
||||
password,
|
||||
self.state.secret,
|
||||
)
|
||||
@@ -165,13 +167,15 @@ impl<S: AsyncRead + AsyncWrite + Unpin> AuthFlow<'_, S, Scram<'_>> {
|
||||
pub(crate) async fn validate_password_and_exchange(
|
||||
pool: &ThreadPool,
|
||||
endpoint: EndpointIdInt,
|
||||
role: RoleNameInt,
|
||||
password: &[u8],
|
||||
secret: AuthSecret,
|
||||
) -> super::Result<sasl::Outcome<ComputeCredentialKeys>> {
|
||||
match secret {
|
||||
// perform scram authentication as both client and server to validate the keys
|
||||
AuthSecret::Scram(scram_secret) => {
|
||||
let outcome = crate::scram::exchange(pool, endpoint, &scram_secret, password).await?;
|
||||
let outcome =
|
||||
crate::scram::exchange(pool, endpoint, role, &scram_secret, password).await?;
|
||||
|
||||
let client_key = match outcome {
|
||||
sasl::Outcome::Success(client_key) => client_key,
|
||||
|
||||
@@ -29,7 +29,7 @@ use crate::config::{
|
||||
};
|
||||
use crate::control_plane::locks::ApiLocks;
|
||||
use crate::http::health_server::AppMetrics;
|
||||
use crate::metrics::{Metrics, ThreadPoolMetrics};
|
||||
use crate::metrics::{Metrics, ServiceInfo};
|
||||
use crate::rate_limiter::{EndpointRateLimiter, LeakyBucketConfig, RateBucketInfo};
|
||||
use crate::scram::threadpool::ThreadPool;
|
||||
use crate::serverless::cancel_set::CancelSet;
|
||||
@@ -114,8 +114,6 @@ pub async fn run() -> anyhow::Result<()> {
|
||||
let _panic_hook_guard = utils::logging::replace_panic_hook_with_tracing_panic_hook();
|
||||
let _sentry_guard = init_sentry(Some(GIT_VERSION.into()), &[]);
|
||||
|
||||
Metrics::install(Arc::new(ThreadPoolMetrics::new(0)));
|
||||
|
||||
// TODO: refactor these to use labels
|
||||
debug!("Version: {GIT_VERSION}");
|
||||
debug!("Build_tag: {BUILD_TAG}");
|
||||
@@ -207,6 +205,11 @@ pub async fn run() -> anyhow::Result<()> {
|
||||
endpoint_rate_limiter,
|
||||
);
|
||||
|
||||
Metrics::get()
|
||||
.service
|
||||
.info
|
||||
.set_label(ServiceInfo::running());
|
||||
|
||||
match futures::future::select(pin!(maintenance_tasks.join_next()), pin!(task)).await {
|
||||
// exit immediately on maintenance task completion
|
||||
Either::Left((Some(res), _)) => match crate::error::flatten_err(res)? {},
|
||||
@@ -279,7 +282,7 @@ fn build_config(args: &LocalProxyCliArgs) -> anyhow::Result<&'static ProxyConfig
|
||||
http_config,
|
||||
authentication_config: AuthenticationConfig {
|
||||
jwks_cache: JwkCache::default(),
|
||||
thread_pool: ThreadPool::new(0),
|
||||
scram_thread_pool: ThreadPool::new(0),
|
||||
scram_protocol_timeout: Duration::from_secs(10),
|
||||
ip_allowlist_check_enabled: true,
|
||||
is_vpc_acccess_proxy: false,
|
||||
|
||||
@@ -26,7 +26,7 @@ use utils::project_git_version;
|
||||
use utils::sentry_init::init_sentry;
|
||||
|
||||
use crate::context::RequestContext;
|
||||
use crate::metrics::{Metrics, ThreadPoolMetrics};
|
||||
use crate::metrics::{Metrics, ServiceInfo};
|
||||
use crate::pglb::TlsRequired;
|
||||
use crate::pqproto::FeStartupPacket;
|
||||
use crate::protocol2::ConnectionInfo;
|
||||
@@ -80,8 +80,6 @@ pub async fn run() -> anyhow::Result<()> {
|
||||
let _panic_hook_guard = utils::logging::replace_panic_hook_with_tracing_panic_hook();
|
||||
let _sentry_guard = init_sentry(Some(GIT_VERSION.into()), &[]);
|
||||
|
||||
Metrics::install(Arc::new(ThreadPoolMetrics::new(0)));
|
||||
|
||||
let args = cli().get_matches();
|
||||
let destination: String = args
|
||||
.get_one::<String>("dest")
|
||||
@@ -135,6 +133,12 @@ pub async fn run() -> anyhow::Result<()> {
|
||||
cancellation_token.clone(),
|
||||
))
|
||||
.map(crate::error::flatten_err);
|
||||
|
||||
Metrics::get()
|
||||
.service
|
||||
.info
|
||||
.set_label(ServiceInfo::running());
|
||||
|
||||
let signals_task = tokio::spawn(crate::signals::handle(cancellation_token, || {}));
|
||||
|
||||
// the signal task cant ever succeed.
|
||||
|
||||
@@ -40,7 +40,7 @@ use crate::config::{
|
||||
};
|
||||
use crate::context::parquet::ParquetUploadArgs;
|
||||
use crate::http::health_server::AppMetrics;
|
||||
use crate::metrics::Metrics;
|
||||
use crate::metrics::{Metrics, ServiceInfo};
|
||||
use crate::rate_limiter::{EndpointRateLimiter, RateBucketInfo, WakeComputeRateLimiter};
|
||||
use crate::redis::connection_with_credentials_provider::ConnectionWithCredentialsProvider;
|
||||
use crate::redis::kv_ops::RedisKVClient;
|
||||
@@ -535,12 +535,7 @@ pub async fn run() -> anyhow::Result<()> {
|
||||
// add a task to flush the db_schema cache every 10 minutes
|
||||
#[cfg(feature = "rest_broker")]
|
||||
if let Some(db_schema_cache) = &config.rest_config.db_schema_cache {
|
||||
maintenance_tasks.spawn(async move {
|
||||
loop {
|
||||
tokio::time::sleep(Duration::from_secs(600)).await;
|
||||
db_schema_cache.flush();
|
||||
}
|
||||
});
|
||||
maintenance_tasks.spawn(db_schema_cache.maintain());
|
||||
}
|
||||
|
||||
if let Some(metrics_config) = &config.metric_collection {
|
||||
@@ -590,6 +585,11 @@ pub async fn run() -> anyhow::Result<()> {
|
||||
}
|
||||
}
|
||||
|
||||
Metrics::get()
|
||||
.service
|
||||
.info
|
||||
.set_label(ServiceInfo::running());
|
||||
|
||||
let maintenance = loop {
|
||||
// get one complete task
|
||||
match futures::future::select(
|
||||
@@ -617,7 +617,12 @@ pub async fn run() -> anyhow::Result<()> {
|
||||
/// ProxyConfig is created at proxy startup, and lives forever.
|
||||
fn build_config(args: &ProxyCliArgs) -> anyhow::Result<&'static ProxyConfig> {
|
||||
let thread_pool = ThreadPool::new(args.scram_thread_pool_size);
|
||||
Metrics::install(thread_pool.metrics.clone());
|
||||
Metrics::get()
|
||||
.proxy
|
||||
.scram_pool
|
||||
.0
|
||||
.set(thread_pool.metrics.clone())
|
||||
.ok();
|
||||
|
||||
let tls_config = match (&args.tls_key, &args.tls_cert) {
|
||||
(Some(key_path), Some(cert_path)) => Some(config::configure_tls(
|
||||
@@ -690,7 +695,7 @@ fn build_config(args: &ProxyCliArgs) -> anyhow::Result<&'static ProxyConfig> {
|
||||
};
|
||||
let authentication_config = AuthenticationConfig {
|
||||
jwks_cache: JwkCache::default(),
|
||||
thread_pool,
|
||||
scram_thread_pool: thread_pool,
|
||||
scram_protocol_timeout: args.scram_protocol_timeout,
|
||||
ip_allowlist_check_enabled: !args.is_private_access_proxy,
|
||||
is_vpc_acccess_proxy: args.is_private_access_proxy,
|
||||
@@ -711,12 +716,7 @@ fn build_config(args: &ProxyCliArgs) -> anyhow::Result<&'static ProxyConfig> {
|
||||
info!("Using DbSchemaCache with options={db_schema_cache_config:?}");
|
||||
|
||||
let db_schema_cache = if args.is_rest_broker {
|
||||
Some(DbSchemaCache::new(
|
||||
"db_schema_cache",
|
||||
db_schema_cache_config.size,
|
||||
db_schema_cache_config.ttl,
|
||||
true,
|
||||
))
|
||||
Some(DbSchemaCache::new(db_schema_cache_config))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
127
proxy/src/cache/common.rs
vendored
127
proxy/src/cache/common.rs
vendored
@@ -1,4 +1,16 @@
|
||||
use std::ops::{Deref, DerefMut};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use moka::Expiry;
|
||||
use moka::notification::RemovalCause;
|
||||
|
||||
use crate::control_plane::messages::ControlPlaneErrorMessage;
|
||||
use crate::metrics::{
|
||||
CacheEviction, CacheKind, CacheOutcome, CacheOutcomeGroup, CacheRemovalCause, Metrics,
|
||||
};
|
||||
|
||||
/// Default TTL used when caching errors from control plane.
|
||||
pub const DEFAULT_ERROR_TTL: Duration = Duration::from_secs(30);
|
||||
|
||||
/// A generic trait which exposes types of cache's key and value,
|
||||
/// as well as the notion of cache entry invalidation.
|
||||
@@ -10,20 +22,16 @@ pub(crate) trait Cache {
|
||||
/// Entry's value.
|
||||
type Value;
|
||||
|
||||
/// Used for entry invalidation.
|
||||
type LookupInfo<Key>;
|
||||
|
||||
/// Invalidate an entry using a lookup info.
|
||||
/// We don't have an empty default impl because it's error-prone.
|
||||
fn invalidate(&self, _: &Self::LookupInfo<Self::Key>);
|
||||
fn invalidate(&self, _: &Self::Key);
|
||||
}
|
||||
|
||||
impl<C: Cache> Cache for &C {
|
||||
type Key = C::Key;
|
||||
type Value = C::Value;
|
||||
type LookupInfo<Key> = C::LookupInfo<Key>;
|
||||
|
||||
fn invalidate(&self, info: &Self::LookupInfo<Self::Key>) {
|
||||
fn invalidate(&self, info: &Self::Key) {
|
||||
C::invalidate(self, info);
|
||||
}
|
||||
}
|
||||
@@ -31,7 +39,7 @@ impl<C: Cache> Cache for &C {
|
||||
/// Wrapper for convenient entry invalidation.
|
||||
pub(crate) struct Cached<C: Cache, V = <C as Cache>::Value> {
|
||||
/// Cache + lookup info.
|
||||
pub(crate) token: Option<(C, C::LookupInfo<C::Key>)>,
|
||||
pub(crate) token: Option<(C, C::Key)>,
|
||||
|
||||
/// The value itself.
|
||||
pub(crate) value: V,
|
||||
@@ -43,23 +51,6 @@ impl<C: Cache, V> Cached<C, V> {
|
||||
Self { token: None, value }
|
||||
}
|
||||
|
||||
pub(crate) fn take_value(self) -> (Cached<C, ()>, V) {
|
||||
(
|
||||
Cached {
|
||||
token: self.token,
|
||||
value: (),
|
||||
},
|
||||
self.value,
|
||||
)
|
||||
}
|
||||
|
||||
pub(crate) fn map<U>(self, f: impl FnOnce(V) -> U) -> Cached<C, U> {
|
||||
Cached {
|
||||
token: self.token,
|
||||
value: f(self.value),
|
||||
}
|
||||
}
|
||||
|
||||
/// Drop this entry from a cache if it's still there.
|
||||
pub(crate) fn invalidate(self) -> V {
|
||||
if let Some((cache, info)) = &self.token {
|
||||
@@ -87,3 +78,91 @@ impl<C: Cache, V> DerefMut for Cached<C, V> {
|
||||
&mut self.value
|
||||
}
|
||||
}
|
||||
|
||||
pub type ControlPlaneResult<T> = Result<T, Box<ControlPlaneErrorMessage>>;
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
pub struct CplaneExpiry {
|
||||
pub error: Duration,
|
||||
}
|
||||
|
||||
impl Default for CplaneExpiry {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
error: DEFAULT_ERROR_TTL,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CplaneExpiry {
|
||||
pub fn expire_early<V>(
|
||||
&self,
|
||||
value: &ControlPlaneResult<V>,
|
||||
updated: Instant,
|
||||
) -> Option<Duration> {
|
||||
match value {
|
||||
Ok(_) => None,
|
||||
Err(err) => Some(self.expire_err_early(err, updated)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn expire_err_early(&self, err: &ControlPlaneErrorMessage, updated: Instant) -> Duration {
|
||||
err.status
|
||||
.as_ref()
|
||||
.and_then(|s| s.details.retry_info.as_ref())
|
||||
.map_or(self.error, |r| r.retry_at.into_std() - updated)
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> Expiry<K, ControlPlaneResult<V>> for CplaneExpiry {
|
||||
fn expire_after_create(
|
||||
&self,
|
||||
_key: &K,
|
||||
value: &ControlPlaneResult<V>,
|
||||
created_at: Instant,
|
||||
) -> Option<Duration> {
|
||||
self.expire_early(value, created_at)
|
||||
}
|
||||
|
||||
fn expire_after_update(
|
||||
&self,
|
||||
_key: &K,
|
||||
value: &ControlPlaneResult<V>,
|
||||
updated_at: Instant,
|
||||
_duration_until_expiry: Option<Duration>,
|
||||
) -> Option<Duration> {
|
||||
self.expire_early(value, updated_at)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn eviction_listener(kind: CacheKind, cause: RemovalCause) {
|
||||
let cause = match cause {
|
||||
RemovalCause::Expired => CacheRemovalCause::Expired,
|
||||
RemovalCause::Explicit => CacheRemovalCause::Explicit,
|
||||
RemovalCause::Replaced => CacheRemovalCause::Replaced,
|
||||
RemovalCause::Size => CacheRemovalCause::Size,
|
||||
};
|
||||
Metrics::get()
|
||||
.cache
|
||||
.evicted_total
|
||||
.inc(CacheEviction { cache: kind, cause });
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn count_cache_outcome<T>(kind: CacheKind, cache_result: Option<T>) -> Option<T> {
|
||||
let outcome = if cache_result.is_some() {
|
||||
CacheOutcome::Hit
|
||||
} else {
|
||||
CacheOutcome::Miss
|
||||
};
|
||||
Metrics::get().cache.request_total.inc(CacheOutcomeGroup {
|
||||
cache: kind,
|
||||
outcome,
|
||||
});
|
||||
cache_result
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn count_cache_insert(kind: CacheKind) {
|
||||
Metrics::get().cache.inserted_total.inc(kind);
|
||||
}
|
||||
|
||||
5
proxy/src/cache/mod.rs
vendored
5
proxy/src/cache/mod.rs
vendored
@@ -1,6 +1,5 @@
|
||||
pub(crate) mod common;
|
||||
pub(crate) mod node_info;
|
||||
pub(crate) mod project_info;
|
||||
mod timed_lru;
|
||||
|
||||
pub(crate) use common::{Cache, Cached};
|
||||
pub(crate) use timed_lru::TimedLru;
|
||||
pub(crate) use common::{Cached, ControlPlaneResult, CplaneExpiry};
|
||||
|
||||
60
proxy/src/cache/node_info.rs
vendored
Normal file
60
proxy/src/cache/node_info.rs
vendored
Normal file
@@ -0,0 +1,60 @@
|
||||
use crate::cache::common::{Cache, count_cache_insert, count_cache_outcome, eviction_listener};
|
||||
use crate::cache::{Cached, ControlPlaneResult, CplaneExpiry};
|
||||
use crate::config::CacheOptions;
|
||||
use crate::control_plane::NodeInfo;
|
||||
use crate::metrics::{CacheKind, Metrics};
|
||||
use crate::types::EndpointCacheKey;
|
||||
|
||||
pub(crate) struct NodeInfoCache(moka::sync::Cache<EndpointCacheKey, ControlPlaneResult<NodeInfo>>);
|
||||
pub(crate) type CachedNodeInfo = Cached<&'static NodeInfoCache, NodeInfo>;
|
||||
|
||||
impl Cache for NodeInfoCache {
|
||||
type Key = EndpointCacheKey;
|
||||
type Value = ControlPlaneResult<NodeInfo>;
|
||||
|
||||
fn invalidate(&self, info: &EndpointCacheKey) {
|
||||
self.0.invalidate(info);
|
||||
}
|
||||
}
|
||||
|
||||
impl NodeInfoCache {
|
||||
pub fn new(config: CacheOptions) -> Self {
|
||||
let builder = moka::sync::Cache::builder()
|
||||
.name("node_info")
|
||||
.expire_after(CplaneExpiry::default());
|
||||
let builder = config.moka(builder);
|
||||
|
||||
if let Some(size) = config.size {
|
||||
Metrics::get()
|
||||
.cache
|
||||
.capacity
|
||||
.set(CacheKind::NodeInfo, size as i64);
|
||||
}
|
||||
|
||||
let builder = builder
|
||||
.eviction_listener(|_k, _v, cause| eviction_listener(CacheKind::NodeInfo, cause));
|
||||
|
||||
Self(builder.build())
|
||||
}
|
||||
|
||||
pub fn insert(&self, key: EndpointCacheKey, value: ControlPlaneResult<NodeInfo>) {
|
||||
count_cache_insert(CacheKind::NodeInfo);
|
||||
self.0.insert(key, value);
|
||||
}
|
||||
|
||||
pub fn get(&self, key: &EndpointCacheKey) -> Option<ControlPlaneResult<NodeInfo>> {
|
||||
count_cache_outcome(CacheKind::NodeInfo, self.0.get(key))
|
||||
}
|
||||
|
||||
pub fn get_entry(
|
||||
&'static self,
|
||||
key: &EndpointCacheKey,
|
||||
) -> Option<ControlPlaneResult<CachedNodeInfo>> {
|
||||
self.get(key).map(|res| {
|
||||
res.map(|value| Cached {
|
||||
token: Some((self, key.clone())),
|
||||
value,
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
388
proxy/src/cache/project_info.rs
vendored
388
proxy/src/cache/project_info.rs
vendored
@@ -1,84 +1,20 @@
|
||||
use std::collections::{HashMap, HashSet, hash_map};
|
||||
use std::collections::HashSet;
|
||||
use std::convert::Infallible;
|
||||
use std::time::Duration;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use clashmap::ClashMap;
|
||||
use clashmap::mapref::one::Ref;
|
||||
use rand::Rng;
|
||||
use tokio::time::Instant;
|
||||
use moka::sync::Cache;
|
||||
use tracing::{debug, info};
|
||||
|
||||
use crate::cache::common::{
|
||||
ControlPlaneResult, CplaneExpiry, count_cache_insert, count_cache_outcome, eviction_listener,
|
||||
};
|
||||
use crate::config::ProjectInfoCacheOptions;
|
||||
use crate::control_plane::messages::{ControlPlaneErrorMessage, Reason};
|
||||
use crate::control_plane::{EndpointAccessControl, RoleAccessControl};
|
||||
use crate::intern::{AccountIdInt, EndpointIdInt, ProjectIdInt, RoleNameInt};
|
||||
use crate::metrics::{CacheKind, Metrics};
|
||||
use crate::types::{EndpointId, RoleName};
|
||||
|
||||
#[async_trait]
|
||||
pub(crate) trait ProjectInfoCache {
|
||||
fn invalidate_endpoint_access(&self, endpoint_id: EndpointIdInt);
|
||||
fn invalidate_endpoint_access_for_project(&self, project_id: ProjectIdInt);
|
||||
fn invalidate_endpoint_access_for_org(&self, account_id: AccountIdInt);
|
||||
fn invalidate_role_secret_for_project(&self, project_id: ProjectIdInt, role_name: RoleNameInt);
|
||||
}
|
||||
|
||||
struct Entry<T> {
|
||||
expires_at: Instant,
|
||||
value: T,
|
||||
}
|
||||
|
||||
impl<T> Entry<T> {
|
||||
pub(crate) fn new(value: T, ttl: Duration) -> Self {
|
||||
Self {
|
||||
expires_at: Instant::now() + ttl,
|
||||
value,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn get(&self) -> Option<&T> {
|
||||
(!self.is_expired()).then_some(&self.value)
|
||||
}
|
||||
|
||||
fn is_expired(&self) -> bool {
|
||||
self.expires_at <= Instant::now()
|
||||
}
|
||||
}
|
||||
|
||||
struct EndpointInfo {
|
||||
role_controls: HashMap<RoleNameInt, Entry<ControlPlaneResult<RoleAccessControl>>>,
|
||||
controls: Option<Entry<ControlPlaneResult<EndpointAccessControl>>>,
|
||||
}
|
||||
|
||||
type ControlPlaneResult<T> = Result<T, Box<ControlPlaneErrorMessage>>;
|
||||
|
||||
impl EndpointInfo {
|
||||
pub(crate) fn get_role_secret_with_ttl(
|
||||
&self,
|
||||
role_name: RoleNameInt,
|
||||
) -> Option<(ControlPlaneResult<RoleAccessControl>, Duration)> {
|
||||
let entry = self.role_controls.get(&role_name)?;
|
||||
let ttl = entry.expires_at - Instant::now();
|
||||
Some((entry.get()?.clone(), ttl))
|
||||
}
|
||||
|
||||
pub(crate) fn get_controls_with_ttl(
|
||||
&self,
|
||||
) -> Option<(ControlPlaneResult<EndpointAccessControl>, Duration)> {
|
||||
let entry = self.controls.as_ref()?;
|
||||
let ttl = entry.expires_at - Instant::now();
|
||||
Some((entry.get()?.clone(), ttl))
|
||||
}
|
||||
|
||||
pub(crate) fn invalidate_endpoint(&mut self) {
|
||||
self.controls = None;
|
||||
}
|
||||
|
||||
pub(crate) fn invalidate_role_secret(&mut self, role_name: RoleNameInt) {
|
||||
self.role_controls.remove(&role_name);
|
||||
}
|
||||
}
|
||||
|
||||
/// Cache for project info.
|
||||
/// This is used to cache auth data for endpoints.
|
||||
/// Invalidation is done by console notifications or by TTL (if console notifications are disabled).
|
||||
@@ -86,8 +22,9 @@ impl EndpointInfo {
|
||||
/// We also store endpoint-to-project mapping in the cache, to be able to access per-endpoint data.
|
||||
/// One may ask, why the data is stored per project, when on the user request there is only data about the endpoint available?
|
||||
/// On the cplane side updates are done per project (or per branch), so it's easier to invalidate the whole project cache.
|
||||
pub struct ProjectInfoCacheImpl {
|
||||
cache: ClashMap<EndpointIdInt, EndpointInfo>,
|
||||
pub struct ProjectInfoCache {
|
||||
role_controls: Cache<(EndpointIdInt, RoleNameInt), ControlPlaneResult<RoleAccessControl>>,
|
||||
ep_controls: Cache<EndpointIdInt, ControlPlaneResult<EndpointAccessControl>>,
|
||||
|
||||
project2ep: ClashMap<ProjectIdInt, HashSet<EndpointIdInt>>,
|
||||
// FIXME(stefan): we need a way to GC the account2ep map.
|
||||
@@ -96,16 +33,13 @@ pub struct ProjectInfoCacheImpl {
|
||||
config: ProjectInfoCacheOptions,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ProjectInfoCache for ProjectInfoCacheImpl {
|
||||
fn invalidate_endpoint_access(&self, endpoint_id: EndpointIdInt) {
|
||||
impl ProjectInfoCache {
|
||||
pub fn invalidate_endpoint_access(&self, endpoint_id: EndpointIdInt) {
|
||||
info!("invalidating endpoint access for `{endpoint_id}`");
|
||||
if let Some(mut endpoint_info) = self.cache.get_mut(&endpoint_id) {
|
||||
endpoint_info.invalidate_endpoint();
|
||||
}
|
||||
self.ep_controls.invalidate(&endpoint_id);
|
||||
}
|
||||
|
||||
fn invalidate_endpoint_access_for_project(&self, project_id: ProjectIdInt) {
|
||||
pub fn invalidate_endpoint_access_for_project(&self, project_id: ProjectIdInt) {
|
||||
info!("invalidating endpoint access for project `{project_id}`");
|
||||
let endpoints = self
|
||||
.project2ep
|
||||
@@ -113,13 +47,11 @@ impl ProjectInfoCache for ProjectInfoCacheImpl {
|
||||
.map(|kv| kv.value().clone())
|
||||
.unwrap_or_default();
|
||||
for endpoint_id in endpoints {
|
||||
if let Some(mut endpoint_info) = self.cache.get_mut(&endpoint_id) {
|
||||
endpoint_info.invalidate_endpoint();
|
||||
}
|
||||
self.ep_controls.invalidate(&endpoint_id);
|
||||
}
|
||||
}
|
||||
|
||||
fn invalidate_endpoint_access_for_org(&self, account_id: AccountIdInt) {
|
||||
pub fn invalidate_endpoint_access_for_org(&self, account_id: AccountIdInt) {
|
||||
info!("invalidating endpoint access for org `{account_id}`");
|
||||
let endpoints = self
|
||||
.account2ep
|
||||
@@ -127,13 +59,15 @@ impl ProjectInfoCache for ProjectInfoCacheImpl {
|
||||
.map(|kv| kv.value().clone())
|
||||
.unwrap_or_default();
|
||||
for endpoint_id in endpoints {
|
||||
if let Some(mut endpoint_info) = self.cache.get_mut(&endpoint_id) {
|
||||
endpoint_info.invalidate_endpoint();
|
||||
}
|
||||
self.ep_controls.invalidate(&endpoint_id);
|
||||
}
|
||||
}
|
||||
|
||||
fn invalidate_role_secret_for_project(&self, project_id: ProjectIdInt, role_name: RoleNameInt) {
|
||||
pub fn invalidate_role_secret_for_project(
|
||||
&self,
|
||||
project_id: ProjectIdInt,
|
||||
role_name: RoleNameInt,
|
||||
) {
|
||||
info!(
|
||||
"invalidating role secret for project_id `{}` and role_name `{}`",
|
||||
project_id, role_name,
|
||||
@@ -144,47 +78,73 @@ impl ProjectInfoCache for ProjectInfoCacheImpl {
|
||||
.map(|kv| kv.value().clone())
|
||||
.unwrap_or_default();
|
||||
for endpoint_id in endpoints {
|
||||
if let Some(mut endpoint_info) = self.cache.get_mut(&endpoint_id) {
|
||||
endpoint_info.invalidate_role_secret(role_name);
|
||||
}
|
||||
self.role_controls.invalidate(&(endpoint_id, role_name));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ProjectInfoCacheImpl {
|
||||
impl ProjectInfoCache {
|
||||
pub(crate) fn new(config: ProjectInfoCacheOptions) -> Self {
|
||||
Metrics::get().cache.capacity.set(
|
||||
CacheKind::ProjectInfoRoles,
|
||||
(config.size * config.max_roles) as i64,
|
||||
);
|
||||
Metrics::get()
|
||||
.cache
|
||||
.capacity
|
||||
.set(CacheKind::ProjectInfoEndpoints, config.size as i64);
|
||||
|
||||
// we cache errors for 30 seconds, unless retry_at is set.
|
||||
let expiry = CplaneExpiry::default();
|
||||
Self {
|
||||
cache: ClashMap::new(),
|
||||
role_controls: Cache::builder()
|
||||
.name("project_info_roles")
|
||||
.eviction_listener(|_k, _v, cause| {
|
||||
eviction_listener(CacheKind::ProjectInfoRoles, cause);
|
||||
})
|
||||
.max_capacity(config.size * config.max_roles)
|
||||
.time_to_live(config.ttl)
|
||||
.expire_after(expiry)
|
||||
.build(),
|
||||
ep_controls: Cache::builder()
|
||||
.name("project_info_endpoints")
|
||||
.eviction_listener(|_k, _v, cause| {
|
||||
eviction_listener(CacheKind::ProjectInfoEndpoints, cause);
|
||||
})
|
||||
.max_capacity(config.size)
|
||||
.time_to_live(config.ttl)
|
||||
.expire_after(expiry)
|
||||
.build(),
|
||||
project2ep: ClashMap::new(),
|
||||
account2ep: ClashMap::new(),
|
||||
config,
|
||||
}
|
||||
}
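The constructor above swaps the hand-rolled ClashMap entries and GC worker for two moka caches whose capacity and TTL come straight from ProjectInfoCacheOptions, with evictions reported through the cache metrics. A stripped-down sketch of that builder pattern with plain string keys (illustrative only; the listener here just counts evictions instead of mapping RemovalCause into the proxy's Metrics registry):

```rust
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Duration;

fn main() {
    let evictions = Arc::new(AtomicU64::new(0));
    let listener_counter = Arc::clone(&evictions);

    let cache: moka::sync::Cache<String, String> = moka::sync::Cache::builder()
        .name("project_info_endpoints")
        .max_capacity(2)                       // config.size
        .time_to_live(Duration::from_secs(60)) // config.ttl
        .eviction_listener(move |_key, _value, _cause| {
            // The real listener maps RemovalCause into CacheRemovalCause and bumps a counter metric.
            listener_counter.fetch_add(1, Ordering::Relaxed);
        })
        .build();

    for i in 0..4 {
        cache.insert(format!("ep-{i}"), "controls".to_string());
    }
    cache.run_pending_tasks(); // flush deferred work, as the gc_worker loop now does
    assert!(cache.entry_count() <= 2);
    println!("evictions so far: {}", evictions.load(Ordering::Relaxed));
}
```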
|
||||
|
||||
fn get_endpoint_cache(
|
||||
&self,
|
||||
endpoint_id: &EndpointId,
|
||||
) -> Option<Ref<'_, EndpointIdInt, EndpointInfo>> {
|
||||
let endpoint_id = EndpointIdInt::get(endpoint_id)?;
|
||||
self.cache.get(&endpoint_id)
|
||||
}
|
||||
|
||||
pub(crate) fn get_role_secret_with_ttl(
|
||||
pub(crate) fn get_role_secret(
|
||||
&self,
|
||||
endpoint_id: &EndpointId,
|
||||
role_name: &RoleName,
|
||||
) -> Option<(ControlPlaneResult<RoleAccessControl>, Duration)> {
|
||||
) -> Option<ControlPlaneResult<RoleAccessControl>> {
|
||||
let endpoint_id = EndpointIdInt::get(endpoint_id)?;
|
||||
let role_name = RoleNameInt::get(role_name)?;
|
||||
let endpoint_info = self.get_endpoint_cache(endpoint_id)?;
|
||||
endpoint_info.get_role_secret_with_ttl(role_name)
|
||||
|
||||
count_cache_outcome(
|
||||
CacheKind::ProjectInfoRoles,
|
||||
self.role_controls.get(&(endpoint_id, role_name)),
|
||||
)
|
||||
}
|
||||
|
||||
pub(crate) fn get_endpoint_access_with_ttl(
|
||||
pub(crate) fn get_endpoint_access(
|
||||
&self,
|
||||
endpoint_id: &EndpointId,
|
||||
) -> Option<(ControlPlaneResult<EndpointAccessControl>, Duration)> {
|
||||
let endpoint_info = self.get_endpoint_cache(endpoint_id)?;
|
||||
endpoint_info.get_controls_with_ttl()
|
||||
) -> Option<ControlPlaneResult<EndpointAccessControl>> {
|
||||
let endpoint_id = EndpointIdInt::get(endpoint_id)?;
|
||||
|
||||
count_cache_outcome(
|
||||
CacheKind::ProjectInfoEndpoints,
|
||||
self.ep_controls.get(&endpoint_id),
|
||||
)
|
||||
}
|
||||
|
||||
pub(crate) fn insert_endpoint_access(
|
||||
@@ -203,34 +163,17 @@ impl ProjectInfoCacheImpl {
|
||||
self.insert_project2endpoint(project_id, endpoint_id);
|
||||
}
|
||||
|
||||
if self.cache.len() >= self.config.size {
|
||||
// If there are too many entries, wait until the next gc cycle.
|
||||
return;
|
||||
}
|
||||
|
||||
debug!(
|
||||
key = &*endpoint_id,
|
||||
"created a cache entry for endpoint access"
|
||||
);
|
||||
|
||||
let controls = Some(Entry::new(Ok(controls), self.config.ttl));
|
||||
let role_controls = Entry::new(Ok(role_controls), self.config.ttl);
|
||||
count_cache_insert(CacheKind::ProjectInfoEndpoints);
|
||||
count_cache_insert(CacheKind::ProjectInfoRoles);
|
||||
|
||||
match self.cache.entry(endpoint_id) {
|
||||
clashmap::Entry::Vacant(e) => {
|
||||
e.insert(EndpointInfo {
|
||||
role_controls: HashMap::from_iter([(role_name, role_controls)]),
|
||||
controls,
|
||||
});
|
||||
}
|
||||
clashmap::Entry::Occupied(mut e) => {
|
||||
let ep = e.get_mut();
|
||||
ep.controls = controls;
|
||||
if ep.role_controls.len() < self.config.max_roles {
|
||||
ep.role_controls.insert(role_name, role_controls);
|
||||
}
|
||||
}
|
||||
}
|
||||
self.ep_controls.insert(endpoint_id, Ok(controls));
|
||||
self.role_controls
|
||||
.insert((endpoint_id, role_name), Ok(role_controls));
|
||||
}
|
||||
|
||||
pub(crate) fn insert_endpoint_access_err(
|
||||
@@ -238,55 +181,34 @@ impl ProjectInfoCacheImpl {
|
||||
endpoint_id: EndpointIdInt,
|
||||
role_name: RoleNameInt,
|
||||
msg: Box<ControlPlaneErrorMessage>,
|
||||
ttl: Option<Duration>,
|
||||
) {
|
||||
if self.cache.len() >= self.config.size {
|
||||
// If there are too many entries, wait until the next gc cycle.
|
||||
return;
|
||||
}
|
||||
|
||||
debug!(
|
||||
key = &*endpoint_id,
|
||||
"created a cache entry for an endpoint access error"
|
||||
);
|
||||
|
||||
let ttl = ttl.unwrap_or(self.config.ttl);
|
||||
|
||||
let controls = if msg.get_reason() == Reason::RoleProtected {
|
||||
// RoleProtected is the only role-specific error that control plane can give us.
|
||||
// If a given role name does not exist, it still returns a successful response,
|
||||
// just with an empty secret.
|
||||
None
|
||||
} else {
|
||||
// We can cache all the other errors in EndpointInfo.controls,
|
||||
// because they don't depend on what role name we pass to control plane.
|
||||
Some(Entry::new(Err(msg.clone()), ttl))
|
||||
};
|
||||
|
||||
let role_controls = Entry::new(Err(msg), ttl);
|
||||
|
||||
match self.cache.entry(endpoint_id) {
|
||||
clashmap::Entry::Vacant(e) => {
|
||||
e.insert(EndpointInfo {
|
||||
role_controls: HashMap::from_iter([(role_name, role_controls)]),
|
||||
controls,
|
||||
// RoleProtected is the only role-specific error that control plane can give us.
|
||||
// If a given role name does not exist, it still returns a successful response,
|
||||
// just with an empty secret.
|
||||
if msg.get_reason() != Reason::RoleProtected {
|
||||
// We can cache all the other errors in ep_controls because they don't
|
||||
// depend on what role name we pass to control plane.
|
||||
self.ep_controls
|
||||
.entry(endpoint_id)
|
||||
.and_compute_with(|entry| match entry {
|
||||
// leave the entry alone if it's already Ok
|
||||
Some(entry) if entry.value().is_ok() => moka::ops::compute::Op::Nop,
|
||||
// replace the entry
|
||||
_ => {
|
||||
count_cache_insert(CacheKind::ProjectInfoEndpoints);
|
||||
moka::ops::compute::Op::Put(Err(msg.clone()))
|
||||
}
|
||||
});
|
||||
}
|
||||
clashmap::Entry::Occupied(mut e) => {
|
||||
let ep = e.get_mut();
|
||||
if let Some(entry) = &ep.controls
|
||||
&& !entry.is_expired()
|
||||
&& entry.value.is_ok()
|
||||
{
|
||||
// If we have cached non-expired, non-error controls, keep them.
|
||||
} else {
|
||||
ep.controls = controls;
|
||||
}
|
||||
if ep.role_controls.len() < self.config.max_roles {
|
||||
ep.role_controls.insert(role_name, role_controls);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
count_cache_insert(CacheKind::ProjectInfoRoles);
|
||||
self.role_controls
|
||||
.insert((endpoint_id, role_name), Err(msg));
|
||||
}
|
||||
|
||||
fn insert_project2endpoint(&self, project_id: ProjectIdInt, endpoint_id: EndpointIdInt) {
|
||||
@@ -307,73 +229,35 @@ impl ProjectInfoCacheImpl {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn maybe_invalidate_role_secret(&self, endpoint_id: &EndpointId, role_name: &RoleName) {
|
||||
let Some(endpoint_id) = EndpointIdInt::get(endpoint_id) else {
|
||||
return;
|
||||
};
|
||||
let Some(role_name) = RoleNameInt::get(role_name) else {
|
||||
return;
|
||||
};
|
||||
|
||||
let Some(mut endpoint_info) = self.cache.get_mut(&endpoint_id) else {
|
||||
return;
|
||||
};
|
||||
|
||||
let entry = endpoint_info.role_controls.entry(role_name);
|
||||
let hash_map::Entry::Occupied(role_controls) = entry else {
|
||||
return;
|
||||
};
|
||||
|
||||
if role_controls.get().is_expired() {
|
||||
role_controls.remove();
|
||||
}
|
||||
pub fn maybe_invalidate_role_secret(&self, _endpoint_id: &EndpointId, _role_name: &RoleName) {
|
||||
// TODO: Expire the value early if the key is idle.
|
||||
// Currently not an issue as we would just use the TTL to decide, which is what already happens.
|
||||
}
|
||||
|
||||
pub async fn gc_worker(&self) -> anyhow::Result<Infallible> {
|
||||
let mut interval =
|
||||
tokio::time::interval(self.config.gc_interval / (self.cache.shards().len()) as u32);
|
||||
let mut interval = tokio::time::interval(self.config.gc_interval);
|
||||
loop {
|
||||
interval.tick().await;
|
||||
if self.cache.len() < self.config.size {
|
||||
// If there are not too many entries, wait until the next gc cycle.
|
||||
continue;
|
||||
}
|
||||
self.gc();
|
||||
self.ep_controls.run_pending_tasks();
|
||||
self.role_controls.run_pending_tasks();
|
||||
}
|
||||
}
|
||||
|
||||
fn gc(&self) {
|
||||
let shard = rand::rng().random_range(0..self.project2ep.shards().len());
|
||||
debug!(shard, "project_info_cache: performing epoch reclamation");
|
||||
|
||||
// acquire a random shard lock
|
||||
let mut removed = 0;
|
||||
let shard = self.project2ep.shards()[shard].write();
|
||||
for (_, endpoints) in shard.iter() {
|
||||
for endpoint in endpoints {
|
||||
self.cache.remove(endpoint);
|
||||
removed += 1;
|
||||
}
|
||||
}
|
||||
// We can drop this shard only after making sure that all endpoints are removed.
|
||||
drop(shard);
|
||||
info!("project_info_cache: removed {removed} endpoints");
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use super::*;
|
||||
use crate::control_plane::messages::{Details, EndpointRateLimitConfig, ErrorInfo, Status};
|
||||
use crate::control_plane::{AccessBlockerFlags, AuthSecret};
|
||||
use crate::scram::ServerSecret;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_project_info_cache_settings() {
|
||||
tokio::time::pause();
|
||||
let cache = ProjectInfoCacheImpl::new(ProjectInfoCacheOptions {
|
||||
size: 2,
|
||||
let cache = ProjectInfoCache::new(ProjectInfoCacheOptions {
|
||||
size: 1,
|
||||
max_roles: 2,
|
||||
ttl: Duration::from_secs(1),
|
||||
gc_interval: Duration::from_secs(600),
|
||||
@@ -423,22 +307,17 @@ mod tests {
|
||||
},
|
||||
);
|
||||
|
||||
let (cached, ttl) = cache
.get_role_secret_with_ttl(&endpoint_id, &user1)
.unwrap();
let cached = cache.get_role_secret(&endpoint_id, &user1).unwrap();
assert_eq!(cached.unwrap().secret, secret1);
assert_eq!(ttl, cache.config.ttl);

let (cached, ttl) = cache
.get_role_secret_with_ttl(&endpoint_id, &user2)
.unwrap();
let cached = cache.get_role_secret(&endpoint_id, &user2).unwrap();
assert_eq!(cached.unwrap().secret, secret2);
assert_eq!(ttl, cache.config.ttl);

// Shouldn't add more than 2 roles.
let user3: RoleName = "user3".into();
let secret3 = Some(AuthSecret::Scram(ServerSecret::mock([3; 32])));

cache.role_controls.run_pending_tasks();
cache.insert_endpoint_access(
account_id,
project_id,
@@ -455,31 +334,18 @@ mod tests {
},
);

assert!(
cache
.get_role_secret_with_ttl(&endpoint_id, &user3)
.is_none()
);
cache.role_controls.run_pending_tasks();
assert_eq!(cache.role_controls.entry_count(), 2);

let cached = cache
.get_endpoint_access_with_ttl(&endpoint_id)
.unwrap()
.0
.unwrap();
assert_eq!(cached.allowed_ips, allowed_ips);
tokio::time::sleep(Duration::from_secs(2)).await;

tokio::time::advance(Duration::from_secs(2)).await;
let cached = cache.get_role_secret_with_ttl(&endpoint_id, &user1);
assert!(cached.is_none());
let cached = cache.get_role_secret_with_ttl(&endpoint_id, &user2);
assert!(cached.is_none());
let cached = cache.get_endpoint_access_with_ttl(&endpoint_id);
assert!(cached.is_none());
cache.role_controls.run_pending_tasks();
assert_eq!(cache.role_controls.entry_count(), 0);
}

#[tokio::test]
async fn test_caching_project_info_errors() {
let cache = ProjectInfoCacheImpl::new(ProjectInfoCacheOptions {
let cache = ProjectInfoCache::new(ProjectInfoCacheOptions {
size: 10,
max_roles: 10,
ttl: Duration::from_secs(1),
@@ -519,34 +385,23 @@ mod tests {
status: None,
});

let get_role_secret = |endpoint_id, role_name| {
cache
.get_role_secret_with_ttl(endpoint_id, role_name)
.unwrap()
.0
};
let get_endpoint_access =
|endpoint_id| cache.get_endpoint_access_with_ttl(endpoint_id).unwrap().0;
let get_role_secret =
|endpoint_id, role_name| cache.get_role_secret(endpoint_id, role_name).unwrap();
let get_endpoint_access = |endpoint_id| cache.get_endpoint_access(endpoint_id).unwrap();

// stores role-specific errors only for get_role_secret
cache.insert_endpoint_access_err(
(&endpoint_id).into(),
(&user1).into(),
role_msg.clone(),
None,
);
cache.insert_endpoint_access_err((&endpoint_id).into(), (&user1).into(), role_msg.clone());
assert_eq!(
get_role_secret(&endpoint_id, &user1).unwrap_err().error,
role_msg.error
);
assert!(cache.get_endpoint_access_with_ttl(&endpoint_id).is_none());
assert!(cache.get_endpoint_access(&endpoint_id).is_none());

// stores non-role specific errors for both get_role_secret and get_endpoint_access
cache.insert_endpoint_access_err(
(&endpoint_id).into(),
(&user1).into(),
generic_msg.clone(),
None,
);
assert_eq!(
get_role_secret(&endpoint_id, &user1).unwrap_err().error,
@@ -558,11 +413,7 @@ mod tests {
);

// error isn't returned for other roles in the same endpoint
assert!(
cache
.get_role_secret_with_ttl(&endpoint_id, &user2)
.is_none()
);
assert!(cache.get_role_secret(&endpoint_id, &user2).is_none());

// success for a role does not overwrite errors for other roles
cache.insert_endpoint_access(
@@ -590,7 +441,6 @@ mod tests {
(&endpoint_id).into(),
(&user2).into(),
generic_msg.clone(),
None,
);
assert!(get_role_secret(&endpoint_id, &user2).is_err());
assert!(get_endpoint_access(&endpoint_id).is_ok());

262
proxy/src/cache/timed_lru.rs
vendored
262
proxy/src/cache/timed_lru.rs
vendored
@@ -1,262 +0,0 @@
use std::borrow::Borrow;
use std::hash::Hash;
use std::time::{Duration, Instant};

// This seems to make more sense than `lru` or `cached`:
//
// * `near/nearcore` ditched `cached` in favor of `lru`
// (https://github.com/near/nearcore/issues?q=is%3Aissue+lru+is%3Aclosed).
//
// * `lru` methods use an obscure `KeyRef` type in their constraints (which is deliberately excluded from docs).
// This severely hinders its usage both in terms of creating wrappers and supported key types.
//
// On the other hand, `hashlink` has good download stats and appears to be maintained.
use hashlink::{LruCache, linked_hash_map::RawEntryMut};
use tracing::debug;

use super::Cache;
use super::common::Cached;

/// An implementation of timed LRU cache with fixed capacity.
/// Key properties:
///
/// * Whenever a new entry is inserted, the least recently accessed one is evicted.
/// The cache also keeps track of entry's insertion time (`created_at`) and TTL (`expires_at`).
///
/// * If `update_ttl_on_retrieval` is `true`, when the entry is about to be retrieved, we check its expiration timestamp.
/// If the entry has expired, we remove it from the cache; otherwise we bump the
/// expiration timestamp (e.g. +5mins) and change its place in the LRU list to prolong
/// its existence.
///
/// * There's an API for immediate invalidation (removal) of a cache entry;
/// it's useful in case we know for sure that the entry is no longer correct.
/// See [`Cached`] for more information.
///
/// * Expired entries are kept in the cache until they are evicted by the LRU policy,
/// or by a successful lookup (i.e. the entry hasn't expired yet).
/// There is no background job to reap the expired records.
///
/// * It's possible for an entry that has not yet expired to be evicted
/// before expired items. That's a bit wasteful, but probably fine in practice.
pub(crate) struct TimedLru<K, V> {
/// Cache's name for tracing.
name: &'static str,

/// The underlying cache implementation.
cache: parking_lot::Mutex<LruCache<K, Entry<V>>>,

/// Default time-to-live of a single entry.
ttl: Duration,

update_ttl_on_retrieval: bool,
}

impl<K: Hash + Eq, V> Cache for TimedLru<K, V> {
type Key = K;
type Value = V;
type LookupInfo<Key> = Key;

fn invalidate(&self, info: &Self::LookupInfo<K>) {
self.invalidate_raw(info);
}
}

struct Entry<T> {
created_at: Instant,
expires_at: Instant,
ttl: Duration,
update_ttl_on_retrieval: bool,
value: T,
}

impl<K: Hash + Eq, V> TimedLru<K, V> {
/// Construct a new LRU cache with timed entries.
pub(crate) fn new(
name: &'static str,
capacity: usize,
ttl: Duration,
update_ttl_on_retrieval: bool,
) -> Self {
Self {
name,
cache: LruCache::new(capacity).into(),
ttl,
update_ttl_on_retrieval,
}
}

/// Drop an entry from the cache if it's outdated.
#[tracing::instrument(level = "debug", fields(cache = self.name), skip_all)]
fn invalidate_raw(&self, key: &K) {
// Do costly things before taking the lock.
let mut cache = self.cache.lock();
let entry = match cache.raw_entry_mut().from_key(key) {
RawEntryMut::Vacant(_) => return,
RawEntryMut::Occupied(x) => x.remove(),
};
drop(cache); // drop lock before logging

let Entry {
created_at,
expires_at,
..
} = entry;

debug!(
?created_at,
?expires_at,
"processed a cache entry invalidation event"
);
}

/// Try retrieving an entry by its key, then execute `extract` if it exists.
#[tracing::instrument(level = "debug", fields(cache = self.name), skip_all)]
fn get_raw<Q, R>(&self, key: &Q, extract: impl FnOnce(&K, &Entry<V>) -> R) -> Option<R>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let now = Instant::now();

// Do costly things before taking the lock.
let mut cache = self.cache.lock();
let mut raw_entry = match cache.raw_entry_mut().from_key(key) {
RawEntryMut::Vacant(_) => return None,
RawEntryMut::Occupied(x) => x,
};

// Immediately drop the entry if it has expired.
let entry = raw_entry.get();
if entry.expires_at <= now {
raw_entry.remove();
return None;
}

let value = extract(raw_entry.key(), entry);
let (created_at, expires_at) = (entry.created_at, entry.expires_at);

// Update the deadline and the entry's position in the LRU list.
let deadline = now.checked_add(raw_entry.get().ttl).expect("time overflow");
if raw_entry.get().update_ttl_on_retrieval {
raw_entry.get_mut().expires_at = deadline;
}
raw_entry.to_back();

drop(cache); // drop lock before logging
debug!(
created_at = format_args!("{created_at:?}"),
old_expires_at = format_args!("{expires_at:?}"),
new_expires_at = format_args!("{deadline:?}"),
"accessed a cache entry"
);

Some(value)
}

/// Insert an entry to the cache. If an entry with the same key already
/// existed, return the previous value and its creation timestamp.
#[tracing::instrument(level = "debug", fields(cache = self.name), skip_all)]
fn insert_raw(&self, key: K, value: V) -> (Instant, Option<V>) {
self.insert_raw_ttl(key, value, self.ttl, self.update_ttl_on_retrieval)
}

/// Insert an entry to the cache. If an entry with the same key already
/// existed, return the previous value and its creation timestamp.
#[tracing::instrument(level = "debug", fields(cache = self.name), skip_all)]
fn insert_raw_ttl(
&self,
key: K,
value: V,
ttl: Duration,
update: bool,
) -> (Instant, Option<V>) {
let created_at = Instant::now();
let expires_at = created_at.checked_add(ttl).expect("time overflow");

let entry = Entry {
created_at,
expires_at,
ttl,
update_ttl_on_retrieval: update,
value,
};

// Do costly things before taking the lock.
let old = self
.cache
.lock()
.insert(key, entry)
.map(|entry| entry.value);

debug!(
created_at = format_args!("{created_at:?}"),
expires_at = format_args!("{expires_at:?}"),
replaced = old.is_some(),
"created a cache entry"
);

(created_at, old)
}
}

impl<K: Hash + Eq + Clone, V: Clone> TimedLru<K, V> {
pub(crate) fn insert_ttl(&self, key: K, value: V, ttl: Duration) {
self.insert_raw_ttl(key, value, ttl, false);
}

#[cfg(feature = "rest_broker")]
pub(crate) fn insert(&self, key: K, value: V) {
self.insert_raw_ttl(key, value, self.ttl, self.update_ttl_on_retrieval);
}

pub(crate) fn insert_unit(&self, key: K, value: V) -> (Option<V>, Cached<&Self, ()>) {
let (_, old) = self.insert_raw(key.clone(), value);

let cached = Cached {
token: Some((self, key)),
value: (),
};

(old, cached)
}

#[cfg(feature = "rest_broker")]
pub(crate) fn flush(&self) {
let now = Instant::now();
let mut cache = self.cache.lock();

// Collect keys of expired entries first
let expired_keys: Vec<_> = cache
.iter()
.filter_map(|(key, entry)| {
if entry.expires_at <= now {
Some(key.clone())
} else {
None
}
})
.collect();

// Remove expired entries
for key in expired_keys {
cache.remove(&key);
}
}
}

impl<K: Hash + Eq, V: Clone> TimedLru<K, V> {
/// Retrieve a cached entry in a convenient wrapper, alongside timing information.
pub(crate) fn get_with_created_at<Q>(
&self,
key: &Q,
) -> Option<Cached<&Self, (<Self as Cache>::Value, Instant)>>
where
K: Borrow<Q> + Clone,
Q: Hash + Eq + ?Sized,
{
self.get_raw(key, |key, entry| Cached {
token: Some((self, key.clone())),
value: (entry.value.clone(), entry.created_at),
})
}
}
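The removed TimedLru above appears to be superseded by moka-backed caches, configured through the new CacheOptions::moka builder further down in this diff. A minimal, self-contained sketch of how the same behaviours (capacity-bound eviction, absolute TTL, idle-based TTL refresh, immediate invalidation) look on moka's sync cache; the key/value types and numbers here are illustrative placeholders, not the proxy's actual configuration:

use std::time::Duration;
use moka::sync::Cache;

fn main() {
    // Capacity plays the role of the old fixed-capacity LRU; TTL/TTI mirror
    // `expires_at` and `update_ttl_on_retrieval` from the deleted implementation.
    let cache: Cache<String, String> = Cache::builder()
        .max_capacity(4000)
        .time_to_live(Duration::from_secs(4 * 60)) // absolute TTL
        .time_to_idle(Duration::from_secs(60))     // refreshed on access
        .build();

    cache.insert("endpoint".to_owned(), "node-info".to_owned());
    assert_eq!(cache.get("endpoint").as_deref(), Some("node-info"));

    // Immediate invalidation, the moka counterpart of `Cache::invalidate` above.
    cache.invalidate("endpoint");
    assert!(cache.get("endpoint").is_none());
}

moka performs expiry housekeeping internally (the `run_pending_tasks` calls in the tests above flush it deterministically), which is presumably why the hand-rolled `flush` and per-lookup expiry checks disappear.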
@@ -25,6 +25,7 @@ use crate::control_plane::messages::MetricsAuxInfo;
use crate::error::{ReportableError, UserFacingError};
use crate::metrics::{Metrics, NumDbConnectionsGuard};
use crate::pqproto::StartupMessageParams;
use crate::proxy::connect_compute::TlsNegotiation;
use crate::proxy::neon_option;
use crate::types::Host;

@@ -84,6 +85,14 @@ pub(crate) enum ConnectionError {

#[error("error acquiring resource permit: {0}")]
TooManyConnectionAttempts(#[from] ApiLockError),

#[cfg(test)]
#[error("retryable: {retryable}, wakeable: {wakeable}, kind: {kind:?}")]
TestError {
retryable: bool,
wakeable: bool,
kind: crate::error::ErrorKind,
},
}

impl UserFacingError for ConnectionError {
@@ -94,6 +103,8 @@ impl UserFacingError for ConnectionError {
"Failed to acquire permit to connect to the database. Too many database connection attempts are currently ongoing.".to_owned()
}
ConnectionError::TlsError(_) => COULD_NOT_CONNECT.to_owned(),
#[cfg(test)]
ConnectionError::TestError { .. } => self.to_string(),
}
}
}
@@ -104,6 +115,8 @@ impl ReportableError for ConnectionError {
ConnectionError::TlsError(_) => crate::error::ErrorKind::Compute,
ConnectionError::WakeComputeError(e) => e.get_error_kind(),
ConnectionError::TooManyConnectionAttempts(e) => e.get_error_kind(),
#[cfg(test)]
ConnectionError::TestError { kind, .. } => *kind,
}
}
}
@@ -256,6 +269,7 @@ impl ConnectInfo {
async fn connect_raw(
&self,
config: &ComputeConfig,
tls: TlsNegotiation,
) -> Result<(SocketAddr, MaybeTlsStream<TcpStream, RustlsStream>), TlsError> {
let timeout = config.timeout;

@@ -298,7 +312,7 @@ impl ConnectInfo {
match connect_once(&*addrs).await {
Ok((sockaddr, stream)) => Ok((
sockaddr,
tls::connect_tls(stream, self.ssl_mode, config, host).await?,
tls::connect_tls(stream, self.ssl_mode, config, host, tls).await?,
)),
Err(err) => {
warn!("couldn't connect to compute node at {host}:{port}: {err}");
@@ -329,9 +343,10 @@ impl ConnectInfo {
ctx: &RequestContext,
aux: &MetricsAuxInfo,
config: &ComputeConfig,
tls: TlsNegotiation,
) -> Result<ComputeConnection, ConnectionError> {
let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Compute);
let (socket_addr, stream) = self.connect_raw(config).await?;
let (socket_addr, stream) = self.connect_raw(config, tls).await?;
drop(pause);

tracing::Span::current().record("compute_id", tracing::field::display(&aux.compute_id));

@@ -7,6 +7,7 @@ use thiserror::Error;
use tokio::io::{AsyncRead, AsyncWrite};

use crate::pqproto::request_tls;
use crate::proxy::connect_compute::TlsNegotiation;
use crate::proxy::retry::CouldRetry;

#[derive(Debug, Error)]
@@ -35,6 +36,7 @@ pub async fn connect_tls<S, T>(
mode: SslMode,
tls: &T,
host: &str,
negotiation: TlsNegotiation,
) -> Result<MaybeTlsStream<S, T::Stream>, TlsError>
where
S: AsyncRead + AsyncWrite + Unpin + Send,
@@ -49,12 +51,15 @@ where
SslMode::Prefer | SslMode::Require => {}
}

if !request_tls(&mut stream).await? {
if SslMode::Require == mode {
return Err(TlsError::Required);
}

return Ok(MaybeTlsStream::Raw(stream));
match negotiation {
// No TLS request needed
TlsNegotiation::Direct => {}
// TLS request successful
TlsNegotiation::Postgres if request_tls(&mut stream).await? => {}
// TLS request failed but is required
TlsNegotiation::Postgres if SslMode::Require == mode => return Err(TlsError::Required),
// TLS request failed but is not required
TlsNegotiation::Postgres => return Ok(MaybeTlsStream::Raw(stream)),
}

Ok(MaybeTlsStream::Tls(

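The new `negotiation` parameter folds the old "always send a TLS request" path into the match shown above. A condensed sketch of that decision table, using simplified stand-in types rather than the proxy's own (`Outcome`, `server_accepted_tls` and the reduced `SslMode` are illustrative only):

// Stand-in types; only the branching mirrors the hunk above.
#[derive(Clone, Copy)]
enum TlsNegotiation { Direct, Postgres }

#[derive(Clone, Copy, PartialEq)]
enum SslMode { Prefer, Require }

#[derive(Debug, PartialEq)]
enum Outcome { Tls, Raw, Error }

fn decide(negotiation: TlsNegotiation, mode: SslMode, server_accepted_tls: bool) -> Outcome {
    match negotiation {
        // Direct TLS: no request round-trip, go straight to the handshake.
        TlsNegotiation::Direct => Outcome::Tls,
        // Postgres-style negotiation: the server agreed to upgrade.
        TlsNegotiation::Postgres if server_accepted_tls => Outcome::Tls,
        // Server refused, but the mode demands TLS.
        TlsNegotiation::Postgres if mode == SslMode::Require => Outcome::Error,
        // Server refused and TLS is optional: stay on the raw stream.
        TlsNegotiation::Postgres => Outcome::Raw,
    }
}

fn main() {
    assert_eq!(decide(TlsNegotiation::Direct, SslMode::Prefer, false), Outcome::Tls);
    assert_eq!(decide(TlsNegotiation::Postgres, SslMode::Require, false), Outcome::Error);
    assert_eq!(decide(TlsNegotiation::Postgres, SslMode::Prefer, false), Outcome::Raw);
}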
@@ -19,7 +19,7 @@ use crate::control_plane::messages::{EndpointJwksResponse, JwksSettings};
use crate::ext::TaskExt;
use crate::intern::RoleNameInt;
use crate::rate_limiter::{RateLimitAlgorithm, RateLimiterConfig};
use crate::scram::threadpool::ThreadPool;
use crate::scram;
use crate::serverless::GlobalConnPoolOptions;
use crate::serverless::cancel_set::CancelSet;
#[cfg(feature = "rest_broker")]
@@ -75,7 +75,7 @@ pub struct HttpConfig {
}

pub struct AuthenticationConfig {
pub thread_pool: Arc<ThreadPool>,
pub scram_thread_pool: Arc<scram::threadpool::ThreadPool>,
pub scram_protocol_timeout: tokio::time::Duration,
pub ip_allowlist_check_enabled: bool,
pub is_vpc_acccess_proxy: bool,
@@ -107,20 +107,23 @@ pub fn remote_storage_from_toml(s: &str) -> anyhow::Result<RemoteStorageConfig>
#[derive(Debug)]
pub struct CacheOptions {
/// Max number of entries.
pub size: usize,
pub size: Option<u64>,
/// Entry's time-to-live.
pub ttl: Duration,
pub absolute_ttl: Option<Duration>,
/// Entry's time-to-idle.
pub idle_ttl: Option<Duration>,
}

impl CacheOptions {
/// Default options for [`crate::control_plane::NodeInfoCache`].
pub const CACHE_DEFAULT_OPTIONS: &'static str = "size=4000,ttl=4m";
/// Default options for [`crate::cache::node_info::NodeInfoCache`].
pub const CACHE_DEFAULT_OPTIONS: &'static str = "size=4000,idle_ttl=4m";

/// Parse cache options passed via cmdline.
/// Example: [`Self::CACHE_DEFAULT_OPTIONS`].
fn parse(options: &str) -> anyhow::Result<Self> {
let mut size = None;
let mut ttl = None;
let mut absolute_ttl = None;
let mut idle_ttl = None;

for option in options.split(',') {
let (key, value) = option
@@ -129,21 +132,34 @@ impl CacheOptions {

match key {
"size" => size = Some(value.parse()?),
"ttl" => ttl = Some(humantime::parse_duration(value)?),
"absolute_ttl" | "ttl" => absolute_ttl = Some(humantime::parse_duration(value)?),
"idle_ttl" | "tti" => idle_ttl = Some(humantime::parse_duration(value)?),
unknown => bail!("unknown key: {unknown}"),
}
}

// TTL doesn't matter if cache is always empty.
if let Some(0) = size {
ttl.get_or_insert(Duration::default());
}

Ok(Self {
size: size.context("missing `size`")?,
ttl: ttl.context("missing `ttl`")?,
size,
absolute_ttl,
idle_ttl,
})
}

pub fn moka<K, V, C>(
&self,
mut builder: moka::sync::CacheBuilder<K, V, C>,
) -> moka::sync::CacheBuilder<K, V, C> {
if let Some(size) = self.size {
builder = builder.max_capacity(size);
}
if let Some(ttl) = self.absolute_ttl {
builder = builder.time_to_live(ttl);
}
if let Some(tti) = self.idle_ttl {
builder = builder.time_to_idle(tti);
}
builder
}
}

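To make the accepted option grammar concrete, here is a self-contained sketch of the same key set (`size`, `absolute_ttl`/`ttl`, `idle_ttl`/`tti`) parsed with the humantime crate, as the code above does; `parse_cache_opts` is an illustrative helper, not part of the proxy crate:

use std::time::Duration;

// Illustrative only: mirrors the option keys accepted by CacheOptions::parse above.
fn parse_cache_opts(s: &str) -> Result<(Option<u64>, Option<Duration>, Option<Duration>), String> {
    let (mut size, mut absolute_ttl, mut idle_ttl) = (None, None, None);
    for option in s.split(',') {
        let (key, value) = option
            .split_once('=')
            .ok_or_else(|| format!("bad option: {option}"))?;
        match key {
            "size" => size = Some(value.parse::<u64>().map_err(|e| e.to_string())?),
            "absolute_ttl" | "ttl" => {
                absolute_ttl = Some(humantime::parse_duration(value).map_err(|e| e.to_string())?);
            }
            "idle_ttl" | "tti" => {
                idle_ttl = Some(humantime::parse_duration(value).map_err(|e| e.to_string())?);
            }
            unknown => return Err(format!("unknown key: {unknown}")),
        }
    }
    Ok((size, absolute_ttl, idle_ttl))
}

fn main() {
    // The new default option string sets a size and an idle TTL but no absolute TTL.
    let (size, ttl, tti) = parse_cache_opts("size=4000,idle_ttl=4m").unwrap();
    assert_eq!(size, Some(4000));
    assert_eq!(ttl, None);
    assert_eq!(tti, Some(Duration::from_secs(4 * 60)));
}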
impl FromStr for CacheOptions {
@@ -159,17 +175,17 @@ impl FromStr for CacheOptions {
#[derive(Debug)]
pub struct ProjectInfoCacheOptions {
/// Max number of entries.
pub size: usize,
pub size: u64,
/// Entry's time-to-live.
pub ttl: Duration,
/// Max number of roles per endpoint.
pub max_roles: usize,
pub max_roles: u64,
/// Gc interval.
pub gc_interval: Duration,
}

impl ProjectInfoCacheOptions {
/// Default options for [`crate::control_plane::NodeInfoCache`].
/// Default options for [`crate::cache::project_info::ProjectInfoCache`].
pub const CACHE_DEFAULT_OPTIONS: &'static str =
"size=10000,ttl=4m,max_roles=10,gc_interval=60m";

@@ -496,21 +512,37 @@ mod tests {

#[test]
fn test_parse_cache_options() -> anyhow::Result<()> {
let CacheOptions { size, ttl } = "size=4096,ttl=5min".parse()?;
assert_eq!(size, 4096);
assert_eq!(ttl, Duration::from_secs(5 * 60));
let CacheOptions {
size,
absolute_ttl,
idle_ttl: _,
} = "size=4096,ttl=5min".parse()?;
assert_eq!(size, Some(4096));
assert_eq!(absolute_ttl, Some(Duration::from_secs(5 * 60)));

let CacheOptions { size, ttl } = "ttl=4m,size=2".parse()?;
assert_eq!(size, 2);
assert_eq!(ttl, Duration::from_secs(4 * 60));
let CacheOptions {
size,
absolute_ttl,
idle_ttl: _,
} = "ttl=4m,size=2".parse()?;
assert_eq!(size, Some(2));
assert_eq!(absolute_ttl, Some(Duration::from_secs(4 * 60)));

let CacheOptions { size, ttl } = "size=0,ttl=1s".parse()?;
assert_eq!(size, 0);
assert_eq!(ttl, Duration::from_secs(1));
let CacheOptions {
size,
absolute_ttl,
idle_ttl: _,
} = "size=0,ttl=1s".parse()?;
assert_eq!(size, Some(0));
assert_eq!(absolute_ttl, Some(Duration::from_secs(1)));

let CacheOptions { size, ttl } = "size=0".parse()?;
assert_eq!(size, 0);
assert_eq!(ttl, Duration::default());
let CacheOptions {
size,
absolute_ttl,
idle_ttl: _,
} = "size=0".parse()?;
assert_eq!(size, Some(0));
assert_eq!(absolute_ttl, None);

Ok(())
}

@@ -16,8 +16,9 @@ use crate::pglb::ClientRequestError;
use crate::pglb::handshake::{HandshakeData, handshake};
use crate::pglb::passthrough::ProxyPassthrough;
use crate::protocol2::{ConnectHeader, ConnectionInfo, read_proxy_protocol};
use crate::proxy::connect_compute::{TcpMechanism, connect_to_compute};
use crate::proxy::{ErrorSource, forward_compute_params_to_client, send_client_greeting};
use crate::proxy::{
ErrorSource, connect_compute, forward_compute_params_to_client, send_client_greeting,
};
use crate::util::run_until_cancelled;

pub async fn task_main(
@@ -215,14 +216,11 @@ pub(crate) async fn handle_client<S: AsyncRead + AsyncWrite + Unpin + Send>(
};
auth_info.set_startup_params(&params, true);

let mut node = connect_to_compute(
let mut node = connect_compute::connect_to_compute(
ctx,
&TcpMechanism {
locks: &config.connect_compute_locks,
},
config,
&node_info,
config.wake_compute_retry_config,
&config.connect_to_compute,
connect_compute::TlsNegotiation::Postgres,
)
.or_else(|e| async { Err(stream.throw_error(e, Some(ctx)).await) })
.await?;

Some files were not shown because too many files have changed in this diff.