Merge commit 'b623fbae0' into problame/standby-horizon-leases
.github/workflows/benchbase_tpcc.yml (vendored, new file, +384 lines)
@@ -0,0 +1,384 @@
|
||||
name: TPC-C like benchmark using benchbase
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# * is a special character in YAML so you have to quote this string
|
||||
# ┌───────────── minute (0 - 59)
|
||||
# │ ┌───────────── hour (0 - 23)
|
||||
# │ │ ┌───────────── day of the month (1 - 31)
|
||||
# │ │ │ ┌───────────── month (1 - 12 or JAN-DEC)
|
||||
# │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT)
|
||||
- cron: '0 6 * * *' # run once a day at 6 AM UTC
|
||||
workflow_dispatch: # adds ability to run this manually
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash -euxo pipefail {0}
|
||||
|
||||
concurrency:
|
||||
# Allow only one workflow globally because we do not want to be too noisy in the production environment
|
||||
group: benchbase-tpcc-workflow
|
||||
cancel-in-progress: false
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
benchbase-tpcc:
|
||||
strategy:
|
||||
fail-fast: false # allow other variants to continue even if one fails
|
||||
matrix:
|
||||
include:
|
||||
- warehouses: 50 # defines the number of warehouses and is used to compute the number of terminals
|
||||
max_rate: 800 # measured max TPS at this scale factor, based on experiments. Adjust if performance is better/worse
|
||||
min_cu: 0.25 # simulate the free tier plan (0.25-2 CU)
|
||||
max_cu: 2
|
||||
- warehouses: 500 # serverless plan (2-8 CU)
|
||||
max_rate: 2000
|
||||
min_cu: 2
|
||||
max_cu: 8
|
||||
- warehouses: 1000 # business plan (2-16 CU)
|
||||
max_rate: 2900
|
||||
min_cu: 2
|
||||
max_cu: 16
|
||||
max-parallel: 1 # we want to run each workload size sequentially to avoid noisy neighbors
|
||||
permissions:
|
||||
contents: write
|
||||
statuses: write
|
||||
id-token: write # aws-actions/configure-aws-credentials
|
||||
env:
|
||||
PG_CONFIG: /tmp/neon/pg_install/v17/bin/pg_config
|
||||
PSQL: /tmp/neon/pg_install/v17/bin/psql
|
||||
PG_17_LIB_PATH: /tmp/neon/pg_install/v17/lib
|
||||
POSTGRES_VERSION: 17
|
||||
runs-on: [ self-hosted, us-east-2, x64 ]
|
||||
timeout-minutes: 1440
|
||||
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Configure AWS credentials # necessary to download artefacts
|
||||
uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2
|
||||
with:
|
||||
aws-region: eu-central-1
|
||||
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||
role-duration-seconds: 18000 # 5 hours is currently max associated with IAM role
|
||||
|
||||
- name: Download Neon artifact
|
||||
uses: ./.github/actions/download
|
||||
with:
|
||||
name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact
|
||||
path: /tmp/neon/
|
||||
prefix: latest
|
||||
aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||
|
||||
- name: Create Neon Project
|
||||
id: create-neon-project-tpcc
|
||||
uses: ./.github/actions/neon-project-create
|
||||
with:
|
||||
region_id: aws-us-east-2
|
||||
postgres_version: ${{ env.POSTGRES_VERSION }}
|
||||
compute_units: '[${{ matrix.min_cu }}, ${{ matrix.max_cu }}]'
|
||||
api_key: ${{ secrets.NEON_PRODUCTION_API_KEY_4_BENCHMARKS }}
|
||||
api_host: console.neon.tech # production (!)
|
||||
|
||||
- name: Initialize Neon project
|
||||
env:
|
||||
BENCHMARK_TPCC_CONNSTR: ${{ steps.create-neon-project-tpcc.outputs.dsn }}
|
||||
PROJECT_ID: ${{ steps.create-neon-project-tpcc.outputs.project_id }}
|
||||
run: |
|
||||
echo "Initializing Neon project with project_id: ${PROJECT_ID}"
|
||||
export LD_LIBRARY_PATH=${PG_17_LIB_PATH}
|
||||
|
||||
# Retry logic for psql connection with 1 minute sleep between attempts
|
||||
for attempt in {1..3}; do
|
||||
echo "Attempt ${attempt}/3: Creating extensions in Neon project"
|
||||
if ${PSQL} "${BENCHMARK_TPCC_CONNSTR}" -c "CREATE EXTENSION IF NOT EXISTS neon; CREATE EXTENSION IF NOT EXISTS neon_utils;"; then
|
||||
echo "Successfully created extensions"
|
||||
break
|
||||
else
|
||||
echo "Failed to create extensions on attempt ${attempt}"
|
||||
if [ ${attempt} -lt 3 ]; then
|
||||
echo "Waiting 60 seconds before retry..."
|
||||
sleep 60
|
||||
else
|
||||
echo "All attempts failed, exiting"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
echo "BENCHMARK_TPCC_CONNSTR=${BENCHMARK_TPCC_CONNSTR}" >> $GITHUB_ENV
|
||||
|
||||
- name: Generate BenchBase workload configuration
|
||||
env:
|
||||
WAREHOUSES: ${{ matrix.warehouses }}
|
||||
MAX_RATE: ${{ matrix.max_rate }}
|
||||
run: |
|
||||
echo "Generating BenchBase configs for warehouses: ${WAREHOUSES}, max_rate: ${MAX_RATE}"
|
||||
|
||||
# Extract hostname and password from connection string
|
||||
# Format: postgresql://username:password@hostname/database?params (no port for Neon)
|
||||
HOSTNAME=$(echo "${BENCHMARK_TPCC_CONNSTR}" | sed -n 's|.*://[^:]*:[^@]*@\([^/]*\)/.*|\1|p')
|
||||
PASSWORD=$(echo "${BENCHMARK_TPCC_CONNSTR}" | sed -n 's|.*://[^:]*:\([^@]*\)@.*|\1|p')
|
||||
|
||||
echo "Extracted hostname: ${HOSTNAME}"
|
||||
|
||||
# Use runner temp (NVMe) as working directory
|
||||
cd "${RUNNER_TEMP}"
|
||||
|
||||
# Copy the generator script
|
||||
cp "${GITHUB_WORKSPACE}/test_runner/performance/benchbase_tpc_c_helpers/generate_workload_size.py" .
|
||||
|
||||
# Generate configs and scripts
|
||||
python3 generate_workload_size.py \
|
||||
--warehouses ${WAREHOUSES} \
|
||||
--max-rate ${MAX_RATE} \
|
||||
--hostname ${HOSTNAME} \
|
||||
--password ${PASSWORD} \
|
||||
--runner-arch ${{ runner.arch }}
|
||||
|
||||
# Fix path mismatch: move generated configs and scripts to expected locations
|
||||
mv ../configs ./configs
|
||||
mv ../scripts ./scripts
|
||||
|
||||
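The two sed expressions above pull the hostname and password out of the Neon DSN described in the format comment. For readers who prefer a structured parser, here is a minimal Python sketch of the same extraction using urllib.parse; the DSN shown is a made-up placeholder, not a real project or credential.

```python
from urllib.parse import unquote, urlparse

# Hypothetical Neon DSN in the format noted above (no explicit port).
connstr = "postgresql://user:secret@ep-example-123456.us-east-2.aws.neon.tech/neondb?sslmode=require"

parsed = urlparse(connstr)
hostname = parsed.hostname            # ep-example-123456.us-east-2.aws.neon.tech
password = unquote(parsed.password)   # secret (unquoted in case it is percent-encoded)

print(hostname, password)
```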
- name: Prepare database (load data)
|
||||
env:
|
||||
WAREHOUSES: ${{ matrix.warehouses }}
|
||||
run: |
|
||||
cd "${RUNNER_TEMP}"
|
||||
|
||||
echo "Loading ${WAREHOUSES} warehouses into database..."
|
||||
|
||||
# Run the loader script and capture output to log file while preserving stdout/stderr
|
||||
./scripts/load_${WAREHOUSES}_warehouses.sh 2>&1 | tee "load_${WAREHOUSES}_warehouses.log"
|
||||
|
||||
echo "Database loading completed"
|
||||
|
||||
- name: Run TPC-C benchmark (warmup phase, then benchmark at 70% of configured max TPS)
|
||||
env:
|
||||
WAREHOUSES: ${{ matrix.warehouses }}
|
||||
run: |
|
||||
cd "${RUNNER_TEMP}"
|
||||
|
||||
echo "Running TPC-C benchmark with ${WAREHOUSES} warehouses..."
|
||||
|
||||
# Run the optimal rate benchmark
|
||||
./scripts/execute_${WAREHOUSES}_warehouses_opt_rate.sh
|
||||
|
||||
echo "Benchmark execution completed"
|
||||
|
||||
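The step name above refers to running at 70% of the configured max TPS. Assuming that factor is applied directly to the max_rate values from the job matrix (the actual computation lives in generate_workload_size.py, which is not shown here), the target rates work out roughly as follows.

```python
# Illustrative only: max_rate values from the matrix above, 70% factor from the step name.
max_rates = {50: 800, 500: 2000, 1000: 2900}  # warehouses -> measured max TPS

for warehouses, max_rate in max_rates.items():
    target_tps = int(max_rate * 0.7)
    print(f"{warehouses} warehouses: ~{target_tps} TPS")  # 560, 1400, 2030
```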
- name: Run TPC-C benchmark (warmup phase, then ramp down TPS and up again in 5 minute intervals)
|
||||
|
||||
env:
|
||||
WAREHOUSES: ${{ matrix.warehouses }}
|
||||
run: |
|
||||
cd "${RUNNER_TEMP}"
|
||||
|
||||
echo "Running TPC-C ramp-down-up with ${WAREHOUSES} warehouses..."
|
||||
|
||||
# Run the optimal rate benchmark
|
||||
./scripts/execute_${WAREHOUSES}_warehouses_ramp_up.sh
|
||||
|
||||
echo "Benchmark execution completed"
|
||||
|
||||
- name: Process results (upload to test results database and generate diagrams)
|
||||
env:
|
||||
WAREHOUSES: ${{ matrix.warehouses }}
|
||||
MIN_CU: ${{ matrix.min_cu }}
|
||||
MAX_CU: ${{ matrix.max_cu }}
|
||||
PROJECT_ID: ${{ steps.create-neon-project-tpcc.outputs.project_id }}
|
||||
REVISION: ${{ github.sha }}
|
||||
PERF_DB_CONNSTR: ${{ secrets.PERF_TEST_RESULT_CONNSTR }}
|
||||
run: |
|
||||
cd "${RUNNER_TEMP}"
|
||||
|
||||
echo "Creating temporary Python environment for results processing..."
|
||||
|
||||
# Create temporary virtual environment
|
||||
python3 -m venv temp_results_env
|
||||
source temp_results_env/bin/activate
|
||||
|
||||
# Install required packages in virtual environment
|
||||
pip install matplotlib pandas psycopg2-binary
|
||||
|
||||
echo "Copying results processing scripts..."
|
||||
|
||||
# Copy both processing scripts
|
||||
cp "${GITHUB_WORKSPACE}/test_runner/performance/benchbase_tpc_c_helpers/generate_diagrams.py" .
|
||||
cp "${GITHUB_WORKSPACE}/test_runner/performance/benchbase_tpc_c_helpers/upload_results_to_perf_test_results.py" .
|
||||
|
||||
echo "Processing load phase metrics..."
|
||||
|
||||
# Find and process load log
|
||||
LOAD_LOG=$(find . -name "load_${WAREHOUSES}_warehouses.log" -type f | head -1)
|
||||
if [ -n "$LOAD_LOG" ]; then
|
||||
echo "Processing load metrics from: $LOAD_LOG"
|
||||
python upload_results_to_perf_test_results.py \
|
||||
--load-log "$LOAD_LOG" \
|
||||
--run-type "load" \
|
||||
--warehouses "${WAREHOUSES}" \
|
||||
--min-cu "${MIN_CU}" \
|
||||
--max-cu "${MAX_CU}" \
|
||||
--project-id "${PROJECT_ID}" \
|
||||
--revision "${REVISION}" \
|
||||
--connection-string "${PERF_DB_CONNSTR}"
|
||||
else
|
||||
echo "Warning: Load log file not found: load_${WAREHOUSES}_warehouses.log"
|
||||
fi
|
||||
|
||||
echo "Processing warmup results for optimal rate..."
|
||||
|
||||
# Find and process warmup results
|
||||
WARMUP_CSV=$(find results_warmup -name "*.results.csv" -type f | head -1)
|
||||
WARMUP_JSON=$(find results_warmup -name "*.summary.json" -type f | head -1)
|
||||
|
||||
if [ -n "$WARMUP_CSV" ] && [ -n "$WARMUP_JSON" ]; then
|
||||
echo "Generating warmup diagram from: $WARMUP_CSV"
|
||||
python generate_diagrams.py \
|
||||
--input-csv "$WARMUP_CSV" \
|
||||
--output-svg "warmup_${WAREHOUSES}_warehouses_performance.svg" \
|
||||
--title-suffix "Warmup at max TPS"
|
||||
|
||||
echo "Uploading warmup metrics from: $WARMUP_JSON"
|
||||
python upload_results_to_perf_test_results.py \
|
||||
--summary-json "$WARMUP_JSON" \
|
||||
--results-csv "$WARMUP_CSV" \
|
||||
--run-type "warmup" \
|
||||
--min-cu "${MIN_CU}" \
|
||||
--max-cu "${MAX_CU}" \
|
||||
--project-id "${PROJECT_ID}" \
|
||||
--revision "${REVISION}" \
|
||||
--connection-string "${PERF_DB_CONNSTR}"
|
||||
else
|
||||
echo "Warning: Missing warmup results files (CSV: $WARMUP_CSV, JSON: $WARMUP_JSON)"
|
||||
fi
|
||||
|
||||
echo "Processing optimal rate results..."
|
||||
|
||||
# Find and process optimal rate results
|
||||
OPTRATE_CSV=$(find results_opt_rate -name "*.results.csv" -type f | head -1)
|
||||
OPTRATE_JSON=$(find results_opt_rate -name "*.summary.json" -type f | head -1)
|
||||
|
||||
if [ -n "$OPTRATE_CSV" ] && [ -n "$OPTRATE_JSON" ]; then
|
||||
echo "Generating optimal rate diagram from: $OPTRATE_CSV"
|
||||
python generate_diagrams.py \
|
||||
--input-csv "$OPTRATE_CSV" \
|
||||
--output-svg "benchmark_${WAREHOUSES}_warehouses_performance.svg" \
|
||||
--title-suffix "70% of max TPS"
|
||||
|
||||
echo "Uploading optimal rate metrics from: $OPTRATE_JSON"
|
||||
python upload_results_to_perf_test_results.py \
|
||||
--summary-json "$OPTRATE_JSON" \
|
||||
--results-csv "$OPTRATE_CSV" \
|
||||
--run-type "opt-rate" \
|
||||
--min-cu "${MIN_CU}" \
|
||||
--max-cu "${MAX_CU}" \
|
||||
--project-id "${PROJECT_ID}" \
|
||||
--revision "${REVISION}" \
|
||||
--connection-string "${PERF_DB_CONNSTR}"
|
||||
else
|
||||
echo "Warning: Missing optimal rate results files (CSV: $OPTRATE_CSV, JSON: $OPTRATE_JSON)"
|
||||
fi
|
||||
|
||||
echo "Processing warmup 2 results for ramp down/up phase..."
|
||||
|
||||
# Find and process warmup results
|
||||
WARMUP_CSV=$(find results_warmup -name "*.results.csv" -type f | tail -1)
|
||||
WARMUP_JSON=$(find results_warmup -name "*.summary.json" -type f | tail -1)
|
||||
|
||||
if [ -n "$WARMUP_CSV" ] && [ -n "$WARMUP_JSON" ]; then
|
||||
echo "Generating warmup diagram from: $WARMUP_CSV"
|
||||
python generate_diagrams.py \
|
||||
--input-csv "$WARMUP_CSV" \
|
||||
--output-svg "warmup_2_${WAREHOUSES}_warehouses_performance.svg" \
|
||||
--title-suffix "Warmup at max TPS"
|
||||
|
||||
echo "Uploading warmup metrics from: $WARMUP_JSON"
|
||||
python upload_results_to_perf_test_results.py \
|
||||
--summary-json "$WARMUP_JSON" \
|
||||
--results-csv "$WARMUP_CSV" \
|
||||
--run-type "warmup" \
|
||||
--min-cu "${MIN_CU}" \
|
||||
--max-cu "${MAX_CU}" \
|
||||
--project-id "${PROJECT_ID}" \
|
||||
--revision "${REVISION}" \
|
||||
--connection-string "${PERF_DB_CONNSTR}"
|
||||
else
|
||||
echo "Warning: Missing warmup results files (CSV: $WARMUP_CSV, JSON: $WARMUP_JSON)"
|
||||
fi
|
||||
|
||||
echo "Processing ramp results..."
|
||||
|
||||
# Find and process ramp results
|
||||
RAMPUP_CSV=$(find results_ramp_up -name "*.results.csv" -type f | head -1)
|
||||
RAMPUP_JSON=$(find results_ramp_up -name "*.summary.json" -type f | head -1)
|
||||
|
||||
if [ -n "$RAMPUP_CSV" ] && [ -n "$RAMPUP_JSON" ]; then
|
||||
echo "Generating ramp diagram from: $RAMPUP_CSV"
|
||||
python generate_diagrams.py \
|
||||
--input-csv "$RAMPUP_CSV" \
|
||||
--output-svg "ramp_${WAREHOUSES}_warehouses_performance.svg" \
|
||||
--title-suffix "ramp TPS down and up in 5 minute intervals"
|
||||
|
||||
echo "Uploading ramp metrics from: $RAMPUP_JSON"
|
||||
python upload_results_to_perf_test_results.py \
|
||||
--summary-json "$RAMPUP_JSON" \
|
||||
--results-csv "$RAMPUP_CSV" \
|
||||
--run-type "ramp-up" \
|
||||
--min-cu "${MIN_CU}" \
|
||||
--max-cu "${MAX_CU}" \
|
||||
--project-id "${PROJECT_ID}" \
|
||||
--revision "${REVISION}" \
|
||||
--connection-string "${PERF_DB_CONNSTR}"
|
||||
else
|
||||
echo "Warning: Missing ramp results files (CSV: $RAMPUP_CSV, JSON: $RAMPUP_JSON)"
|
||||
fi
|
||||
|
||||
# Deactivate and clean up virtual environment
|
||||
deactivate
|
||||
rm -rf temp_results_env
|
||||
rm upload_results_to_perf_test_results.py
|
||||
|
||||
echo "Results processing completed and environment cleaned up"
|
||||
|
||||
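The processing above picks result files with find piped into head -1 and, for the second warmup, tail -1, because two warmup runs accumulate under results_warmup by that point. A small Python sketch of the same selection, assuming BenchBase's *.results.csv naming; sorting by modification time is an assumption here, the shell version relies on find's traversal order.

```python
from pathlib import Path

# Pick the oldest and newest warmup results, mirroring head -1 / tail -1 above.
results = sorted(Path("results_warmup").glob("*.results.csv"),
                 key=lambda p: p.stat().st_mtime)
if results:
    first_warmup, second_warmup = results[0], results[-1]
    print(first_warmup, second_warmup)
```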
- name: Set date for upload
|
||||
id: set-date
|
||||
run: echo "date=$(date +%Y-%m-%d)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Configure AWS credentials # necessary to upload results
|
||||
uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2
|
||||
with:
|
||||
aws-region: us-east-2
|
||||
role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }}
|
||||
role-duration-seconds: 900 # 900 is minimum value
|
||||
|
||||
- name: Upload benchmark results to S3
|
||||
env:
|
||||
S3_BUCKET: neon-public-benchmark-results
|
||||
S3_PREFIX: benchbase-tpc-c/${{ steps.set-date.outputs.date }}/${{ github.run_id }}/${{ matrix.warehouses }}-warehouses
|
||||
run: |
|
||||
echo "Redacting passwords from configuration files before upload..."
|
||||
|
||||
# Mask all passwords in XML config files
|
||||
find "${RUNNER_TEMP}/configs" -name "*.xml" -type f -exec sed -i 's|<password>[^<]*</password>|<password>redacted</password>|g' {} \;
|
||||
|
||||
echo "Uploading benchmark results to s3://${S3_BUCKET}/${S3_PREFIX}/"
|
||||
|
||||
# Upload the entire benchmark directory recursively
|
||||
aws s3 cp --only-show-errors --recursive "${RUNNER_TEMP}" s3://${S3_BUCKET}/${S3_PREFIX}/
|
||||
|
||||
echo "Upload completed"
|
||||
|
||||
- name: Delete Neon Project
|
||||
if: ${{ always() }}
|
||||
uses: ./.github/actions/neon-project-delete
|
||||
with:
|
||||
project_id: ${{ steps.create-neon-project-tpcc.outputs.project_id }}
|
||||
api_key: ${{ secrets.NEON_PRODUCTION_API_KEY_4_BENCHMARKS }}
|
||||
api_host: console.neon.tech # production (!)
|
||||
.github/workflows/proxy-benchmark.yml (vendored, 48 lines changed)
@@ -3,7 +3,7 @@ name: Periodic proxy performance test on unit-perf-aws-arm runners
|
||||
on:
|
||||
push: # TODO: remove after testing
|
||||
branches:
|
||||
- test-proxy-bench # Runs on pushes to branches starting with test-proxy-bench
|
||||
- test-proxy-bench # Runs on pushes to test-proxy-bench branch
|
||||
# schedule:
|
||||
# * is a special character in YAML so you have to quote this string
|
||||
# ┌───────────── minute (0 - 59)
|
||||
@@ -32,7 +32,7 @@ jobs:
|
||||
statuses: write
|
||||
contents: write
|
||||
pull-requests: write
|
||||
runs-on: [self-hosted, unit-perf-aws-arm]
|
||||
runs-on: [ self-hosted, unit-perf-aws-arm ]
|
||||
timeout-minutes: 60 # 1h timeout
|
||||
container:
|
||||
image: ghcr.io/neondatabase/build-tools:pinned-bookworm
|
||||
@@ -55,30 +55,58 @@ jobs:
|
||||
{
|
||||
echo "PROXY_BENCH_PATH=$PROXY_BENCH_PATH"
|
||||
echo "NEON_DIR=${RUNNER_TEMP}/neon"
|
||||
echo "NEON_PROXY_PATH=${RUNNER_TEMP}/neon/bin/proxy"
|
||||
echo "TEST_OUTPUT=${PROXY_BENCH_PATH}/test_output"
|
||||
echo ""
|
||||
} >> "$GITHUB_ENV"
|
||||
|
||||
- name: Run proxy-bench
|
||||
run: ${PROXY_BENCH_PATH}/run.sh
|
||||
- name: Cache poetry deps
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.cache/pypoetry/virtualenvs
|
||||
key: v2-${{ runner.os }}-${{ runner.arch }}-python-deps-bookworm-${{ hashFiles('poetry.lock') }}
|
||||
|
||||
- name: Ingest Bench Results # neon repo script
|
||||
- name: Install Python deps
|
||||
shell: bash -euxo pipefail {0}
|
||||
run: ./scripts/pysync
|
||||
|
||||
- name: show ulimits
|
||||
shell: bash -euxo pipefail {0}
|
||||
run: |
|
||||
ulimit -a
|
||||
|
||||
- name: Run proxy-bench
|
||||
working-directory: ${{ env.PROXY_BENCH_PATH }}
|
||||
run: ./run.sh --with-grafana --bare-metal
|
||||
|
||||
- name: Ingest Bench Results
|
||||
if: always()
|
||||
working-directory: ${{ env.NEON_DIR }}
|
||||
run: |
|
||||
mkdir -p $TEST_OUTPUT
|
||||
python $NEON_DIR/scripts/proxy_bench_results_ingest.py --out $TEST_OUTPUT
|
||||
|
||||
- name: Push Metrics to Proxy perf database
|
||||
shell: bash -euxo pipefail {0}
|
||||
if: always()
|
||||
env:
|
||||
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PROXY_TEST_RESULT_CONNSTR }}"
|
||||
REPORT_FROM: $TEST_OUTPUT
|
||||
working-directory: ${{ env.NEON_DIR }}
|
||||
run: $NEON_DIR/scripts/generate_and_push_perf_report.sh
|
||||
|
||||
- name: Docker cleanup
|
||||
if: always()
|
||||
run: docker compose down
|
||||
|
||||
- name: Notify Failure
|
||||
if: failure()
|
||||
run: echo "Proxy bench job failed" && exit 1
|
||||
run: echo "Proxy bench job failed" && exit 1
|
||||
|
||||
- name: Cleanup Test Resources
|
||||
if: always()
|
||||
shell: bash -euxo pipefail {0}
|
||||
run: |
|
||||
# Cleanup the test resources
|
||||
if [[ -d "${TEST_OUTPUT}" ]]; then
|
||||
rm -rf ${TEST_OUTPUT}
|
||||
fi
|
||||
if [[ -d "${PROXY_BENCH_PATH}/test_output" ]]; then
|
||||
rm -rf ${PROXY_BENCH_PATH}/test_output
|
||||
fi
|
||||
Cargo.lock (generated, 6 lines changed)
@@ -1388,6 +1388,7 @@ dependencies = [
|
||||
"tower-http",
|
||||
"tower-otel",
|
||||
"tracing",
|
||||
"tracing-appender",
|
||||
"tracing-opentelemetry",
|
||||
"tracing-subscriber",
|
||||
"tracing-utils",
|
||||
@@ -7935,11 +7936,12 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tracing-appender"
|
||||
version = "0.2.2"
|
||||
version = "0.2.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e"
|
||||
checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf"
|
||||
dependencies = [
|
||||
"crossbeam-channel",
|
||||
"thiserror 1.0.69",
|
||||
"time",
|
||||
"tracing-subscriber",
|
||||
]
|
||||
|
||||
@@ -145,7 +145,7 @@ num-traits = "0.2.19"
|
||||
once_cell = "1.13"
|
||||
opentelemetry = "0.30"
|
||||
opentelemetry_sdk = "0.30"
|
||||
opentelemetry-otlp = { version = "0.30", default-features = false, features = ["http-proto", "trace", "http", "reqwest-client"] }
|
||||
opentelemetry-otlp = { version = "0.30", default-features = false, features = ["http-proto", "trace", "http", "reqwest-blocking-client"] }
|
||||
opentelemetry-semantic-conventions = "0.30"
|
||||
parking_lot = "0.12"
|
||||
parquet = { version = "53", default-features = false, features = ["zstd"] }
|
||||
@@ -222,6 +222,7 @@ tracing-log = "0.2"
|
||||
tracing-opentelemetry = "0.31"
|
||||
tracing-serde = "0.2.0"
|
||||
tracing-subscriber = { version = "0.3", default-features = false, features = ["smallvec", "fmt", "tracing-log", "std", "env-filter", "json"] }
|
||||
tracing-appender = "0.2.3"
|
||||
try-lock = "0.2.5"
|
||||
test-log = { version = "0.2.17", default-features = false, features = ["log"] }
|
||||
twox-hash = { version = "1.6.3", default-features = false }
|
||||
|
||||
build-tools/package-lock.json (generated, 51 lines changed)
@@ -6,7 +6,7 @@
|
||||
"": {
|
||||
"name": "build-tools",
|
||||
"devDependencies": {
|
||||
"@redocly/cli": "1.34.4",
|
||||
"@redocly/cli": "1.34.5",
|
||||
"@sourcemeta/jsonschema": "10.0.0"
|
||||
}
|
||||
},
|
||||
@@ -472,9 +472,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@redocly/cli": {
|
||||
"version": "1.34.4",
|
||||
"resolved": "https://registry.npmjs.org/@redocly/cli/-/cli-1.34.4.tgz",
|
||||
"integrity": "sha512-seH/GgrjSB1EeOsgJ/4Ct6Jk2N7sh12POn/7G8UQFARMyUMJpe1oHtBwT2ndfp4EFCpgBAbZ/82Iw6dwczNxEA==",
|
||||
"version": "1.34.5",
|
||||
"resolved": "https://registry.npmjs.org/@redocly/cli/-/cli-1.34.5.tgz",
|
||||
"integrity": "sha512-5IEwxs7SGP5KEXjBKLU8Ffdz9by/KqNSeBk6YUVQaGxMXK//uYlTJIPntgUXbo1KAGG2d2q2XF8y4iFz6qNeiw==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
@@ -484,14 +484,14 @@
|
||||
"@opentelemetry/sdk-trace-node": "1.26.0",
|
||||
"@opentelemetry/semantic-conventions": "1.27.0",
|
||||
"@redocly/config": "^0.22.0",
|
||||
"@redocly/openapi-core": "1.34.4",
|
||||
"@redocly/respect-core": "1.34.4",
|
||||
"@redocly/openapi-core": "1.34.5",
|
||||
"@redocly/respect-core": "1.34.5",
|
||||
"abort-controller": "^3.0.0",
|
||||
"chokidar": "^3.5.1",
|
||||
"colorette": "^1.2.0",
|
||||
"core-js": "^3.32.1",
|
||||
"dotenv": "16.4.7",
|
||||
"form-data": "^4.0.0",
|
||||
"form-data": "^4.0.4",
|
||||
"get-port-please": "^3.0.1",
|
||||
"glob": "^7.1.6",
|
||||
"handlebars": "^4.7.6",
|
||||
@@ -522,9 +522,9 @@
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@redocly/openapi-core": {
|
||||
"version": "1.34.4",
|
||||
"resolved": "https://registry.npmjs.org/@redocly/openapi-core/-/openapi-core-1.34.4.tgz",
|
||||
"integrity": "sha512-hf53xEgpXIgWl3b275PgZU3OTpYh1RoD2LHdIfQ1JzBNTWsiNKczTEsI/4Tmh2N1oq9YcphhSMyk3lDh85oDjg==",
|
||||
"version": "1.34.5",
|
||||
"resolved": "https://registry.npmjs.org/@redocly/openapi-core/-/openapi-core-1.34.5.tgz",
|
||||
"integrity": "sha512-0EbE8LRbkogtcCXU7liAyC00n9uNG9hJ+eMyHFdUsy9lB/WGqnEBgwjA9q2cyzAVcdTkQqTBBU1XePNnN3OijA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
@@ -544,21 +544,21 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@redocly/respect-core": {
|
||||
"version": "1.34.4",
|
||||
"resolved": "https://registry.npmjs.org/@redocly/respect-core/-/respect-core-1.34.4.tgz",
|
||||
"integrity": "sha512-MitKyKyQpsizA4qCVv+MjXL4WltfhFQAoiKiAzrVR1Kusro3VhYb6yJuzoXjiJhR0ukLP5QOP19Vcs7qmj9dZg==",
|
||||
"version": "1.34.5",
|
||||
"resolved": "https://registry.npmjs.org/@redocly/respect-core/-/respect-core-1.34.5.tgz",
|
||||
"integrity": "sha512-GheC/g/QFztPe9UA9LamooSplQuy9pe0Yr8XGTqkz0ahivLDl7svoy/LSQNn1QH3XGtLKwFYMfTwFR2TAYyh5Q==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@faker-js/faker": "^7.6.0",
|
||||
"@redocly/ajv": "8.11.2",
|
||||
"@redocly/openapi-core": "1.34.4",
|
||||
"@redocly/openapi-core": "1.34.5",
|
||||
"better-ajv-errors": "^1.2.0",
|
||||
"colorette": "^2.0.20",
|
||||
"concat-stream": "^2.0.0",
|
||||
"cookie": "^0.7.2",
|
||||
"dotenv": "16.4.7",
|
||||
"form-data": "4.0.0",
|
||||
"form-data": "^4.0.4",
|
||||
"jest-diff": "^29.3.1",
|
||||
"jest-matcher-utils": "^29.3.1",
|
||||
"js-yaml": "4.1.0",
|
||||
@@ -582,21 +582,6 @@
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@redocly/respect-core/node_modules/form-data": {
|
||||
"version": "4.0.0",
|
||||
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
|
||||
"integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"asynckit": "^0.4.0",
|
||||
"combined-stream": "^1.0.8",
|
||||
"mime-types": "^2.1.12"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 6"
|
||||
}
|
||||
},
|
||||
"node_modules/@sinclair/typebox": {
|
||||
"version": "0.27.8",
|
||||
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
|
||||
@@ -1345,9 +1330,9 @@
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/form-data": {
|
||||
"version": "4.0.3",
|
||||
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.3.tgz",
|
||||
"integrity": "sha512-qsITQPfmvMOSAdeyZ+12I1c+CKSstAFAwu+97zrnWAbIr5u8wfsExUzCesVLC8NgHuRUqNN4Zy6UPWUTRGslcA==",
|
||||
"version": "4.0.4",
|
||||
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz",
|
||||
"integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
"name": "build-tools",
|
||||
"private": true,
|
||||
"devDependencies": {
|
||||
"@redocly/cli": "1.34.4",
|
||||
"@redocly/cli": "1.34.5",
|
||||
"@sourcemeta/jsonschema": "10.0.0"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,7 +26,13 @@ commands:
|
||||
- name: postgres-exporter
|
||||
user: nobody
|
||||
sysvInitAction: respawn
|
||||
shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter pgaudit.log=none" /bin/postgres_exporter --config.file=/etc/postgres_exporter.yml'
|
||||
# Turn off database collector (`--no-collector.database`), we don't use `pg_database_size_bytes` metric anyway, see
|
||||
# https://github.com/neondatabase/flux-fleet/blob/5e19b3fd897667b70d9a7ad4aa06df0ca22b49ff/apps/base/compute-metrics/scrape-compute-pg-exporter-neon.yaml#L29
|
||||
# but it's enabled by default and it doesn't filter out invalid databases, see
|
||||
# https://github.com/prometheus-community/postgres_exporter/blob/06a553c8166512c9d9c5ccf257b0f9bba8751dbc/collector/pg_database.go#L67
|
||||
# so if it hits one, it starts spamming logs
|
||||
# ERROR: [NEON_SMGR] [reqid d9700000018] could not read db size of db 705302 from page server at lsn 5/A2457EB0
|
||||
shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter pgaudit.log=none" /bin/postgres_exporter --no-collector.database --config.file=/etc/postgres_exporter.yml'
|
||||
- name: pgbouncer-exporter
|
||||
user: postgres
|
||||
sysvInitAction: respawn
|
||||
|
||||
@@ -26,7 +26,13 @@ commands:
|
||||
- name: postgres-exporter
|
||||
user: nobody
|
||||
sysvInitAction: respawn
|
||||
shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter pgaudit.log=none" /bin/postgres_exporter --config.file=/etc/postgres_exporter.yml'
|
||||
# Turn off database collector (`--no-collector.database`), we don't use `pg_database_size_bytes` metric anyway, see
|
||||
# https://github.com/neondatabase/flux-fleet/blob/5e19b3fd897667b70d9a7ad4aa06df0ca22b49ff/apps/base/compute-metrics/scrape-compute-pg-exporter-neon.yaml#L29
|
||||
# but it's enabled by default and it doesn't filter out invalid databases, see
|
||||
# https://github.com/prometheus-community/postgres_exporter/blob/06a553c8166512c9d9c5ccf257b0f9bba8751dbc/collector/pg_database.go#L67
|
||||
# so if it hits one, it starts spamming logs
|
||||
# ERROR: [NEON_SMGR] [reqid d9700000018] could not read db size of db 705302 from page server at lsn 5/A2457EB0
|
||||
shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter pgaudit.log=none" /bin/postgres_exporter --no-collector.database --config.file=/etc/postgres_exporter.yml'
|
||||
- name: pgbouncer-exporter
|
||||
user: postgres
|
||||
sysvInitAction: respawn
|
||||
|
||||
@@ -62,6 +62,7 @@ tokio-stream.workspace = true
|
||||
tonic.workspace = true
|
||||
tower-otel.workspace = true
|
||||
tracing.workspace = true
|
||||
tracing-appender.workspace = true
|
||||
tracing-opentelemetry.workspace = true
|
||||
tracing-subscriber.workspace = true
|
||||
tracing-utils.workspace = true
|
||||
|
||||
@@ -52,8 +52,14 @@ stateDiagram-v2
    Init --> Running : Started Postgres
    Running --> TerminationPendingFast : Requested termination
    Running --> TerminationPendingImmediate : Requested termination
    Running --> ConfigurationPending : Received a /configure request with spec
    Running --> RefreshConfigurationPending : Received a /refresh_configuration request, compute node will pull a new spec and reconfigure
    RefreshConfigurationPending --> RefreshConfiguration : Received compute spec and started configuration
    RefreshConfiguration --> Running : Compute has been re-configured
    RefreshConfiguration --> RefreshConfigurationPending : Configuration failed and to be retried
    TerminationPendingFast --> Terminated : Terminated compute with 30s delay for cplane to inspect status
    TerminationPendingImmediate --> Terminated : Terminated compute immediately
    Failed --> RefreshConfigurationPending : Received a /refresh_configuration request
    Failed --> [*] : Compute exited
    Terminated --> [*] : Compute exited
```
|
||||
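One way to read the hunk above is as a transition table. The sketch below encodes only the edges visible in this hunk (the full diagram has more states above the shown range), which makes it easy to check whether a given transition is legal.

```python
# Transitions copied from the state-diagram hunk above (target states per source state).
TRANSITIONS = {
    "Init": {"Running"},
    "Running": {
        "TerminationPendingFast",
        "TerminationPendingImmediate",
        "ConfigurationPending",
        "RefreshConfigurationPending",
    },
    "RefreshConfigurationPending": {"RefreshConfiguration"},
    "RefreshConfiguration": {"Running", "RefreshConfigurationPending"},
    "TerminationPendingFast": {"Terminated"},
    "TerminationPendingImmediate": {"Terminated"},
    "Failed": {"RefreshConfigurationPending", "(exit)"},
    "Terminated": {"(exit)"},
}

def is_allowed(src: str, dst: str) -> bool:
    return dst in TRANSITIONS.get(src, set())

assert is_allowed("Running", "RefreshConfigurationPending")
assert not is_allowed("Terminated", "Running")
```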
|
||||
@@ -49,9 +49,10 @@ use compute_tools::compute::{
|
||||
BUILD_TAG, ComputeNode, ComputeNodeParams, forward_termination_signal,
|
||||
};
|
||||
use compute_tools::extension_server::get_pg_version_string;
|
||||
use compute_tools::logger::*;
|
||||
use compute_tools::params::*;
|
||||
use compute_tools::pg_isready::get_pg_isready_bin;
|
||||
use compute_tools::spec::*;
|
||||
use compute_tools::{hadron_metrics, installed_extensions, logger::*};
|
||||
use rlimit::{Resource, setrlimit};
|
||||
use signal_hook::consts::{SIGINT, SIGQUIT, SIGTERM};
|
||||
use signal_hook::iterator::Signals;
|
||||
@@ -194,11 +195,19 @@ fn main() -> Result<()> {
|
||||
.build()?;
|
||||
let _rt_guard = runtime.enter();
|
||||
|
||||
let tracing_provider = init(cli.dev)?;
|
||||
let mut log_dir = None;
|
||||
if cli.lakebase_mode {
|
||||
log_dir = std::env::var("COMPUTE_CTL_LOG_DIRECTORY").ok();
|
||||
}
|
||||
|
||||
let (tracing_provider, _file_logs_guard) = init(cli.dev, log_dir)?;
|
||||
|
||||
// enable core dumping for all child processes
|
||||
setrlimit(Resource::CORE, rlimit::INFINITY, rlimit::INFINITY)?;
|
||||
|
||||
installed_extensions::initialize_metrics();
|
||||
hadron_metrics::initialize_metrics();
|
||||
|
||||
let connstr = Url::parse(&cli.connstr).context("cannot parse connstr as a URL")?;
|
||||
|
||||
let config = get_config(&cli)?;
|
||||
@@ -226,7 +235,12 @@ fn main() -> Result<()> {
|
||||
cli.installed_extensions_collection_interval,
|
||||
)),
|
||||
pg_init_timeout: cli.pg_init_timeout.map(Duration::from_secs),
|
||||
pg_isready_bin: get_pg_isready_bin(&cli.pgbin),
|
||||
instance_id: std::env::var("INSTANCE_ID").ok(),
|
||||
lakebase_mode: cli.lakebase_mode,
|
||||
build_tag: BUILD_TAG.to_string(),
|
||||
control_plane_uri: cli.control_plane_uri,
|
||||
config_path_test_only: cli.config,
|
||||
},
|
||||
config,
|
||||
)?;
|
||||
@@ -238,8 +252,14 @@ fn main() -> Result<()> {
|
||||
deinit_and_exit(tracing_provider, exit_code);
|
||||
}
|
||||
|
||||
fn init(dev_mode: bool) -> Result<Option<tracing_utils::Provider>> {
|
||||
let provider = init_tracing_and_logging(DEFAULT_LOG_LEVEL)?;
|
||||
fn init(
|
||||
dev_mode: bool,
|
||||
log_dir: Option<String>,
|
||||
) -> Result<(
|
||||
Option<tracing_utils::Provider>,
|
||||
Option<tracing_appender::non_blocking::WorkerGuard>,
|
||||
)> {
|
||||
let (provider, file_logs_guard) = init_tracing_and_logging(DEFAULT_LOG_LEVEL, &log_dir)?;
|
||||
|
||||
let mut signals = Signals::new([SIGINT, SIGTERM, SIGQUIT])?;
|
||||
thread::spawn(move || {
|
||||
@@ -250,7 +270,7 @@ fn init(dev_mode: bool) -> Result<Option<tracing_utils::Provider>> {
|
||||
|
||||
info!("compute build_tag: {}", &BUILD_TAG.to_string());
|
||||
|
||||
Ok(provider)
|
||||
Ok((provider, file_logs_guard))
|
||||
}
|
||||
|
||||
fn get_config(cli: &Cli) -> Result<ComputeConfig> {
|
||||
|
||||
@@ -21,6 +21,7 @@ use postgres::NoTls;
|
||||
use postgres::error::SqlState;
|
||||
use remote_storage::{DownloadError, RemotePath};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::ffi::OsString;
|
||||
use std::os::unix::fs::{PermissionsExt, symlink};
|
||||
use std::path::Path;
|
||||
use std::process::{Command, Stdio};
|
||||
@@ -40,8 +41,9 @@ use utils::shard::{ShardCount, ShardIndex, ShardNumber};
|
||||
|
||||
use crate::configurator::launch_configurator;
|
||||
use crate::disk_quota::set_disk_quota;
|
||||
use crate::hadron_metrics::COMPUTE_ATTACHED;
|
||||
use crate::installed_extensions::get_installed_extensions;
|
||||
use crate::logger::startup_context_from_env;
|
||||
use crate::logger::{self, startup_context_from_env};
|
||||
use crate::lsn_lease::launch_lsn_lease_bg_task_for_static;
|
||||
use crate::metrics::COMPUTE_CTL_UP;
|
||||
use crate::monitor::launch_monitor;
|
||||
@@ -113,11 +115,17 @@ pub struct ComputeNodeParams {
|
||||
|
||||
/// Interval for installed extensions collection
|
||||
pub installed_extensions_collection_interval: Arc<AtomicU64>,
|
||||
|
||||
/// Hadron instance ID of the compute node.
|
||||
pub instance_id: Option<String>,
|
||||
/// Timeout of PG compute startup in the Init state.
|
||||
pub pg_init_timeout: Option<Duration>,
|
||||
|
||||
// Path to the `pg_isready` binary.
|
||||
pub pg_isready_bin: String,
|
||||
pub lakebase_mode: bool,
|
||||
|
||||
pub build_tag: String,
|
||||
pub control_plane_uri: Option<String>,
|
||||
pub config_path_test_only: Option<OsString>,
|
||||
}
|
||||
|
||||
type TaskHandle = Mutex<Option<JoinHandle<()>>>;
|
||||
@@ -489,6 +497,7 @@ impl ComputeNode {
|
||||
port: this.params.external_http_port,
|
||||
config: this.compute_ctl_config.clone(),
|
||||
compute_id: this.params.compute_id.clone(),
|
||||
instance_id: this.params.instance_id.clone(),
|
||||
}
|
||||
.launch(&this);
|
||||
|
||||
@@ -1790,6 +1799,34 @@ impl ComputeNode {
|
||||
Ok::<(), anyhow::Error>(())
|
||||
}
|
||||
|
||||
// Signal to the configurator to refresh the configuration by pulling a new spec from the HCC.
|
||||
// Note that this merely triggers a notification on a condition variable the configurator thread
|
||||
// waits on. The configurator thread (in configurator.rs) pulls the new spec from the HCC and
|
||||
// applies it.
|
||||
pub async fn signal_refresh_configuration(&self) -> Result<()> {
|
||||
let states_allowing_configuration_refresh = [
|
||||
ComputeStatus::Running,
|
||||
ComputeStatus::Failed,
|
||||
ComputeStatus::RefreshConfigurationPending,
|
||||
];
|
||||
|
||||
let mut state = self.state.lock().expect("state lock poisoned");
|
||||
if states_allowing_configuration_refresh.contains(&state.status) {
|
||||
state.status = ComputeStatus::RefreshConfigurationPending;
|
||||
self.state_changed.notify_all();
|
||||
Ok(())
|
||||
} else if state.status == ComputeStatus::Init {
|
||||
// If the compute is in Init state, we can't refresh the configuration immediately,
|
||||
// but we should be able to do that soon.
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow::anyhow!(
|
||||
"Cannot refresh compute configuration in state {:?}",
|
||||
state.status
|
||||
))
|
||||
}
|
||||
}
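In short, signal_refresh_configuration only flips the state to RefreshConfigurationPending from Running, Failed, or RefreshConfigurationPending itself, treats Init as a harmless no-op, and rejects everything else. A schematic Python sketch of that decision (illustrative, not the actual Rust implementation):

```python
# States from the Rust code above that allow a configuration refresh.
ALLOW_REFRESH = {"Running", "Failed", "RefreshConfigurationPending"}

def signal_refresh(status: str) -> str:
    if status in ALLOW_REFRESH:
        return "RefreshConfigurationPending"  # configurator thread gets notified
    if status == "Init":
        return status                         # too early, but refresh will be possible soon
    raise RuntimeError(f"Cannot refresh compute configuration in state {status}")
```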
|
||||
|
||||
// Wrapped this around `pg_ctl reload`, but right now we don't use
|
||||
// `pg_ctl` for start / stop.
|
||||
#[instrument(skip_all)]
|
||||
@@ -1962,6 +1999,8 @@ impl ComputeNode {
|
||||
// wait
|
||||
ComputeStatus::Init
|
||||
| ComputeStatus::Configuration
|
||||
| ComputeStatus::RefreshConfiguration
|
||||
| ComputeStatus::RefreshConfigurationPending
|
||||
| ComputeStatus::Empty => {
|
||||
state = self.state_changed.wait(state).unwrap();
|
||||
}
|
||||
@@ -2518,6 +2557,34 @@ LIMIT 100",
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the compute spec and update related metrics.
|
||||
/// This is the central place where pspec is updated.
|
||||
pub fn set_spec(params: &ComputeNodeParams, state: &mut ComputeState, pspec: ParsedSpec) {
|
||||
state.pspec = Some(pspec);
|
||||
ComputeNode::update_attached_metric(params, state);
|
||||
let _ = logger::update_ids(¶ms.instance_id, &Some(params.compute_id.clone()));
|
||||
}
|
||||
|
||||
pub fn update_attached_metric(params: &ComputeNodeParams, state: &mut ComputeState) {
|
||||
// Update the pg_cctl_attached gauge when all identifiers are available.
|
||||
if let Some(instance_id) = ¶ms.instance_id {
|
||||
if let Some(pspec) = &state.pspec {
|
||||
// Clear all values in the metric
|
||||
COMPUTE_ATTACHED.reset();
|
||||
|
||||
// Set new metric value
|
||||
COMPUTE_ATTACHED
|
||||
.with_label_values(&[
|
||||
¶ms.compute_id,
|
||||
instance_id,
|
||||
&pspec.tenant_id.to_string(),
|
||||
&pspec.timeline_id.to_string(),
|
||||
])
|
||||
.set(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn installed_extensions(conf: tokio_postgres::Config) -> Result<()> {
|
||||
|
||||
@@ -90,6 +90,7 @@ impl ComputeNode {
|
||||
}
|
||||
|
||||
/// If there is a prewarm request ongoing, return `false`; otherwise return `true`.
|
||||
/// Has a failpoint "compute-prewarm"
|
||||
pub fn prewarm_lfc(self: &Arc<Self>, from_endpoint: Option<String>) -> bool {
|
||||
{
|
||||
let state = &mut self.state.lock().unwrap().lfc_prewarm_state;
|
||||
@@ -112,9 +113,8 @@ impl ComputeNode {
|
||||
Err(err) => {
|
||||
crate::metrics::LFC_PREWARM_ERRORS.inc();
|
||||
error!(%err, "could not prewarm LFC");
|
||||
|
||||
LfcPrewarmState::Failed {
|
||||
error: err.to_string(),
|
||||
error: format!("{err:#}"),
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -135,16 +135,20 @@ impl ComputeNode {
|
||||
async fn prewarm_impl(&self, from_endpoint: Option<String>) -> Result<bool> {
|
||||
let EndpointStoragePair { url, token } = self.endpoint_storage_pair(from_endpoint)?;
|
||||
|
||||
#[cfg(feature = "testing")]
|
||||
fail::fail_point!("compute-prewarm", |_| {
|
||||
bail!("prewarm configured to fail because of a failpoint")
|
||||
});
|
||||
|
||||
info!(%url, "requesting LFC state from endpoint storage");
|
||||
let request = Client::new().get(&url).bearer_auth(token);
|
||||
let res = request.send().await.context("querying endpoint storage")?;
|
||||
let status = res.status();
|
||||
match status {
|
||||
match res.status() {
|
||||
StatusCode::OK => (),
|
||||
StatusCode::NOT_FOUND => {
|
||||
return Ok(false);
|
||||
}
|
||||
_ => bail!("{status} querying endpoint storage"),
|
||||
status => bail!("{status} querying endpoint storage"),
|
||||
}
|
||||
|
||||
let mut uncompressed = Vec::new();
|
||||
@@ -205,7 +209,7 @@ impl ComputeNode {
|
||||
crate::metrics::LFC_OFFLOAD_ERRORS.inc();
|
||||
error!(%err, "could not offload LFC state to endpoint storage");
|
||||
self.state.lock().unwrap().lfc_offload_state = LfcOffloadState::Failed {
|
||||
error: err.to_string(),
|
||||
error: format!("{err:#}"),
|
||||
};
|
||||
}
|
||||
|
||||
@@ -213,16 +217,22 @@ impl ComputeNode {
|
||||
let EndpointStoragePair { url, token } = self.endpoint_storage_pair(None)?;
|
||||
info!(%url, "requesting LFC state from Postgres");
|
||||
|
||||
let mut compressed = Vec::new();
|
||||
ComputeNode::get_maintenance_client(&self.tokio_conn_conf)
|
||||
let row = ComputeNode::get_maintenance_client(&self.tokio_conn_conf)
|
||||
.await
|
||||
.context("connecting to postgres")?
|
||||
.query_one("select neon.get_local_cache_state()", &[])
|
||||
.await
|
||||
.context("querying LFC state")?
|
||||
.try_get::<usize, &[u8]>(0)
|
||||
.context("deserializing LFC state")
|
||||
.map(ZstdEncoder::new)?
|
||||
.context("querying LFC state")?;
|
||||
let state = row
|
||||
.try_get::<usize, Option<&[u8]>>(0)
|
||||
.context("deserializing LFC state")?;
|
||||
let Some(state) = state else {
|
||||
info!(%url, "empty LFC state, not exporting");
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
let mut compressed = Vec::new();
|
||||
ZstdEncoder::new(state)
|
||||
.read_to_end(&mut compressed)
|
||||
.await
|
||||
.context("compressing LFC state")?;
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
use crate::compute::ComputeNode;
|
||||
use anyhow::{Context, Result, bail};
|
||||
use compute_api::{
|
||||
responses::{LfcPrewarmState, PromoteState, SafekeepersLsn},
|
||||
spec::ComputeMode,
|
||||
};
|
||||
use compute_api::responses::{LfcPrewarmState, PromoteConfig, PromoteState};
|
||||
use compute_api::spec::ComputeMode;
|
||||
use itertools::Itertools;
|
||||
use std::collections::HashMap;
|
||||
use std::{sync::Arc, time::Duration};
|
||||
use tokio::time::sleep;
|
||||
use tracing::info;
|
||||
use utils::lsn::Lsn;
|
||||
|
||||
impl ComputeNode {
|
||||
@@ -13,21 +14,22 @@ impl ComputeNode {
|
||||
/// and http client disconnects, this does not stop promotion, and subsequent
|
||||
/// calls block until promote finishes.
|
||||
/// Called by control plane on secondary after primary endpoint is terminated
|
||||
pub async fn promote(self: &Arc<Self>, safekeepers_lsn: SafekeepersLsn) -> PromoteState {
|
||||
/// Has a failpoint "compute-promotion"
|
||||
pub async fn promote(self: &Arc<Self>, cfg: PromoteConfig) -> PromoteState {
|
||||
let cloned = self.clone();
|
||||
let promote_fn = async move || {
|
||||
let Err(err) = cloned.promote_impl(cfg).await else {
|
||||
return PromoteState::Completed;
|
||||
};
|
||||
tracing::error!(%err, "promoting");
|
||||
PromoteState::Failed {
|
||||
error: format!("{err:#}"),
|
||||
}
|
||||
};
|
||||
|
||||
let start_promotion = || {
|
||||
let (tx, rx) = tokio::sync::watch::channel(PromoteState::NotPromoted);
|
||||
tokio::spawn(async move {
|
||||
tx.send(match cloned.promote_impl(safekeepers_lsn).await {
|
||||
Ok(_) => PromoteState::Completed,
|
||||
Err(err) => {
|
||||
tracing::error!(%err, "promoting");
|
||||
PromoteState::Failed {
|
||||
error: err.to_string(),
|
||||
}
|
||||
}
|
||||
})
|
||||
});
|
||||
tokio::spawn(async move { tx.send(promote_fn().await) });
|
||||
rx
|
||||
};
|
||||
|
||||
@@ -47,9 +49,7 @@ impl ComputeNode {
|
||||
task.borrow().clone()
|
||||
}
|
||||
|
||||
// Why do we have to supply safekeepers?
|
||||
// For secondary we use primary_connection_conninfo so safekeepers field is empty
|
||||
async fn promote_impl(&self, safekeepers_lsn: SafekeepersLsn) -> Result<()> {
|
||||
async fn promote_impl(&self, mut cfg: PromoteConfig) -> Result<()> {
|
||||
{
|
||||
let state = self.state.lock().unwrap();
|
||||
let mode = &state.pspec.as_ref().unwrap().spec.mode;
|
||||
@@ -73,7 +73,7 @@ impl ComputeNode {
|
||||
.await
|
||||
.context("connecting to postgres")?;
|
||||
|
||||
let primary_lsn = safekeepers_lsn.wal_flush_lsn;
|
||||
let primary_lsn = cfg.wal_flush_lsn;
|
||||
let mut last_wal_replay_lsn: Lsn = Lsn::INVALID;
|
||||
const RETRIES: i32 = 20;
|
||||
for i in 0..=RETRIES {
|
||||
@@ -86,7 +86,7 @@ impl ComputeNode {
|
||||
if last_wal_replay_lsn >= primary_lsn {
|
||||
break;
|
||||
}
|
||||
tracing::info!("Try {i}, replica lsn {last_wal_replay_lsn}, primary lsn {primary_lsn}");
|
||||
info!("Try {i}, replica lsn {last_wal_replay_lsn}, primary lsn {primary_lsn}");
|
||||
sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
if last_wal_replay_lsn < primary_lsn {
|
||||
@@ -96,7 +96,7 @@ impl ComputeNode {
|
||||
// using $1 doesn't work with ALTER SYSTEM SET
|
||||
let safekeepers_sql = format!(
|
||||
"ALTER SYSTEM SET neon.safekeepers='{}'",
|
||||
safekeepers_lsn.safekeepers
|
||||
cfg.spec.safekeeper_connstrings.join(",")
|
||||
);
|
||||
client
|
||||
.query(&safekeepers_sql, &[])
|
||||
@@ -106,6 +106,12 @@ impl ComputeNode {
|
||||
.query("SELECT pg_reload_conf()", &[])
|
||||
.await
|
||||
.context("reloading postgres config")?;
|
||||
|
||||
#[cfg(feature = "testing")]
|
||||
fail::fail_point!("compute-promotion", |_| {
|
||||
bail!("promotion configured to fail because of a failpoint")
|
||||
});
|
||||
|
||||
let row = client
|
||||
.query_one("SELECT * FROM pg_promote()", &[])
|
||||
.await
|
||||
@@ -125,8 +131,36 @@ impl ComputeNode {
|
||||
bail!("replica in read only mode after promotion");
|
||||
}
|
||||
|
||||
let mut state = self.state.lock().unwrap();
|
||||
state.pspec.as_mut().unwrap().spec.mode = ComputeMode::Primary;
|
||||
Ok(())
|
||||
{
|
||||
let mut state = self.state.lock().unwrap();
|
||||
let spec = &mut state.pspec.as_mut().unwrap().spec;
|
||||
spec.mode = ComputeMode::Primary;
|
||||
let new_conf = cfg.spec.cluster.postgresql_conf.as_mut().unwrap();
|
||||
let existing_conf = spec.cluster.postgresql_conf.as_ref().unwrap();
|
||||
Self::merge_spec(new_conf, existing_conf);
|
||||
}
|
||||
info!("applied new spec, reconfiguring as primary");
|
||||
self.reconfigure()
|
||||
}
|
||||
|
||||
/// Merge old and new Postgres conf specs to apply on secondary.
|
||||
/// Change new spec's port and safekeepers since they are supplied
|
||||
/// differently
|
||||
fn merge_spec(new_conf: &mut String, existing_conf: &str) {
|
||||
let mut new_conf_set: HashMap<&str, &str> = new_conf
|
||||
.split_terminator('\n')
|
||||
.map(|e| e.split_once("=").expect("invalid item"))
|
||||
.collect();
|
||||
new_conf_set.remove("neon.safekeepers");
|
||||
|
||||
let existing_conf_set: HashMap<&str, &str> = existing_conf
|
||||
.split_terminator('\n')
|
||||
.map(|e| e.split_once("=").expect("invalid item"))
|
||||
.collect();
|
||||
new_conf_set.insert("port", existing_conf_set["port"]);
|
||||
*new_conf = new_conf_set
|
||||
.iter()
|
||||
.map(|(k, v)| format!("{k}={v}"))
|
||||
.join("\n");
|
||||
}
|
||||
}
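merge_spec treats both configs as newline-separated key=value lists: it drops neon.safekeepers from the incoming conf (safekeepers are set separately via ALTER SYSTEM above) and keeps the secondary's existing port. A rough Python equivalent of that merge, for illustration only:

```python
def merge_conf(new_conf: str, existing_conf: str) -> str:
    # Parse newline-separated key=value entries, as merge_spec does.
    new_set = dict(line.split("=", 1) for line in new_conf.splitlines() if line)
    existing = dict(line.split("=", 1) for line in existing_conf.splitlines() if line)

    new_set.pop("neon.safekeepers", None)  # supplied separately via ALTER SYSTEM
    new_set["port"] = existing["port"]     # keep the secondary's own port

    return "\n".join(f"{k}={v}" for k, v in new_set.items())
```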
|
||||
|
||||
@@ -1,23 +1,40 @@
|
||||
use std::sync::Arc;
|
||||
use std::fs::File;
|
||||
use std::thread;
|
||||
use std::{path::Path, sync::Arc};
|
||||
|
||||
use compute_api::responses::ComputeStatus;
|
||||
use anyhow::Result;
|
||||
use compute_api::responses::{ComputeConfig, ComputeStatus};
|
||||
use tracing::{error, info, instrument};
|
||||
|
||||
use crate::compute::ComputeNode;
|
||||
use crate::compute::{ComputeNode, ParsedSpec};
|
||||
use crate::spec::get_config_from_control_plane;
|
||||
|
||||
#[instrument(skip_all)]
|
||||
fn configurator_main_loop(compute: &Arc<ComputeNode>) {
|
||||
info!("waiting for reconfiguration requests");
|
||||
loop {
|
||||
let mut state = compute.state.lock().unwrap();
|
||||
/* BEGIN_HADRON */
|
||||
// RefreshConfiguration should only be used inside the loop
|
||||
assert_ne!(state.status, ComputeStatus::RefreshConfiguration);
|
||||
/* END_HADRON */
|
||||
|
||||
// We have to re-check the status after re-acquiring the lock because it could be that
|
||||
// the status has changed while we were waiting for the lock, and we might not need to
|
||||
// wait on the condition variable. Otherwise, we might end up in some soft-/deadlock, i.e.
|
||||
// we are waiting for a condition variable that will never be signaled.
|
||||
if state.status != ComputeStatus::ConfigurationPending {
|
||||
state = compute.state_changed.wait(state).unwrap();
|
||||
if compute.params.lakebase_mode {
|
||||
while state.status != ComputeStatus::ConfigurationPending
|
||||
&& state.status != ComputeStatus::RefreshConfigurationPending
|
||||
&& state.status != ComputeStatus::Failed
|
||||
{
|
||||
info!("configurator: compute status: {:?}, sleeping", state.status);
|
||||
state = compute.state_changed.wait(state).unwrap();
|
||||
}
|
||||
} else {
|
||||
// We have to re-check the status after re-acquiring the lock because it could be that
|
||||
// the status has changed while we were waiting for the lock, and we might not need to
|
||||
// wait on the condition variable. Otherwise, we might end up in some soft-/deadlock, i.e.
|
||||
// we are waiting for a condition variable that will never be signaled.
|
||||
if state.status != ComputeStatus::ConfigurationPending {
|
||||
state = compute.state_changed.wait(state).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
// Re-check the status after waking up
|
||||
@@ -37,6 +54,133 @@ fn configurator_main_loop(compute: &Arc<ComputeNode>) {
|
||||
// XXX: used to test that API is blocking
|
||||
// std::thread::sleep(std::time::Duration::from_millis(10000));
|
||||
|
||||
compute.set_status(new_status);
|
||||
} else if state.status == ComputeStatus::RefreshConfigurationPending {
|
||||
info!(
|
||||
"compute node suspects its configuration is out of date, now refreshing configuration"
|
||||
);
|
||||
state.set_status(ComputeStatus::RefreshConfiguration, &compute.state_changed);
|
||||
// Drop the lock guard here to avoid holding the lock while downloading config from the control plane / HCC.
|
||||
// This is the only thread that can move compute_ctl out of the `RefreshConfiguration` state, so it
|
||||
// is safe to drop the lock like this.
|
||||
drop(state);
|
||||
|
||||
let get_config_result: anyhow::Result<ComputeConfig> =
|
||||
if let Some(config_path) = &compute.params.config_path_test_only {
|
||||
// This path is only to make testing easier. In production we always get the config from the HCC.
|
||||
info!(
|
||||
"reloading config.json from path: {}",
|
||||
config_path.to_string_lossy()
|
||||
);
|
||||
let path = Path::new(config_path);
|
||||
if let Ok(file) = File::open(path) {
|
||||
match serde_json::from_reader::<File, ComputeConfig>(file) {
|
||||
Ok(config) => Ok(config),
|
||||
Err(e) => {
|
||||
error!("could not parse config file: {}", e);
|
||||
Err(anyhow::anyhow!("could not parse config file: {}", e))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
error!(
|
||||
"could not open config file at path: {:?}",
|
||||
config_path.to_string_lossy()
|
||||
);
|
||||
Err(anyhow::anyhow!(
|
||||
"could not open config file at path: {}",
|
||||
config_path.to_string_lossy()
|
||||
))
|
||||
}
|
||||
} else if let Some(control_plane_uri) = &compute.params.control_plane_uri {
|
||||
get_config_from_control_plane(control_plane_uri, &compute.params.compute_id)
|
||||
} else {
|
||||
Err(anyhow::anyhow!("config_path_test_only is not set"))
|
||||
};
|
||||
|
||||
// Parse any received ComputeSpec and transpose the result into a Result<Option<ParsedSpec>>.
|
||||
let parsed_spec_result: Result<Option<ParsedSpec>> =
|
||||
get_config_result.and_then(|config| {
|
||||
if let Some(spec) = config.spec {
|
||||
if let Ok(pspec) = ParsedSpec::try_from(spec) {
|
||||
Ok(Some(pspec))
|
||||
} else {
|
||||
Err(anyhow::anyhow!("could not parse spec"))
|
||||
}
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
});
|
||||
|
||||
let new_status: ComputeStatus;
|
||||
match parsed_spec_result {
|
||||
// Control plane (HCM) returned a spec and we were able to parse it.
|
||||
Ok(Some(pspec)) => {
|
||||
{
|
||||
let mut state = compute.state.lock().unwrap();
|
||||
// Defensive programming to make sure this thread is indeed the only one that can move the compute
|
||||
// node out of the `RefreshConfiguration` state. Would be nice if we can encode this invariant
|
||||
// into the type system.
|
||||
assert_eq!(state.status, ComputeStatus::RefreshConfiguration);
|
||||
|
||||
if state.pspec.as_ref().map(|ps| ps.pageserver_connstr.clone())
|
||||
== Some(pspec.pageserver_connstr.clone())
|
||||
{
|
||||
info!(
|
||||
"Refresh configuration: Retrieved spec is the same as the current spec. Waiting for control plane to update the spec before attempting reconfiguration."
|
||||
);
|
||||
state.status = ComputeStatus::Running;
|
||||
compute.state_changed.notify_all();
|
||||
drop(state);
|
||||
std::thread::sleep(std::time::Duration::from_secs(5));
|
||||
continue;
|
||||
}
|
||||
// state.pspec is consumed by compute.reconfigure() below. Note that compute.reconfigure() will acquire
|
||||
// the compute.state lock again so we need to have the lock guard go out of scope here. We could add a
|
||||
// "locked" variant of compute.reconfigure() that takes the lock guard as an argument to make this cleaner,
|
||||
// but it's not worth forking the codebase too much for this minor point alone right now.
|
||||
state.pspec = Some(pspec);
|
||||
}
|
||||
match compute.reconfigure() {
|
||||
Ok(_) => {
|
||||
info!("Refresh configuration: compute node configured");
|
||||
new_status = ComputeStatus::Running;
|
||||
}
|
||||
Err(e) => {
|
||||
error!(
|
||||
"Refresh configuration: could not configure compute node: {}",
|
||||
e
|
||||
);
|
||||
// Set the compute node back to the `RefreshConfigurationPending` state if the configuration
|
||||
// was not successful. It should be okay to treat this situation the same as if the loop
|
||||
// hasn't executed yet as long as the detection side keeps notifying.
|
||||
new_status = ComputeStatus::RefreshConfigurationPending;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Control plane (HCM)'s response does not contain a spec. This is the "Empty" attachment case.
|
||||
Ok(None) => {
|
||||
info!(
|
||||
"Compute Manager signaled that this compute is no longer attached to any storage. Exiting."
|
||||
);
|
||||
// We just immediately terminate the whole compute_ctl in this case. It's not necessary to attempt a
|
||||
// clean shutdown as Postgres is probably not responding anyway (which is why we are in this refresh
|
||||
// configuration state).
|
||||
std::process::exit(1);
|
||||
}
|
||||
// Various error cases:
|
||||
// - The request to the control plane (HCM) either failed or returned a malformed spec.
|
||||
// - compute_ctl itself is configured incorrectly (e.g., compute_id is not set).
|
||||
Err(e) => {
|
||||
error!(
|
||||
"Refresh configuration: error getting a parsed spec: {:?}",
|
||||
e
|
||||
);
|
||||
new_status = ComputeStatus::RefreshConfigurationPending;
|
||||
// We may be dealing with an overloaded HCM if we end up in this path. Backoff 5 seconds before
|
||||
// retrying to avoid hammering the HCM.
|
||||
std::thread::sleep(std::time::Duration::from_secs(5));
|
||||
}
|
||||
}
|
||||
compute.set_status(new_status);
|
||||
} else if state.status == ComputeStatus::Failed {
|
||||
info!("compute node is now in Failed state, exiting");
|
||||
|
||||
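Condensing the refresh branch above: a successfully parsed spec triggers a reconfigure (back to Running on success, back to RefreshConfigurationPending on failure), an explicitly empty spec means the compute is detached and the process exits, and any error backs off before retrying. A schematic Python sketch of that outcome handling, purely illustrative:

```python
import sys
import time

def handle_refresh_outcome(outcome, spec=None, reconfigure=lambda spec: True):
    # outcome: "spec" (parsed spec received), "empty" (no spec -> detached), "error"
    if outcome == "spec":
        return "Running" if reconfigure(spec) else "RefreshConfigurationPending"
    if outcome == "empty":
        sys.exit(1)   # detached from storage: terminate compute_ctl
    time.sleep(5)     # error talking to the control plane: back off before retrying
    return "RefreshConfigurationPending"
```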
@@ -16,13 +16,29 @@ use crate::http::JsonResponse;
|
||||
#[derive(Clone, Debug)]
|
||||
pub(in crate::http) struct Authorize {
|
||||
compute_id: String,
|
||||
// BEGIN HADRON
|
||||
// Hadron instance ID. Only set if it's a Lakebase V1 a.k.a. Hadron instance.
|
||||
instance_id: Option<String>,
|
||||
// END HADRON
|
||||
jwks: JwkSet,
|
||||
validation: Validation,
|
||||
}
|
||||
|
||||
impl Authorize {
|
||||
pub fn new(compute_id: String, jwks: JwkSet) -> Self {
|
||||
pub fn new(compute_id: String, instance_id: Option<String>, jwks: JwkSet) -> Self {
|
||||
let mut validation = Validation::new(Algorithm::EdDSA);
|
||||
|
||||
// BEGIN HADRON
|
||||
let use_rsa = jwks.keys.iter().any(|jwk| {
|
||||
jwk.common
|
||||
.key_algorithm
|
||||
.is_some_and(|alg| alg == jsonwebtoken::jwk::KeyAlgorithm::RS256)
|
||||
});
|
||||
if use_rsa {
|
||||
validation = Validation::new(Algorithm::RS256);
|
||||
}
|
||||
// END HADRON
|
||||
|
||||
validation.validate_exp = true;
|
||||
// Unused by the control plane
|
||||
validation.validate_nbf = false;
|
||||
@@ -34,6 +50,7 @@ impl Authorize {
|
||||
|
||||
Self {
|
||||
compute_id,
|
||||
instance_id,
|
||||
jwks,
|
||||
validation,
|
||||
}
|
||||
@@ -47,10 +64,20 @@ impl AsyncAuthorizeRequest<Body> for Authorize {
|
||||
|
||||
fn authorize(&mut self, mut request: Request<Body>) -> Self::Future {
|
||||
let compute_id = self.compute_id.clone();
|
||||
let is_hadron_instance = self.instance_id.is_some();
|
||||
let jwks = self.jwks.clone();
|
||||
let validation = self.validation.clone();
|
||||
|
||||
Box::pin(async move {
|
||||
// BEGIN HADRON
|
||||
// In Hadron deployments the "external" HTTP endpoint on compute_ctl can only be
|
||||
// accessed by trusted components (enforced by dblet network policy), so we can bypass
|
||||
// all auth here.
|
||||
if is_hadron_instance {
|
||||
return Ok(request);
|
||||
}
|
||||
// END HADRON
|
||||
|
||||
let TypedHeader(Authorization(bearer)) = request
|
||||
.extract_parts::<TypedHeader<Authorization<Bearer>>>()
|
||||
.await
|
||||
|
||||
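The Hadron change above switches the expected JWT algorithm from EdDSA to RS256 when any JWKS key advertises RS256. A minimal sketch of that selection over a plain JWKS dict (illustrative; it does not use the jsonwebtoken crate's types):

```python
def pick_validation_alg(jwks: dict) -> str:
    # Default to EdDSA; use RS256 if any JWKS key declares it, as in Authorize::new above.
    if any(key.get("alg") == "RS256" for key in jwks.get("keys", [])):
        return "RS256"
    return "EdDSA"

assert pick_validation_alg({"keys": [{"alg": "RS256"}]}) == "RS256"
assert pick_validation_alg({"keys": [{"alg": "EdDSA"}]}) == "EdDSA"
```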
@@ -96,7 +96,7 @@ paths:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/SafekeepersLsn"
|
||||
$ref: "#/components/schemas/ComputeSchemaWithLsn"
|
||||
responses:
|
||||
200:
|
||||
description: Promote succeeded or wasn't started
|
||||
@@ -297,14 +297,7 @@ paths:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
required:
|
||||
- spec
|
||||
properties:
|
||||
spec:
|
||||
# XXX: I don't want to explain current spec in the OpenAPI format,
|
||||
# as it could be changed really soon. Consider doing it later.
|
||||
type: object
|
||||
$ref: "#/components/schemas/ComputeSchema"
|
||||
responses:
|
||||
200:
|
||||
description: Compute configuration finished.
|
||||
@@ -591,18 +584,25 @@ components:
|
||||
type: string
|
||||
example: "1.0.0"
|
||||
|
||||
SafekeepersLsn:
|
||||
ComputeSchema:
|
||||
type: object
|
||||
required:
|
||||
- safekeepers
|
||||
- spec
|
||||
properties:
|
||||
spec:
|
||||
type: object
|
||||
ComputeSchemaWithLsn:
|
||||
type: object
|
||||
required:
|
||||
- spec
|
||||
- wal_flush_lsn
|
||||
properties:
|
||||
safekeepers:
|
||||
description: Primary replica safekeepers
|
||||
type: string
|
||||
spec:
|
||||
$ref: "#/components/schemas/ComputeState"
|
||||
wal_flush_lsn:
|
||||
description: Primary last WAL flush LSN
|
||||
type: string
|
||||
description: "last WAL flush LSN"
|
||||
example: "0/028F10D8"
|
||||
|
||||
LfcPrewarmState:
|
||||
type: object
|
||||
|
||||
@@ -43,7 +43,12 @@ pub(in crate::http) async fn configure(
|
||||
// configure request for tracing purposes.
|
||||
state.startup_span = Some(tracing::Span::current());
|
||||
|
||||
state.pspec = Some(pspec);
|
||||
if compute.params.lakebase_mode {
|
||||
ComputeNode::set_spec(&compute.params, &mut state, pspec);
|
||||
} else {
|
||||
state.pspec = Some(pspec);
|
||||
}
|
||||
|
||||
state.set_status(ComputeStatus::ConfigurationPending, &compute.state_changed);
|
||||
drop(state);
|
||||
}
|
||||
|
||||
34
compute_tools/src/http/routes/hadron_liveness_probe.rs
Normal file
@@ -0,0 +1,34 @@
|
||||
use crate::pg_isready::pg_isready;
|
||||
use crate::{compute::ComputeNode, http::JsonResponse};
|
||||
use axum::{extract::State, http::StatusCode, response::Response};
|
||||
use std::sync::Arc;
|
||||
|
||||
/// NOTE: NOT ENABLED YET
|
||||
/// Detect if the compute is alive.
|
||||
/// Called by the liveness probe of the compute container.
|
||||
pub(in crate::http) async fn hadron_liveness_probe(
|
||||
State(compute): State<Arc<ComputeNode>>,
|
||||
) -> Response {
|
||||
let port = match compute.params.connstr.port() {
|
||||
Some(port) => port,
|
||||
None => {
|
||||
return JsonResponse::error(
|
||||
StatusCode::INTERNAL_SERVER_ERROR,
|
||||
"Failed to get the port from the connection string",
|
||||
);
|
||||
}
|
||||
};
|
||||
match pg_isready(&compute.params.pg_isready_bin, port) {
|
||||
Ok(_) => {
|
||||
// The connection is successful, so the compute is alive.
|
||||
// Return a 200 OK response.
|
||||
JsonResponse::success(StatusCode::OK, "ok")
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!("Hadron liveness probe failed: {}", e);
|
||||
// The connection failed, so the compute is not alive.
|
||||
// Return a 500 Internal Server Error response.
|
||||
JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e)
|
||||
}
|
||||
}
|
||||
}
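As a rough sketch only (not part of this commit), a probe client for this endpoint could look like the following; the port is a placeholder assumption, and a 200 response is treated as "alive":

// Illustrative liveness check against the endpoint above; 3080 is a placeholder port.
#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let resp = reqwest::get("http://127.0.0.1:3080/hadron_liveness_probe").await?;
    if resp.status().is_success() {
        println!("compute is alive");
    } else {
        println!("liveness probe failed: HTTP {}", resp.status());
    }
    Ok(())
}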
|
||||
@@ -13,6 +13,7 @@ use metrics::{Encoder, TextEncoder};
|
||||
|
||||
use crate::communicator_socket_client::connect_communicator_socket;
|
||||
use crate::compute::ComputeNode;
|
||||
use crate::hadron_metrics;
|
||||
use crate::http::JsonResponse;
|
||||
use crate::metrics::collect;
|
||||
|
||||
@@ -21,11 +22,18 @@ pub(in crate::http) async fn get_metrics() -> Response {
|
||||
// When we call TextEncoder::encode() below, it will immediately return an
|
||||
// error if a metric family has no metrics, so we need to preemptively
|
||||
// filter out metric families with no metrics.
|
||||
let metrics = collect()
|
||||
let mut metrics = collect()
|
||||
.into_iter()
|
||||
.filter(|m| !m.get_metric().is_empty())
|
||||
.collect::<Vec<MetricFamily>>();
|
||||
|
||||
// Add Hadron metrics.
|
||||
let hadron_metrics: Vec<MetricFamily> = hadron_metrics::collect()
|
||||
.into_iter()
|
||||
.filter(|m| !m.get_metric().is_empty())
|
||||
.collect();
|
||||
metrics.extend(hadron_metrics);
|
||||
|
||||
let encoder = TextEncoder::new();
|
||||
let mut buffer = vec![];
|
||||
|
||||
|
||||
@@ -10,11 +10,13 @@ pub(in crate::http) mod extension_server;
|
||||
pub(in crate::http) mod extensions;
|
||||
pub(in crate::http) mod failpoints;
|
||||
pub(in crate::http) mod grants;
|
||||
pub(in crate::http) mod hadron_liveness_probe;
|
||||
pub(in crate::http) mod insights;
|
||||
pub(in crate::http) mod lfc;
|
||||
pub(in crate::http) mod metrics;
|
||||
pub(in crate::http) mod metrics_json;
|
||||
pub(in crate::http) mod promote;
|
||||
pub(in crate::http) mod refresh_configuration;
|
||||
pub(in crate::http) mod status;
|
||||
pub(in crate::http) mod terminate;
|
||||
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
use crate::http::JsonResponse;
|
||||
use axum::Form;
|
||||
use axum::extract::Json;
|
||||
use http::StatusCode;
|
||||
|
||||
pub(in crate::http) async fn promote(
|
||||
compute: axum::extract::State<std::sync::Arc<crate::compute::ComputeNode>>,
|
||||
Form(safekeepers_lsn): Form<compute_api::responses::SafekeepersLsn>,
|
||||
Json(cfg): Json<compute_api::responses::PromoteConfig>,
|
||||
) -> axum::response::Response {
|
||||
let state = compute.promote(safekeepers_lsn).await;
|
||||
if let compute_api::responses::PromoteState::Failed { error } = state {
|
||||
return JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, error);
|
||||
let state = compute.promote(cfg).await;
|
||||
if let compute_api::responses::PromoteState::Failed { error: _ } = state {
|
||||
return JsonResponse::create_response(StatusCode::INTERNAL_SERVER_ERROR, state);
|
||||
}
|
||||
JsonResponse::success(StatusCode::OK, state)
|
||||
}
|
||||
|
||||
29
compute_tools/src/http/routes/refresh_configuration.rs
Normal file
@@ -0,0 +1,29 @@
|
||||
// This file is added by Hadron
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use axum::{
|
||||
extract::State,
|
||||
response::{IntoResponse, Response},
|
||||
};
|
||||
use http::StatusCode;
|
||||
|
||||
use crate::compute::ComputeNode;
|
||||
use crate::hadron_metrics::POSTGRES_PAGESTREAM_REQUEST_ERRORS;
|
||||
use crate::http::JsonResponse;
|
||||
|
||||
/// The /refresh_configuration POST method is used to nudge compute_ctl to pull a new spec
|
||||
/// from the HCC and attempt to reconfigure Postgres with the new spec. The method does not wait
|
||||
/// for the reconfiguration to complete. Rather, it simply delivers a signal that will cause
|
||||
/// configuration to be reloaded in a best effort manner. Invocation of this method does not
|
||||
/// guarantee that a reconfiguration will occur. The caller should consider re-sending this
|
||||
/// request while it believes that the compute configuration is out of date.
|
||||
pub(in crate::http) async fn refresh_configuration(
|
||||
State(compute): State<Arc<ComputeNode>>,
|
||||
) -> Response {
|
||||
POSTGRES_PAGESTREAM_REQUEST_ERRORS.inc();
|
||||
match compute.signal_refresh_configuration().await {
|
||||
Ok(_) => StatusCode::OK.into_response(),
|
||||
Err(e) => JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e),
|
||||
}
|
||||
}
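Per the doc comment above, callers are expected to keep nudging this endpoint until the compute picks up a fresh spec. A minimal caller-side sketch, assuming a placeholder address and using reqwest/tokio (illustrative only, not part of this commit):

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let client = reqwest::Client::new();
    for _ in 0..10 {
        let resp = client
            .post("http://127.0.0.1:3080/refresh_configuration")
            .send()
            .await?;
        if resp.status().is_success() {
            println!("refresh signal accepted");
            return Ok(());
        }
        // A 5xx here usually means a refresh is already in flight; back off and retry.
        tokio::time::sleep(std::time::Duration::from_secs(1)).await;
    }
    anyhow::bail!("compute configuration still appears out of date")
}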
|
||||
@@ -1,7 +1,7 @@
|
||||
use crate::compute::{ComputeNode, forward_termination_signal};
|
||||
use crate::http::JsonResponse;
|
||||
use axum::extract::State;
|
||||
use axum::response::Response;
|
||||
use axum::response::{IntoResponse, Response};
|
||||
use axum_extra::extract::OptionalQuery;
|
||||
use compute_api::responses::{ComputeStatus, TerminateMode, TerminateResponse};
|
||||
use http::StatusCode;
|
||||
@@ -33,7 +33,29 @@ pub(in crate::http) async fn terminate(
|
||||
if !matches!(state.status, ComputeStatus::Empty | ComputeStatus::Running) {
|
||||
return JsonResponse::invalid_status(state.status);
|
||||
}
|
||||
|
||||
// If compute is Empty, there's no Postgres to terminate. The regular compute_ctl termination path
|
||||
// assumes Postgres to be configured and running, so we just special-handle this case by exiting
|
||||
// the process directly.
|
||||
if compute.params.lakebase_mode && state.status == ComputeStatus::Empty {
|
||||
drop(state);
|
||||
info!("terminating empty compute - will exit process");
|
||||
|
||||
// Queue a task to exit the process after 5 seconds. The 5-second delay aims to
|
||||
// give enough time for the HTTP response to be sent so that HCM doesn't get an abrupt
|
||||
// connection termination.
|
||||
tokio::spawn(async {
|
||||
tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
|
||||
info!("exiting process after terminating empty compute");
|
||||
std::process::exit(0);
|
||||
});
|
||||
|
||||
return StatusCode::OK.into_response();
|
||||
}
|
||||
|
||||
// For Running status, proceed with normal termination
|
||||
state.set_status(mode.into(), &compute.state_changed);
|
||||
drop(state);
|
||||
}
|
||||
|
||||
forward_termination_signal(false);
|
||||
|
||||
@@ -23,7 +23,8 @@ use super::{
|
||||
middleware::authorize::Authorize,
|
||||
routes::{
|
||||
check_writability, configure, database_schema, dbs_and_roles, extension_server, extensions,
|
||||
grants, insights, lfc, metrics, metrics_json, promote, status, terminate,
|
||||
grants, hadron_liveness_probe, insights, lfc, metrics, metrics_json, promote,
|
||||
refresh_configuration, status, terminate,
|
||||
},
|
||||
};
|
||||
use crate::compute::ComputeNode;
|
||||
@@ -43,6 +44,7 @@ pub enum Server {
|
||||
port: u16,
|
||||
config: ComputeCtlConfig,
|
||||
compute_id: String,
|
||||
instance_id: Option<String>,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -67,7 +69,12 @@ impl From<&Server> for Router<Arc<ComputeNode>> {
|
||||
post(extension_server::download_extension),
|
||||
)
|
||||
.route("/extensions", post(extensions::install_extension))
|
||||
.route("/grants", post(grants::add_grant));
|
||||
.route("/grants", post(grants::add_grant))
|
||||
// Hadron: Compute-initiated configuration refresh
|
||||
.route(
|
||||
"/refresh_configuration",
|
||||
post(refresh_configuration::refresh_configuration),
|
||||
);
|
||||
|
||||
// Add in any testing support
|
||||
if cfg!(feature = "testing") {
|
||||
@@ -79,7 +86,10 @@ impl From<&Server> for Router<Arc<ComputeNode>> {
|
||||
router
|
||||
}
|
||||
Server::External {
|
||||
config, compute_id, ..
|
||||
config,
|
||||
compute_id,
|
||||
instance_id,
|
||||
..
|
||||
} => {
|
||||
let unauthenticated_router = Router::<Arc<ComputeNode>>::new()
|
||||
.route("/metrics", get(metrics::get_metrics))
|
||||
@@ -100,8 +110,13 @@ impl From<&Server> for Router<Arc<ComputeNode>> {
|
||||
.route("/metrics.json", get(metrics_json::get_metrics))
|
||||
.route("/status", get(status::get_status))
|
||||
.route("/terminate", post(terminate::terminate))
|
||||
.route(
|
||||
"/hadron_liveness_probe",
|
||||
get(hadron_liveness_probe::hadron_liveness_probe),
|
||||
)
|
||||
.layer(AsyncRequireAuthorizationLayer::new(Authorize::new(
|
||||
compute_id.clone(),
|
||||
instance_id.clone(),
|
||||
config.jwks.clone(),
|
||||
)));
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ use std::collections::HashMap;
|
||||
|
||||
use anyhow::Result;
|
||||
use compute_api::responses::{InstalledExtension, InstalledExtensions};
|
||||
use once_cell::sync::Lazy;
|
||||
use tokio_postgres::error::Error as PostgresError;
|
||||
use tokio_postgres::{Client, Config, NoTls};
|
||||
|
||||
@@ -119,3 +120,7 @@ pub async fn get_installed_extensions(
|
||||
extensions: extensions_map.into_values().collect(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn initialize_metrics() {
|
||||
Lazy::force(&INSTALLED_EXTENSIONS);
|
||||
}
|
||||
|
||||
@@ -26,6 +26,7 @@ pub mod monitor;
|
||||
pub mod pageserver_client;
|
||||
pub mod params;
|
||||
pub mod pg_helpers;
|
||||
pub mod pg_isready;
|
||||
pub mod pgbouncer;
|
||||
pub(crate) mod ro_replica;
|
||||
pub mod rsyslog;
|
||||
|
||||
@@ -1,7 +1,10 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{LazyLock, RwLock};
|
||||
use tracing::Subscriber;
|
||||
use tracing::info;
|
||||
use tracing_subscriber::layer::SubscriberExt;
|
||||
use tracing_appender;
|
||||
use tracing_subscriber::prelude::*;
|
||||
use tracing_subscriber::{fmt, layer::SubscriberExt, registry::LookupSpan};
|
||||
|
||||
/// Initialize logging to stderr, and OpenTelemetry tracing and exporter.
|
||||
///
|
||||
@@ -15,16 +18,44 @@ use tracing_subscriber::prelude::*;
|
||||
///
|
||||
pub fn init_tracing_and_logging(
|
||||
default_log_level: &str,
|
||||
) -> anyhow::Result<Option<tracing_utils::Provider>> {
|
||||
log_dir_opt: &Option<String>,
|
||||
) -> anyhow::Result<(
|
||||
Option<tracing_utils::Provider>,
|
||||
Option<tracing_appender::non_blocking::WorkerGuard>,
|
||||
)> {
|
||||
// Initialize Logging
|
||||
let env_filter = tracing_subscriber::EnvFilter::try_from_default_env()
|
||||
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new(default_log_level));
|
||||
|
||||
// Standard output streams
|
||||
let fmt_layer = tracing_subscriber::fmt::layer()
|
||||
.with_ansi(false)
|
||||
.with_target(false)
|
||||
.with_writer(std::io::stderr);
|
||||
|
||||
// Logs with file rotation. Files in `$log_dir/pgcctl.yyyy-MM-dd`
|
||||
let (json_to_file_layer, _file_logs_guard) = if let Some(log_dir) = log_dir_opt {
|
||||
std::fs::create_dir_all(log_dir)?;
|
||||
let file_logs_appender = tracing_appender::rolling::RollingFileAppender::builder()
|
||||
.rotation(tracing_appender::rolling::Rotation::DAILY)
|
||||
.filename_prefix("pgcctl")
|
||||
// Lib appends to existing files, so we will keep files for up to 2 days even on restart loops.
|
||||
// At minimum, log-daemon will have 1 day to detect and upload a file (if created right before midnight).
|
||||
.max_log_files(2)
|
||||
.build(log_dir)
|
||||
.expect("Initializing rolling file appender should succeed");
|
||||
let (file_logs_writer, _file_logs_guard) =
|
||||
tracing_appender::non_blocking(file_logs_appender);
|
||||
let json_to_file_layer = tracing_subscriber::fmt::layer()
|
||||
.with_ansi(false)
|
||||
.with_target(false)
|
||||
.event_format(PgJsonLogShapeFormatter)
|
||||
.with_writer(file_logs_writer);
|
||||
(Some(json_to_file_layer), Some(_file_logs_guard))
|
||||
} else {
|
||||
(None, None)
|
||||
};
|
||||
|
||||
// Initialize OpenTelemetry
|
||||
let provider =
|
||||
tracing_utils::init_tracing("compute_ctl", tracing_utils::ExportConfig::default());
|
||||
@@ -35,12 +66,13 @@ pub fn init_tracing_and_logging(
|
||||
.with(env_filter)
|
||||
.with(otlp_layer)
|
||||
.with(fmt_layer)
|
||||
.with(json_to_file_layer)
|
||||
.init();
|
||||
tracing::info!("logging and tracing started");
|
||||
|
||||
utils::logging::replace_panic_hook_with_tracing_panic_hook().forget();
|
||||
|
||||
Ok(provider)
|
||||
Ok((provider, _file_logs_guard))
|
||||
}
|
||||
|
||||
/// Replace all newline characters with a special character to make it
|
||||
@@ -95,3 +127,157 @@ pub fn startup_context_from_env() -> Option<opentelemetry::Context> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Track relevant IDs
|
||||
const UNKNOWN_IDS: &str = r#""pg_instance_id": "", "pg_compute_id": """#;
|
||||
static IDS: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new(UNKNOWN_IDS.to_string()));
|
||||
|
||||
pub fn update_ids(instance_id: &Option<String>, compute_id: &Option<String>) -> anyhow::Result<()> {
|
||||
let ids = format!(
|
||||
r#""pg_instance_id": "{}", "pg_compute_id": "{}""#,
|
||||
instance_id.as_ref().map(|s| s.as_str()).unwrap_or_default(),
|
||||
compute_id.as_ref().map(|s| s.as_str()).unwrap_or_default()
|
||||
);
|
||||
let mut guard = IDS
|
||||
.write()
|
||||
.map_err(|e| anyhow::anyhow!("Log set id's rwlock poisoned: {}", e))?;
|
||||
*guard = ids;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Massage compute_ctl logs into PG json log shape so we can use the same Lumberjack setup.
|
||||
struct PgJsonLogShapeFormatter;
|
||||
impl<S, N> fmt::format::FormatEvent<S, N> for PgJsonLogShapeFormatter
|
||||
where
|
||||
S: Subscriber + for<'a> LookupSpan<'a>,
|
||||
N: for<'a> fmt::format::FormatFields<'a> + 'static,
|
||||
{
|
||||
fn format_event(
|
||||
&self,
|
||||
ctx: &fmt::FmtContext<'_, S, N>,
|
||||
mut writer: fmt::format::Writer<'_>,
|
||||
event: &tracing::Event<'_>,
|
||||
) -> std::fmt::Result {
|
||||
// Format values from the event's metadata, and open message string
|
||||
let metadata = event.metadata();
|
||||
{
|
||||
let ids_guard = IDS.read();
|
||||
let ids = ids_guard
|
||||
.as_ref()
|
||||
.map(|guard| guard.as_str())
|
||||
// Suppress the error so that we don't lose all uploaded/file logs if something goes badly wrong. We would notice the missing IDs.
|
||||
.unwrap_or(UNKNOWN_IDS);
|
||||
write!(
|
||||
&mut writer,
|
||||
r#"{{"timestamp": "{}", "error_severity": "{}", "file_name": "{}", "backend_type": "compute_ctl_self", {}, "message": "#,
|
||||
chrono::Utc::now().format("%Y-%m-%d %H:%M:%S%.3f GMT"),
|
||||
metadata.level(),
|
||||
metadata.target(),
|
||||
ids
|
||||
)?;
|
||||
}
|
||||
|
||||
let mut message = String::new();
|
||||
let message_writer = fmt::format::Writer::new(&mut message);
|
||||
|
||||
// Gather the message
|
||||
ctx.field_format().format_fields(message_writer, event)?;
|
||||
|
||||
// TODO: any better options than to copy-paste this OSS span formatter?
|
||||
// impl<S, N, T> FormatEvent<S, N> for Format<Full, T>
|
||||
// https://docs.rs/tracing-subscriber/latest/tracing_subscriber/fmt/trait.FormatEvent.html#impl-FormatEvent%3CS,+N%3E-for-Format%3CFull,+T%3E
|
||||
|
||||
// write message, close bracket, and new line
|
||||
writeln!(writer, "{}}}", serde_json::to_string(&message).unwrap())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "testing")]
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use std::{cell::RefCell, io};
|
||||
|
||||
// Use thread_local! instead of Mutex for test isolation
|
||||
thread_local! {
|
||||
static WRITER_OUTPUT: RefCell<String> = const { RefCell::new(String::new()) };
|
||||
}
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
struct StaticStringWriter;
|
||||
|
||||
impl io::Write for StaticStringWriter {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
let output = String::from_utf8(buf.to_vec()).expect("Invalid UTF-8 in test output");
|
||||
WRITER_OUTPUT.with(|s| s.borrow_mut().push_str(&output));
|
||||
Ok(buf.len())
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::MakeWriter<'_> for StaticStringWriter {
|
||||
type Writer = Self;
|
||||
|
||||
fn make_writer(&self) -> Self::Writer {
|
||||
Self
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_log_pg_json_shape_formatter() {
|
||||
// Use a scoped subscriber to prevent global state pollution
|
||||
let subscriber = tracing_subscriber::registry().with(
|
||||
tracing_subscriber::fmt::layer()
|
||||
.with_ansi(false)
|
||||
.with_target(false)
|
||||
.event_format(PgJsonLogShapeFormatter)
|
||||
.with_writer(StaticStringWriter),
|
||||
);
|
||||
|
||||
let _ = update_ids(&Some("000".to_string()), &Some("111".to_string()));
|
||||
|
||||
// Clear any previous test state
|
||||
WRITER_OUTPUT.with(|s| s.borrow_mut().clear());
|
||||
|
||||
let messages = [
|
||||
"test message",
|
||||
r#"json escape check: name="BatchSpanProcessor.Flush.ExportError" reason="Other(reqwest::Error { kind: Request, url: \"http://localhost:4318/v1/traces\", source: hyper_
|
||||
util::client::legacy::Error(Connect, ConnectError(\"tcp connect error\", Os { code: 111, kind: ConnectionRefused, message: \"Connection refused\" })) })" Failed during the export process"#,
|
||||
];
|
||||
|
||||
tracing::subscriber::with_default(subscriber, || {
|
||||
for message in messages {
|
||||
tracing::info!(message);
|
||||
}
|
||||
});
|
||||
tracing::info!("not test message");
|
||||
|
||||
// Get captured output
|
||||
let output = WRITER_OUTPUT.with(|s| s.borrow().clone());
|
||||
|
||||
let json_strings: Vec<&str> = output.lines().collect();
|
||||
assert_eq!(
|
||||
json_strings.len(),
|
||||
messages.len(),
|
||||
"Log didn't have the expected number of json strings."
|
||||
);
|
||||
|
||||
let json_string_shape_regex = regex::Regex::new(
|
||||
r#"\{"timestamp": "\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} GMT", "error_severity": "INFO", "file_name": ".+", "backend_type": "compute_ctl_self", "pg_instance_id": "000", "pg_compute_id": "111", "message": ".+"\}"#
|
||||
).unwrap();
|
||||
|
||||
for (i, expected_message) in messages.iter().enumerate() {
|
||||
let json_string = json_strings[i];
|
||||
assert!(
|
||||
json_string_shape_regex.is_match(json_string),
|
||||
"Json log didn't match expected pattern:\n{json_string}",
|
||||
);
|
||||
let parsed_json: serde_json::Value = serde_json::from_str(json_string).unwrap();
|
||||
let actual_message = parsed_json["message"].as_str().unwrap();
|
||||
assert_eq!(*expected_message, actual_message);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
30
compute_tools/src/pg_isready.rs
Normal file
@@ -0,0 +1,30 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
|
||||
// Run `/usr/local/bin/pg_isready -p {port}`
|
||||
// Check the connectivity of PG
|
||||
// Success means PG is listening on the port and accepting connections
|
||||
// Note that PG does not need to authenticate the connection, nor reserve a connection quota for it.
|
||||
// See https://www.postgresql.org/docs/current/app-pg-isready.html
|
||||
pub fn pg_isready(bin: &str, port: u16) -> anyhow::Result<()> {
|
||||
let child_result = std::process::Command::new(bin)
|
||||
.arg("-p")
|
||||
.arg(port.to_string())
|
||||
.spawn();
|
||||
|
||||
child_result
|
||||
.context("spawn() failed")
|
||||
.and_then(|mut child| child.wait().context("wait() failed"))
|
||||
.and_then(|status| match status.success() {
|
||||
true => Ok(()),
|
||||
false => Err(anyhow!("process exited with {status}")),
|
||||
})
|
||||
// wrap any prior error with the overall context that we couldn't run the command
|
||||
.with_context(|| format!("could not run `{bin} --port {port}`"))
|
||||
}
|
||||
|
||||
// It's safe to assume pg_isready is in the same directory as postgres,
|
||||
// because it is a PG utility binary installed along with postgres.
|
||||
pub fn get_pg_isready_bin(pgbin: &str) -> String {
|
||||
let split = pgbin.split("/").collect::<Vec<&str>>();
|
||||
split[0..split.len() - 1].join("/") + "/pg_isready"
|
||||
}
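For reference, the same readiness check can be reproduced outside compute_tools. This is a minimal sketch assuming pg_isready is on PATH and Postgres listens on port 5432 (both assumptions, not taken from this commit):

use std::process::Command;

// Exit status 0 from pg_isready means the server accepts connections on the port.
fn main() {
    let status = Command::new("pg_isready")
        .args(["-p", "5432"])
        .status()
        .expect("failed to spawn pg_isready (is it on PATH?)");
    if status.success() {
        println!("postgres is accepting connections");
    } else {
        println!("postgres is not ready: {status}");
    }
}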
|
||||
@@ -560,7 +560,9 @@ enum EndpointCmd {
|
||||
Create(EndpointCreateCmdArgs),
|
||||
Start(EndpointStartCmdArgs),
|
||||
Reconfigure(EndpointReconfigureCmdArgs),
|
||||
RefreshConfiguration(EndpointRefreshConfigurationArgs),
|
||||
Stop(EndpointStopCmdArgs),
|
||||
UpdatePageservers(EndpointUpdatePageserversCmdArgs),
|
||||
GenerateJwt(EndpointGenerateJwtCmdArgs),
|
||||
}
|
||||
|
||||
@@ -731,6 +733,13 @@ struct EndpointReconfigureCmdArgs {
|
||||
safekeepers: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(clap::Args)]
|
||||
#[clap(about = "Refresh the endpoint's configuration by forcing it reload it's spec")]
|
||||
struct EndpointRefreshConfigurationArgs {
|
||||
#[clap(help = "Postgres endpoint id")]
|
||||
endpoint_id: String,
|
||||
}
|
||||
|
||||
#[derive(clap::Args)]
|
||||
#[clap(about = "Stop an endpoint")]
|
||||
struct EndpointStopCmdArgs {
|
||||
@@ -748,6 +757,16 @@ struct EndpointStopCmdArgs {
|
||||
mode: EndpointTerminateMode,
|
||||
}
|
||||
|
||||
#[derive(clap::Args)]
|
||||
#[clap(about = "Update the pageservers in the spec file of the compute endpoint")]
|
||||
struct EndpointUpdatePageserversCmdArgs {
|
||||
#[clap(help = "Postgres endpoint id")]
|
||||
endpoint_id: String,
|
||||
|
||||
#[clap(short = 'p', long, help = "Specified pageserver id")]
|
||||
pageserver_id: Option<NodeId>,
|
||||
}
|
||||
|
||||
#[derive(clap::Args)]
|
||||
#[clap(about = "Generate a JWT for an endpoint")]
|
||||
struct EndpointGenerateJwtCmdArgs {
|
||||
@@ -1531,7 +1550,7 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res
|
||||
let endpoint = cplane
|
||||
.endpoints
|
||||
.get(endpoint_id.as_str())
|
||||
.ok_or_else(|| anyhow::anyhow!("endpoint {endpoint_id} not found"))?;
|
||||
.ok_or_else(|| anyhow!("endpoint {endpoint_id} not found"))?;
|
||||
|
||||
if !args.allow_multiple {
|
||||
cplane.check_conflicting_endpoints(
|
||||
@@ -1639,6 +1658,44 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res
|
||||
println!("Starting existing endpoint {endpoint_id}...");
|
||||
endpoint.start(args).await?;
|
||||
}
|
||||
EndpointCmd::UpdatePageservers(args) => {
|
||||
let endpoint_id = &args.endpoint_id;
|
||||
let endpoint = cplane
|
||||
.endpoints
|
||||
.get(endpoint_id.as_str())
|
||||
.with_context(|| format!("postgres endpoint {endpoint_id} is not found"))?;
|
||||
let pageservers = match args.pageserver_id {
|
||||
Some(pageserver_id) => {
|
||||
let pageserver =
|
||||
PageServerNode::from_env(env, env.get_pageserver_conf(pageserver_id)?);
|
||||
|
||||
vec![(
|
||||
PageserverProtocol::Libpq,
|
||||
pageserver.pg_connection_config.host().clone(),
|
||||
pageserver.pg_connection_config.port(),
|
||||
)]
|
||||
}
|
||||
None => {
|
||||
let storage_controller = StorageController::from_env(env);
|
||||
storage_controller
|
||||
.tenant_locate(endpoint.tenant_id)
|
||||
.await?
|
||||
.shards
|
||||
.into_iter()
|
||||
.map(|shard| {
|
||||
(
|
||||
PageserverProtocol::Libpq,
|
||||
Host::parse(&shard.listen_pg_addr)
|
||||
.expect("Storage controller reported malformed host"),
|
||||
shard.listen_pg_port,
|
||||
)
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
};
|
||||
|
||||
endpoint.update_pageservers_in_config(pageservers).await?;
|
||||
}
|
||||
EndpointCmd::Reconfigure(args) => {
|
||||
let endpoint_id = &args.endpoint_id;
|
||||
let endpoint = cplane
|
||||
@@ -1692,6 +1749,14 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res
|
||||
.reconfigure(Some(pageservers), None, safekeepers, None)
|
||||
.await?;
|
||||
}
|
||||
EndpointCmd::RefreshConfiguration(args) => {
|
||||
let endpoint_id = &args.endpoint_id;
|
||||
let endpoint = cplane
|
||||
.endpoints
|
||||
.get(endpoint_id.as_str())
|
||||
.with_context(|| format!("postgres endpoint {endpoint_id} is not found"))?;
|
||||
endpoint.refresh_configuration().await?;
|
||||
}
|
||||
EndpointCmd::Stop(args) => {
|
||||
let endpoint_id = &args.endpoint_id;
|
||||
let endpoint = cplane
|
||||
|
||||
@@ -938,7 +938,9 @@ impl Endpoint {
|
||||
| ComputeStatus::Configuration
|
||||
| ComputeStatus::TerminationPendingFast
|
||||
| ComputeStatus::TerminationPendingImmediate
|
||||
| ComputeStatus::Terminated => {
|
||||
| ComputeStatus::Terminated
|
||||
| ComputeStatus::RefreshConfigurationPending
|
||||
| ComputeStatus::RefreshConfiguration => {
|
||||
bail!("unexpected compute status: {:?}", state.status)
|
||||
}
|
||||
}
|
||||
@@ -961,6 +963,29 @@ impl Endpoint {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Update the pageservers in the spec file of the endpoint. This is useful to test the spec refresh scenario.
|
||||
pub async fn update_pageservers_in_config(
|
||||
&self,
|
||||
pageservers: Vec<(PageserverProtocol, Host, u16)>,
|
||||
) -> Result<()> {
|
||||
let config_path = self.endpoint_path().join("config.json");
|
||||
let mut config: ComputeConfig = {
|
||||
let file = std::fs::File::open(&config_path)?;
|
||||
serde_json::from_reader(file)?
|
||||
};
|
||||
|
||||
let pageserver_connstring = Self::build_pageserver_connstr(&pageservers);
|
||||
assert!(!pageserver_connstring.is_empty());
|
||||
let mut spec = config.spec.unwrap();
|
||||
spec.pageserver_connstring = Some(pageserver_connstring);
|
||||
config.spec = Some(spec);
|
||||
|
||||
let file = std::fs::File::create(&config_path)?;
|
||||
serde_json::to_writer_pretty(file, &config)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Call the /status HTTP API
|
||||
pub async fn get_status(&self) -> Result<ComputeStatusResponse> {
|
||||
let client = reqwest::Client::new();
|
||||
@@ -1126,6 +1151,33 @@ impl Endpoint {
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
pub async fn refresh_configuration(&self) -> Result<()> {
|
||||
let client = reqwest::Client::builder()
|
||||
.timeout(Duration::from_secs(30))
|
||||
.build()
|
||||
.unwrap();
|
||||
let response = client
|
||||
.post(format!(
|
||||
"http://{}:{}/refresh_configuration",
|
||||
self.internal_http_address.ip(),
|
||||
self.internal_http_address.port()
|
||||
))
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
let status = response.status();
|
||||
if !(status.is_client_error() || status.is_server_error()) {
|
||||
Ok(())
|
||||
} else {
|
||||
let url = response.url().to_owned();
|
||||
let msg = match response.text().await {
|
||||
Ok(err_body) => format!("Error: {err_body}"),
|
||||
Err(_) => format!("Http error ({}) at {}.", status.as_u16(), url),
|
||||
};
|
||||
Err(anyhow::anyhow!(msg))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn connstr(&self, user: &str, db_name: &str) -> String {
|
||||
format!(
|
||||
"postgresql://{}@{}:{}/{}",
|
||||
|
||||
@@ -108,11 +108,10 @@ pub enum PromoteState {
|
||||
Failed { error: String },
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Default, Debug, Clone)]
|
||||
#[derive(Deserialize, Default, Debug)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
/// Result of /safekeepers_lsn
|
||||
pub struct SafekeepersLsn {
|
||||
pub safekeepers: String,
|
||||
pub struct PromoteConfig {
|
||||
pub spec: ComputeSpec,
|
||||
pub wal_flush_lsn: utils::lsn::Lsn,
|
||||
}
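For illustration only, a /promote request body shaped like PromoteConfig above could be built as follows; the ComputeSpec contents are elided because they depend on the deployment, and the LSN string reuses the example from the OpenAPI schema:

// Hypothetical request body for /promote; the spec object is left empty on purpose.
fn main() {
    let body = serde_json::json!({
        "spec": {},
        "wal_flush_lsn": "0/028F10D8"
    });
    println!("{}", serde_json::to_string_pretty(&body).unwrap());
}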
|
||||
|
||||
@@ -173,6 +172,11 @@ pub enum ComputeStatus {
|
||||
TerminationPendingImmediate,
|
||||
// Terminated Postgres
|
||||
Terminated,
|
||||
// A spec refresh is being requested
|
||||
RefreshConfigurationPending,
|
||||
// A spec refresh is being applied. We cannot refresh configuration again until the current
|
||||
// refresh is done, i.e., signal_refresh_configuration() will return 500 error.
|
||||
RefreshConfiguration,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize)]
|
||||
@@ -185,6 +189,10 @@ impl Display for ComputeStatus {
|
||||
match self {
|
||||
ComputeStatus::Empty => f.write_str("empty"),
|
||||
ComputeStatus::ConfigurationPending => f.write_str("configuration-pending"),
|
||||
ComputeStatus::RefreshConfiguration => f.write_str("refresh-configuration"),
|
||||
ComputeStatus::RefreshConfigurationPending => {
|
||||
f.write_str("refresh-configuration-pending")
|
||||
}
|
||||
ComputeStatus::Init => f.write_str("init"),
|
||||
ComputeStatus::Running => f.write_str("running"),
|
||||
ComputeStatus::Configuration => f.write_str("configuration"),
|
||||
|
||||
@@ -15,6 +15,7 @@ use tokio::sync::mpsc;
|
||||
use crate::cancel_token::RawCancelToken;
|
||||
use crate::codec::{BackendMessages, FrontendMessage, RecordNotices};
|
||||
use crate::config::{Host, SslMode};
|
||||
use crate::connection::gc_bytesmut;
|
||||
use crate::query::RowStream;
|
||||
use crate::simple_query::SimpleQueryStream;
|
||||
use crate::types::{Oid, Type};
|
||||
@@ -95,20 +96,13 @@ impl InnerClient {
|
||||
Ok(PartialQuery(Some(self)))
|
||||
}
|
||||
|
||||
// pub fn send_with_sync<F>(&mut self, f: F) -> Result<&mut Responses, Error>
|
||||
// where
|
||||
// F: FnOnce(&mut BytesMut) -> Result<(), Error>,
|
||||
// {
|
||||
// self.start()?.send_with_sync(f)
|
||||
// }
|
||||
|
||||
pub fn send_simple_query(&mut self, query: &str) -> Result<&mut Responses, Error> {
|
||||
self.responses.waiting += 1;
|
||||
|
||||
self.buffer.clear();
|
||||
// simple queries do not need sync.
|
||||
frontend::query(query, &mut self.buffer).map_err(Error::encode)?;
|
||||
let buf = self.buffer.split().freeze();
|
||||
let buf = self.buffer.split();
|
||||
self.send_message(FrontendMessage::Raw(buf))
|
||||
}
|
||||
|
||||
@@ -125,7 +119,7 @@ impl Drop for PartialQuery<'_> {
|
||||
if let Some(client) = self.0.take() {
|
||||
client.buffer.clear();
|
||||
frontend::sync(&mut client.buffer);
|
||||
let buf = client.buffer.split().freeze();
|
||||
let buf = client.buffer.split();
|
||||
let _ = client.send_message(FrontendMessage::Raw(buf));
|
||||
}
|
||||
}
|
||||
@@ -141,7 +135,7 @@ impl<'a> PartialQuery<'a> {
|
||||
client.buffer.clear();
|
||||
f(&mut client.buffer)?;
|
||||
frontend::flush(&mut client.buffer);
|
||||
let buf = client.buffer.split().freeze();
|
||||
let buf = client.buffer.split();
|
||||
client.send_message(FrontendMessage::Raw(buf))
|
||||
}
|
||||
|
||||
@@ -154,7 +148,7 @@ impl<'a> PartialQuery<'a> {
|
||||
client.buffer.clear();
|
||||
f(&mut client.buffer)?;
|
||||
frontend::sync(&mut client.buffer);
|
||||
let buf = client.buffer.split().freeze();
|
||||
let buf = client.buffer.split();
|
||||
let _ = client.send_message(FrontendMessage::Raw(buf));
|
||||
|
||||
Ok(&mut self.0.take().unwrap().responses)
|
||||
@@ -292,8 +286,35 @@ impl Client {
|
||||
simple_query::batch_execute(self.inner_mut(), query).await
|
||||
}
|
||||
|
||||
pub async fn discard_all(&mut self) -> Result<ReadyForQueryStatus, Error> {
|
||||
self.batch_execute("discard all").await
|
||||
/// Similar to `discard_all`, but it does not clear any query plans
|
||||
///
|
||||
/// This runs in the background, so it can be executed without `await`ing.
|
||||
pub fn reset_session_background(&mut self) -> Result<(), Error> {
|
||||
// "CLOSE ALL": closes any cursors
|
||||
// "SET SESSION AUTHORIZATION DEFAULT": resets the current_user back to the session_user
|
||||
// "RESET ALL": resets any GUCs back to their session defaults.
|
||||
// "DEALLOCATE ALL": deallocates any prepared statements
|
||||
// "UNLISTEN *": stops listening on all channels
|
||||
// "SELECT pg_advisory_unlock_all();": unlocks all advisory locks
|
||||
// "DISCARD TEMP;": drops all temporary tables
|
||||
// "DISCARD SEQUENCES;": deallocates all cached sequence state
|
||||
|
||||
let _responses = self.inner_mut().send_simple_query(
|
||||
"ROLLBACK;
|
||||
CLOSE ALL;
|
||||
SET SESSION AUTHORIZATION DEFAULT;
|
||||
RESET ALL;
|
||||
DEALLOCATE ALL;
|
||||
UNLISTEN *;
|
||||
SELECT pg_advisory_unlock_all();
|
||||
DISCARD TEMP;
|
||||
DISCARD SEQUENCES;",
|
||||
)?;
|
||||
|
||||
// Clean up memory usage.
|
||||
gc_bytesmut(&mut self.inner_mut().buffer);
|
||||
|
||||
Ok(())
|
||||
}
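For comparison, the same reset batch can be issued through the upstream tokio-postgres crate; this is only a sketch of the statements' effect, with a placeholder connection string:

// Sketch: run the session-reset batch over a plain tokio-postgres connection.
#[tokio::main]
async fn main() -> Result<(), tokio_postgres::Error> {
    let (client, connection) =
        tokio_postgres::connect("host=localhost user=postgres", tokio_postgres::NoTls).await?;
    tokio::spawn(async move {
        if let Err(e) = connection.await {
            eprintln!("connection error: {e}");
        }
    });
    client
        .batch_execute(
            "ROLLBACK; CLOSE ALL; SET SESSION AUTHORIZATION DEFAULT; RESET ALL; \
             DEALLOCATE ALL; UNLISTEN *; SELECT pg_advisory_unlock_all(); \
             DISCARD TEMP; DISCARD SEQUENCES;",
        )
        .await
}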
|
||||
|
||||
/// Begins a new database transaction.
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
use std::io;
|
||||
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use bytes::BytesMut;
|
||||
use fallible_iterator::FallibleIterator;
|
||||
use postgres_protocol2::message::backend;
|
||||
use tokio::sync::mpsc::UnboundedSender;
|
||||
use tokio_util::codec::{Decoder, Encoder};
|
||||
|
||||
pub enum FrontendMessage {
|
||||
Raw(Bytes),
|
||||
Raw(BytesMut),
|
||||
RecordNotices(RecordNotices),
|
||||
}
|
||||
|
||||
@@ -17,7 +17,10 @@ pub struct RecordNotices {
|
||||
}
|
||||
|
||||
pub enum BackendMessage {
|
||||
Normal { messages: BackendMessages },
|
||||
Normal {
|
||||
messages: BackendMessages,
|
||||
ready: bool,
|
||||
},
|
||||
Async(backend::Message),
|
||||
}
|
||||
|
||||
@@ -40,11 +43,18 @@ impl FallibleIterator for BackendMessages {
|
||||
|
||||
pub struct PostgresCodec;
|
||||
|
||||
impl Encoder<Bytes> for PostgresCodec {
|
||||
impl Encoder<BytesMut> for PostgresCodec {
|
||||
type Error = io::Error;
|
||||
|
||||
fn encode(&mut self, item: Bytes, dst: &mut BytesMut) -> io::Result<()> {
|
||||
dst.extend_from_slice(&item);
|
||||
fn encode(&mut self, item: BytesMut, dst: &mut BytesMut) -> io::Result<()> {
|
||||
// When it comes to request/response workflows, we usually flush the entire write
|
||||
// buffer in order to wait for the response before we send a new request.
|
||||
// Therefore we can avoid the copy and just replace the buffer.
|
||||
if dst.is_empty() {
|
||||
*dst = item;
|
||||
} else {
|
||||
dst.extend_from_slice(&item);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
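The fast path above works because assigning a BytesMut moves the backing allocation instead of copying the frame; a tiny standalone illustration (assumes the bytes crate as a dependency):

use bytes::BytesMut;

fn main() {
    let mut frame = BytesMut::with_capacity(16);
    frame.extend_from_slice(b"QSELECT 1\0");

    let mut dst = BytesMut::new();
    if dst.is_empty() {
        // Move: no byte copy of the payload, mirrors the encode() fast path.
        dst = frame;
    } else {
        dst.extend_from_slice(&frame);
    }
    assert_eq!(dst.len(), 10);
}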
|
||||
@@ -56,6 +66,7 @@ impl Decoder for PostgresCodec {
|
||||
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<BackendMessage>, io::Error> {
|
||||
let mut idx = 0;
|
||||
|
||||
let mut ready = false;
|
||||
while let Some(header) = backend::Header::parse(&src[idx..])? {
|
||||
let len = header.len() as usize + 1;
|
||||
if src[idx..].len() < len {
|
||||
@@ -79,6 +90,7 @@ impl Decoder for PostgresCodec {
|
||||
idx += len;
|
||||
|
||||
if header.tag() == backend::READY_FOR_QUERY_TAG {
|
||||
ready = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -88,6 +100,7 @@ impl Decoder for PostgresCodec {
|
||||
} else {
|
||||
Ok(Some(BackendMessage::Normal {
|
||||
messages: BackendMessages(src.split_to(idx)),
|
||||
ready,
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -250,19 +250,20 @@ impl Config {
|
||||
{
|
||||
let stream = connect_tls(stream, self.ssl_mode, tls).await?;
|
||||
let mut stream = StartupStream::new(stream);
|
||||
connect_raw::startup(&mut stream, self).await?;
|
||||
connect_raw::authenticate(&mut stream, self).await?;
|
||||
|
||||
Ok(stream)
|
||||
}
|
||||
|
||||
pub async fn authenticate<S, T>(&self, stream: &mut StartupStream<S, T>) -> Result<(), Error>
|
||||
pub fn authenticate<S, T>(
|
||||
&self,
|
||||
stream: &mut StartupStream<S, T>,
|
||||
) -> impl Future<Output = Result<(), Error>>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: TlsStream + Unpin,
|
||||
{
|
||||
connect_raw::startup(stream, self).await?;
|
||||
connect_raw::authenticate(stream, self).await
|
||||
connect_raw::authenticate(stream, self)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ use tokio::net::TcpStream;
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
use crate::client::SocketConfig;
|
||||
use crate::config::Host;
|
||||
use crate::config::{Host, SslMode};
|
||||
use crate::connect_raw::StartupStream;
|
||||
use crate::connect_socket::connect_socket;
|
||||
use crate::tls::{MakeTlsConnect, TlsConnect};
|
||||
@@ -45,14 +45,36 @@ where
|
||||
T: TlsConnect<TcpStream>,
|
||||
{
|
||||
let socket = connect_socket(host_addr, host, port, config.connect_timeout).await?;
|
||||
let mut stream = config.tls_and_authenticate(socket, tls).await?;
|
||||
let stream = config.tls_and_authenticate(socket, tls).await?;
|
||||
managed(
|
||||
stream,
|
||||
host_addr,
|
||||
host.clone(),
|
||||
port,
|
||||
config.ssl_mode,
|
||||
config.connect_timeout,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn managed<TlsStream>(
|
||||
mut stream: StartupStream<TcpStream, TlsStream>,
|
||||
host_addr: Option<IpAddr>,
|
||||
host: Host,
|
||||
port: u16,
|
||||
ssl_mode: SslMode,
|
||||
connect_timeout: Option<std::time::Duration>,
|
||||
) -> Result<(Client, Connection<TcpStream, TlsStream>), Error>
|
||||
where
|
||||
TlsStream: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
let (process_id, secret_key) = wait_until_ready(&mut stream).await?;
|
||||
|
||||
let socket_config = SocketConfig {
|
||||
host_addr,
|
||||
host: host.clone(),
|
||||
host,
|
||||
port,
|
||||
connect_timeout: config.connect_timeout,
|
||||
connect_timeout,
|
||||
};
|
||||
|
||||
let (client_tx, conn_rx) = mpsc::unbounded_channel();
|
||||
@@ -61,7 +83,7 @@ where
|
||||
client_tx,
|
||||
client_rx,
|
||||
socket_config,
|
||||
config.ssl_mode,
|
||||
ssl_mode,
|
||||
process_id,
|
||||
secret_key,
|
||||
);
|
||||
|
||||
@@ -2,51 +2,28 @@ use std::io;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll, ready};
|
||||
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use bytes::BytesMut;
|
||||
use fallible_iterator::FallibleIterator;
|
||||
use futures_util::{Sink, SinkExt, Stream, TryStreamExt};
|
||||
use futures_util::{SinkExt, Stream, TryStreamExt};
|
||||
use postgres_protocol2::authentication::sasl;
|
||||
use postgres_protocol2::authentication::sasl::ScramSha256;
|
||||
use postgres_protocol2::message::backend::{AuthenticationSaslBody, Message};
|
||||
use postgres_protocol2::message::frontend;
|
||||
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
|
||||
use tokio_util::codec::{Framed, FramedParts, FramedWrite};
|
||||
use tokio_util::codec::{Framed, FramedParts};
|
||||
|
||||
use crate::Error;
|
||||
use crate::codec::PostgresCodec;
|
||||
use crate::config::{self, AuthKeys, Config};
|
||||
use crate::connection::{GC_THRESHOLD, INITIAL_CAPACITY};
|
||||
use crate::maybe_tls_stream::MaybeTlsStream;
|
||||
use crate::tls::TlsStream;
|
||||
|
||||
pub struct StartupStream<S, T> {
|
||||
inner: FramedWrite<MaybeTlsStream<S, T>, PostgresCodec>,
|
||||
inner: Framed<MaybeTlsStream<S, T>, PostgresCodec>,
|
||||
read_buf: BytesMut,
|
||||
}
|
||||
|
||||
impl<S, T> Sink<Bytes> for StartupStream<S, T>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
type Error = io::Error;
|
||||
|
||||
fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
Pin::new(&mut self.inner).poll_ready(cx)
|
||||
}
|
||||
|
||||
fn start_send(mut self: Pin<&mut Self>, item: Bytes) -> io::Result<()> {
|
||||
Pin::new(&mut self.inner).start_send(item)
|
||||
}
|
||||
|
||||
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
Pin::new(&mut self.inner).poll_flush(cx)
|
||||
}
|
||||
|
||||
fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
Pin::new(&mut self.inner).poll_close(cx)
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, T> Stream for StartupStream<S, T>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
@@ -55,6 +32,8 @@ where
|
||||
type Item = io::Result<Message>;
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
// We don't use `self.inner.poll_next()` as that might over-read into the read buffer.
|
||||
|
||||
// read 1 byte tag, 4 bytes length.
|
||||
let header = ready!(self.as_mut().poll_fill_buf_exact(cx, 5)?);
|
||||
|
||||
@@ -121,36 +100,28 @@ where
|
||||
}
|
||||
|
||||
pub fn into_framed(mut self) -> Framed<MaybeTlsStream<S, T>, PostgresCodec> {
|
||||
let write_buf = std::mem::take(self.inner.write_buffer_mut());
|
||||
let io = self.inner.into_inner();
|
||||
let mut parts = FramedParts::new(io, PostgresCodec);
|
||||
parts.read_buf = self.read_buf;
|
||||
parts.write_buf = write_buf;
|
||||
Framed::from_parts(parts)
|
||||
*self.inner.read_buffer_mut() = self.read_buf;
|
||||
self.inner
|
||||
}
|
||||
|
||||
pub fn new(io: MaybeTlsStream<S, T>) -> Self {
|
||||
let mut parts = FramedParts::new(io, PostgresCodec);
|
||||
parts.write_buf = BytesMut::with_capacity(INITIAL_CAPACITY);
|
||||
|
||||
let mut inner = Framed::from_parts(parts);
|
||||
|
||||
// This is the default already, but nice to be explicit.
|
||||
// We divide by two because writes will overshoot the boundary.
|
||||
// We don't want constant overshoots to cause us to constantly re-shrink the buffer.
|
||||
inner.set_backpressure_boundary(GC_THRESHOLD / 2);
|
||||
|
||||
Self {
|
||||
inner: FramedWrite::new(io, PostgresCodec),
|
||||
read_buf: BytesMut::new(),
|
||||
inner,
|
||||
read_buf: BytesMut::with_capacity(INITIAL_CAPACITY),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn startup<S, T>(
|
||||
stream: &mut StartupStream<S, T>,
|
||||
config: &Config,
|
||||
) -> Result<(), Error>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
let mut buf = BytesMut::new();
|
||||
frontend::startup_message(&config.server_params, &mut buf).map_err(Error::encode)?;
|
||||
|
||||
stream.send(buf.freeze()).await.map_err(Error::io)
|
||||
}
|
||||
|
||||
pub(crate) async fn authenticate<S, T>(
|
||||
stream: &mut StartupStream<S, T>,
|
||||
config: &Config,
|
||||
@@ -159,6 +130,10 @@ where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: TlsStream + Unpin,
|
||||
{
|
||||
frontend::startup_message(&config.server_params, stream.inner.write_buffer_mut())
|
||||
.map_err(Error::encode)?;
|
||||
|
||||
stream.inner.flush().await.map_err(Error::io)?;
|
||||
match stream.try_next().await.map_err(Error::io)? {
|
||||
Some(Message::AuthenticationOk) => {
|
||||
can_skip_channel_binding(config)?;
|
||||
@@ -172,7 +147,8 @@ where
|
||||
.as_ref()
|
||||
.ok_or_else(|| Error::config("password missing".into()))?;
|
||||
|
||||
authenticate_password(stream, pass).await?;
|
||||
frontend::password_message(pass, stream.inner.write_buffer_mut())
|
||||
.map_err(Error::encode)?;
|
||||
}
|
||||
Some(Message::AuthenticationSasl(body)) => {
|
||||
authenticate_sasl(stream, body, config).await?;
|
||||
@@ -191,6 +167,7 @@ where
|
||||
None => return Err(Error::closed()),
|
||||
}
|
||||
|
||||
stream.inner.flush().await.map_err(Error::io)?;
|
||||
match stream.try_next().await.map_err(Error::io)? {
|
||||
Some(Message::AuthenticationOk) => Ok(()),
|
||||
Some(Message::ErrorResponse(body)) => Err(Error::db(body)),
|
||||
@@ -208,20 +185,6 @@ fn can_skip_channel_binding(config: &Config) -> Result<(), Error> {
|
||||
}
|
||||
}
|
||||
|
||||
async fn authenticate_password<S, T>(
|
||||
stream: &mut StartupStream<S, T>,
|
||||
password: &[u8],
|
||||
) -> Result<(), Error>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
let mut buf = BytesMut::new();
|
||||
frontend::password_message(password, &mut buf).map_err(Error::encode)?;
|
||||
|
||||
stream.send(buf.freeze()).await.map_err(Error::io)
|
||||
}
|
||||
|
||||
async fn authenticate_sasl<S, T>(
|
||||
stream: &mut StartupStream<S, T>,
|
||||
body: AuthenticationSaslBody,
|
||||
@@ -276,10 +239,10 @@ where
|
||||
return Err(Error::config("password or auth keys missing".into()));
|
||||
};
|
||||
|
||||
let mut buf = BytesMut::new();
|
||||
frontend::sasl_initial_response(mechanism, scram.message(), &mut buf).map_err(Error::encode)?;
|
||||
stream.send(buf.freeze()).await.map_err(Error::io)?;
|
||||
frontend::sasl_initial_response(mechanism, scram.message(), stream.inner.write_buffer_mut())
|
||||
.map_err(Error::encode)?;
|
||||
|
||||
stream.inner.flush().await.map_err(Error::io)?;
|
||||
let body = match stream.try_next().await.map_err(Error::io)? {
|
||||
Some(Message::AuthenticationSaslContinue(body)) => body,
|
||||
Some(Message::ErrorResponse(body)) => return Err(Error::db(body)),
|
||||
@@ -292,10 +255,10 @@ where
|
||||
.await
|
||||
.map_err(|e| Error::authentication(e.into()))?;
|
||||
|
||||
let mut buf = BytesMut::new();
|
||||
frontend::sasl_response(scram.message(), &mut buf).map_err(Error::encode)?;
|
||||
stream.send(buf.freeze()).await.map_err(Error::io)?;
|
||||
frontend::sasl_response(scram.message(), stream.inner.write_buffer_mut())
|
||||
.map_err(Error::encode)?;
|
||||
|
||||
stream.inner.flush().await.map_err(Error::io)?;
|
||||
let body = match stream.try_next().await.map_err(Error::io)? {
|
||||
Some(Message::AuthenticationSaslFinal(body)) => body,
|
||||
Some(Message::ErrorResponse(body)) => return Err(Error::db(body)),
|
||||
|
||||
@@ -44,6 +44,27 @@ pub struct Connection<S, T> {
|
||||
state: State,
|
||||
}
|
||||
|
||||
pub const INITIAL_CAPACITY: usize = 2 * 1024;
|
||||
pub const GC_THRESHOLD: usize = 16 * 1024;
|
||||
|
||||
/// Garbage collect the [`BytesMut`] if it has too much spare capacity.
|
||||
pub fn gc_bytesmut(buf: &mut BytesMut) {
|
||||
// We use a different mode to shrink the buf when above the threshold.
|
||||
// When above the threshold, we only re-allocate when the buf has 2x spare capacity.
|
||||
let reclaim = GC_THRESHOLD.checked_sub(buf.len()).unwrap_or(buf.len());
|
||||
|
||||
// `try_reclaim` tries to get the capacity from any shared `BytesMut`s,
|
||||
// before then comparing the length against the capacity.
|
||||
if buf.try_reclaim(reclaim) {
|
||||
let capacity = usize::max(buf.len(), INITIAL_CAPACITY);
|
||||
|
||||
// Allocate a new `BytesMut` so that we deallocate the old version.
|
||||
let mut new = BytesMut::with_capacity(capacity);
|
||||
new.extend_from_slice(buf);
|
||||
*buf = new;
|
||||
}
|
||||
}
|
||||
|
||||
pub enum Never {}
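A standalone illustration of the shrinking strategy in gc_bytesmut above; it mirrors the same constants and needs the bytes crate (try_reclaim requires bytes 1.7 or newer):

use bytes::BytesMut;

const INITIAL_CAPACITY: usize = 2 * 1024;
const GC_THRESHOLD: usize = 16 * 1024;

// Same idea as gc_bytesmut: if the buffer holds little data but owns a large
// allocation, copy the contents into a right-sized buffer and drop the old one.
fn shrink(buf: &mut BytesMut) {
    let reclaim = GC_THRESHOLD.checked_sub(buf.len()).unwrap_or(buf.len());
    if buf.try_reclaim(reclaim) {
        let mut new = BytesMut::with_capacity(usize::max(buf.len(), INITIAL_CAPACITY));
        new.extend_from_slice(buf);
        *buf = new;
    }
}

fn main() {
    let mut buf = BytesMut::with_capacity(64 * 1024);
    buf.extend_from_slice(b"small payload");
    shrink(&mut buf);
    assert!(buf.capacity() < 64 * 1024);
    assert_eq!(&buf[..], b"small payload");
}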
|
||||
|
||||
impl<S, T> Connection<S, T>
|
||||
@@ -86,7 +107,14 @@ where
|
||||
continue;
|
||||
}
|
||||
BackendMessage::Async(_) => continue,
|
||||
BackendMessage::Normal { messages } => messages,
|
||||
BackendMessage::Normal { messages, ready } => {
|
||||
// if we read a ReadyForQuery from postgres, let's try GC the read buffer.
|
||||
if ready {
|
||||
gc_bytesmut(self.stream.read_buffer_mut());
|
||||
}
|
||||
|
||||
messages
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -177,12 +205,7 @@ where
|
||||
// Send a terminate message to postgres
|
||||
Poll::Ready(None) => {
|
||||
trace!("poll_write: at eof, terminating");
|
||||
let mut request = BytesMut::new();
|
||||
frontend::terminate(&mut request);
|
||||
|
||||
Pin::new(&mut self.stream)
|
||||
.start_send(request.freeze())
|
||||
.map_err(Error::io)?;
|
||||
frontend::terminate(self.stream.write_buffer_mut());
|
||||
|
||||
trace!("poll_write: sent eof, closing");
|
||||
trace!("poll_write: done");
|
||||
@@ -205,6 +228,10 @@ where
|
||||
{
|
||||
Poll::Ready(()) => {
|
||||
trace!("poll_flush: flushed");
|
||||
|
||||
// GC the write buffer if we managed to flush
|
||||
gc_bytesmut(self.stream.write_buffer_mut());
|
||||
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
Poll::Pending => {
|
||||
|
||||
@@ -48,7 +48,7 @@ mod cancel_token;
|
||||
mod client;
|
||||
mod codec;
|
||||
pub mod config;
|
||||
mod connect;
|
||||
pub mod connect;
|
||||
pub mod connect_raw;
|
||||
mod connect_socket;
|
||||
mod connect_tls;
|
||||
|
||||
@@ -8,7 +8,7 @@ license.workspace = true
|
||||
hyper0.workspace = true
|
||||
opentelemetry = { workspace = true, features = ["trace"] }
|
||||
opentelemetry_sdk = { workspace = true, features = ["rt-tokio"] }
|
||||
opentelemetry-otlp = { workspace = true, default-features = false, features = ["http-proto", "trace", "http", "reqwest-client"] }
|
||||
opentelemetry-otlp = { workspace = true, default-features = false, features = ["http-proto", "trace", "http", "reqwest-blocking-client"] }
|
||||
opentelemetry-semantic-conventions.workspace = true
|
||||
tokio = { workspace = true, features = ["rt", "rt-multi-thread"] }
|
||||
tracing.workspace = true
|
||||
|
||||
@@ -429,9 +429,11 @@ pub fn empty_shmem() -> crate::bindings::WalproposerShmemState {
|
||||
};
|
||||
|
||||
let empty_wal_rate_limiter = crate::bindings::WalRateLimiter {
|
||||
effective_max_wal_bytes_per_second: crate::bindings::pg_atomic_uint32 { value: 0 },
|
||||
should_limit: crate::bindings::pg_atomic_uint32 { value: 0 },
|
||||
sent_bytes: 0,
|
||||
last_recorded_time_us: crate::bindings::pg_atomic_uint64 { value: 0 },
|
||||
batch_start_time_us: crate::bindings::pg_atomic_uint64 { value: 0 },
|
||||
batch_end_time_us: crate::bindings::pg_atomic_uint64 { value: 0 },
|
||||
};
|
||||
|
||||
crate::bindings::WalproposerShmemState {
|
||||
|
||||
@@ -535,6 +535,7 @@ impl timeline::handle::TenantManager<TenantManagerTypes> for TenantManagerWrappe
|
||||
match resolved {
|
||||
ShardResolveResult::Found(tenant_shard) => break tenant_shard,
|
||||
ShardResolveResult::NotFound => {
|
||||
MISROUTED_PAGESTREAM_REQUESTS.inc();
|
||||
return Err(GetActiveTimelineError::Tenant(
|
||||
GetActiveTenantError::NotFound(GetTenantError::NotFound(*tenant_id)),
|
||||
));
|
||||
|
||||
@@ -33,6 +33,10 @@ SHLIB_LINK = -lcurl
|
||||
UNAME_S := $(shell uname -s)
|
||||
ifeq ($(UNAME_S), Darwin)
|
||||
SHLIB_LINK += -framework Security -framework CoreFoundation -framework SystemConfiguration
|
||||
|
||||
# Link against object files for the current macOS version, to avoid spurious linker warnings.
|
||||
MACOSX_DEPLOYMENT_TARGET := $(shell xcrun --sdk macosx --show-sdk-version)
|
||||
export MACOSX_DEPLOYMENT_TARGET
|
||||
endif
|
||||
|
||||
EXTENSION = neon
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
#include "extension_server.h"
|
||||
#include "neon_utils.h"
|
||||
|
||||
static int extension_server_port = 0;
|
||||
int hadron_extension_server_port = 0;
|
||||
static int extension_server_request_timeout = 60;
|
||||
static int extension_server_connect_timeout = 60;
|
||||
|
||||
@@ -47,7 +47,7 @@ neon_download_extension_file_http(const char *filename, bool is_library)
|
||||
curl_easy_setopt(handle, CURLOPT_CONNECTTIMEOUT, (long)extension_server_connect_timeout /* seconds */ );
|
||||
|
||||
compute_ctl_url = psprintf("http://localhost:%d/extension_server/%s%s",
|
||||
extension_server_port, filename, is_library ? "?is_library=true" : "");
|
||||
hadron_extension_server_port, filename, is_library ? "?is_library=true" : "");
|
||||
|
||||
elog(LOG, "Sending request to compute_ctl: %s", compute_ctl_url);
|
||||
|
||||
@@ -82,7 +82,7 @@ pg_init_extension_server()
|
||||
DefineCustomIntVariable("neon.extension_server_port",
|
||||
"connection string to the compute_ctl",
|
||||
NULL,
|
||||
&extension_server_port,
|
||||
&hadron_extension_server_port,
|
||||
0, 0, INT_MAX,
|
||||
PGC_POSTMASTER,
|
||||
0, /* no flags required */
|
||||
|
||||
@@ -13,6 +13,8 @@
|
||||
#include <math.h>
|
||||
#include <sys/socket.h>
|
||||
|
||||
#include <curl/curl.h>
|
||||
|
||||
#include "libpq-int.h"
|
||||
|
||||
#include "access/xlog.h"
|
||||
@@ -86,6 +88,10 @@ static int pageserver_response_log_timeout = 10000;
|
||||
/* 2.5 minutes. A bit higher than highest default TCP retransmission timeout */
|
||||
static int pageserver_response_disconnect_timeout = 150000;
|
||||
|
||||
static int conf_refresh_reconnect_attempt_threshold = 16;
|
||||
// Hadron: timeout for refresh errors (1 minute)
|
||||
static uint64 kRefreshErrorTimeoutUSec = 1 * USECS_PER_MINUTE;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
char connstring[MAX_SHARDS][MAX_PAGESERVER_CONNSTRING_SIZE];
|
||||
@@ -130,7 +136,7 @@ static uint64 pagestore_local_counter = 0;
|
||||
typedef enum PSConnectionState {
|
||||
PS_Disconnected, /* no connection yet */
|
||||
PS_Connecting_Startup, /* connection starting up */
|
||||
PS_Connecting_PageStream, /* negotiating pagestream */
|
||||
PS_Connecting_PageStream, /* negotiating pagestream */
|
||||
PS_Connected, /* connected, pagestream established */
|
||||
} PSConnectionState;
|
||||
|
||||
@@ -401,7 +407,7 @@ get_shard_number(BufferTag *tag)
|
||||
}
|
||||
|
||||
static inline void
|
||||
CLEANUP_AND_DISCONNECT(PageServer *shard)
|
||||
CLEANUP_AND_DISCONNECT(PageServer *shard)
|
||||
{
|
||||
if (shard->wes_read)
|
||||
{
|
||||
@@ -423,7 +429,7 @@ CLEANUP_AND_DISCONNECT(PageServer *shard)
|
||||
* complete the connection (e.g. due to receiving an earlier cancellation
|
||||
* during connection start).
|
||||
* Returns true if successfully connected; false if the connection failed.
|
||||
*
|
||||
*
|
||||
* Throws errors in unrecoverable situations, or when this backend's query
|
||||
* is canceled.
|
||||
*/
|
||||
@@ -1030,6 +1036,101 @@ pageserver_disconnect_shard(shardno_t shard_no)
|
||||
shard->state = PS_Disconnected;
|
||||
}
|
||||
|
||||
// BEGIN HADRON
|
||||
/*
|
||||
* Nudge compute_ctl to refresh our configuration. Called when we suspect we may be
|
||||
* connecting to the wrong pageservers due to a stale configuration.
|
||||
*
|
||||
* This is a best-effort operation. If we couldn't send the local loopback HTTP request
|
||||
* to compute_ctl or if the request fails for any reason, we just log the error and move
|
||||
* on.
|
||||
*/
|
||||
|
||||
extern int hadron_extension_server_port;
|
||||
|
||||
// The timestamp (usec) of the first error that occurred while trying to refresh the configuration.
|
||||
// Will be reset to 0 after a successful refresh.
|
||||
static uint64 first_recorded_refresh_error_usec = 0;
|
||||
|
||||
// Request compute_ctl to refresh the configuration. This operation may fail, e.g., if the compute_ctl
|
||||
// is already in the configuration state. The function returns true if the caller needs to cancel the
|
||||
// current query to avoid dead/live lock.
|
||||
static bool
|
||||
hadron_request_configuration_refresh() {
|
||||
static CURL *handle = NULL;
|
||||
CURLcode res;
|
||||
char *compute_ctl_url;
|
||||
bool cancel_query = false;
|
||||
|
||||
if (!lakebase_mode)
|
||||
return false;
|
||||
|
||||
if (handle == NULL)
|
||||
{
|
||||
handle = alloc_curl_handle();
|
||||
|
||||
curl_easy_setopt(handle, CURLOPT_CUSTOMREQUEST, "POST");
|
||||
curl_easy_setopt(handle, CURLOPT_TIMEOUT, 3L /* seconds */ );
|
||||
curl_easy_setopt(handle, CURLOPT_POSTFIELDS, "");
|
||||
}
|
||||
|
||||
// Set the URL
|
||||
compute_ctl_url = psprintf("http://localhost:%d/refresh_configuration", hadron_extension_server_port);
|
||||
|
||||
|
||||
elog(LOG, "Sending refresh configuration request to compute_ctl: %s", compute_ctl_url);
|
||||
|
||||
curl_easy_setopt(handle, CURLOPT_URL, compute_ctl_url);
|
||||
|
||||
res = curl_easy_perform(handle);
|
||||
if (res != CURLE_OK )
|
||||
{
|
||||
elog(WARNING, "refresh_configuration request failed: %s\n", curl_easy_strerror(res));
|
||||
}
|
||||
else
|
||||
{
|
||||
long http_code = 0;
|
||||
res = curl_easy_getinfo(handle, CURLINFO_RESPONSE_CODE, &http_code);
|
||||
if ( res != CURLE_OK )
|
||||
{
|
||||
elog(WARNING, "compute_ctl refresh_configuration request getinfo failed: %s\n", curl_easy_strerror(res));
|
||||
}
|
||||
else
|
||||
{
|
||||
elog(LOG, "compute_ctl refresh_configuration got HTTP response: %ld\n", http_code);
|
||||
if( http_code == 200 )
|
||||
{
|
||||
first_recorded_refresh_error_usec = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (first_recorded_refresh_error_usec == 0)
|
||||
{
|
||||
first_recorded_refresh_error_usec = GetCurrentTimestamp();
|
||||
}
|
||||
else if(GetCurrentTimestamp() - first_recorded_refresh_error_usec > kRefreshErrorTimeoutUSec)
|
||||
{
|
||||
{
|
||||
first_recorded_refresh_error_usec = 0;
|
||||
cancel_query = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// In regular Postgres usage, it is not necessary to manually free memory allocated by palloc (psprintf) because
|
||||
// it will be cleaned up after the "memory context" is reset (e.g. after the query or the transaction is finished).
|
||||
// However, the number of times this function gets called during a single query/transaction can be unbounded due to
|
||||
// the various retry loops around calls to pageservers. Therefore, we need to manually free this memory here.
|
||||
if (compute_ctl_url != NULL)
|
||||
{
|
||||
pfree(compute_ctl_url);
|
||||
}
|
||||
return cancel_query;
|
||||
}
|
||||
// END HADRON
|
||||
|
||||
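The escalation policy above is worth seeing in isolation: the first refresh failure is only recorded, and the caller is told to cancel the query only if failures keep accumulating for longer than kRefreshErrorTimeoutUSec (one minute). The following is a minimal, stand-alone C sketch of that bookkeeping, not part of the patch; the refresh_failed_at helper and the hard-coded timestamps are illustrative assumptions.

/* Illustrative sketch only: mirrors the error-timeout bookkeeping above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define USECS_PER_MINUTE 60000000ULL
static const uint64_t kRefreshErrorTimeoutUSec = 1 * USECS_PER_MINUTE;
static uint64_t first_error_usec = 0; /* 0 means "no outstanding error" */

/* Returns true when the caller should cancel the running query. */
static bool refresh_failed_at(uint64_t now_usec)
{
    if (first_error_usec == 0)
    {
        first_error_usec = now_usec;   /* remember the first failure */
        return false;
    }
    if (now_usec - first_error_usec > kRefreshErrorTimeoutUSec)
    {
        first_error_usec = 0;          /* reset the clock and escalate */
        return true;
    }
    return false;
}

int main(void)
{
    printf("%d\n", refresh_failed_at(0));           /* 0: first failure, just recorded */
    printf("%d\n", refresh_failed_at(30000000ULL)); /* 0: 30s of failures, still waiting */
    printf("%d\n", refresh_failed_at(70000000ULL)); /* 1: failures persisted > 1 minute */
    return 0;
}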
static bool
|
||||
pageserver_send(shardno_t shard_no, NeonRequest *request)
|
||||
{
|
||||
@@ -1064,6 +1165,11 @@ pageserver_send(shardno_t shard_no, NeonRequest *request)
|
||||
while (!pageserver_connect(shard_no, shard->n_reconnect_attempts < max_reconnect_attempts ? LOG : ERROR))
|
||||
{
|
||||
shard->n_reconnect_attempts += 1;
|
||||
if (shard->n_reconnect_attempts > conf_refresh_reconnect_attempt_threshold
|
||||
&& hadron_request_configuration_refresh() )
|
||||
{
|
||||
neon_shard_log(shard_no, ERROR, "request failed too many times, cancelling query");
|
||||
}
|
||||
}
|
||||
shard->n_reconnect_attempts = 0;
|
||||
} else {
|
||||
@@ -1171,17 +1277,26 @@ pageserver_receive(shardno_t shard_no)
|
||||
pfree(msg);
|
||||
pageserver_disconnect(shard_no);
|
||||
resp = NULL;
|
||||
|
||||
/*
 * Always poke compute_ctl to request a configuration refresh if we have issues receiving data from a pageserver after
 * successfully connecting to it. It could be an indication that we are connecting to the wrong pageservers (e.g. the PS
 * is in secondary mode or otherwise refuses to respond to our request).
 */
hadron_request_configuration_refresh();
|
||||
}
|
||||
else if (rc == -2)
|
||||
{
|
||||
char *msg = pchomp(PQerrorMessage(pageserver_conn));
|
||||
|
||||
pageserver_disconnect(shard_no);
|
||||
hadron_request_configuration_refresh();
|
||||
neon_shard_log(shard_no, ERROR, "pageserver_receive disconnect: could not read COPY data: %s", msg);
|
||||
}
|
||||
else
|
||||
{
|
||||
pageserver_disconnect(shard_no);
|
||||
hadron_request_configuration_refresh();
|
||||
neon_shard_log(shard_no, ERROR, "pageserver_receive disconnect: unexpected PQgetCopyData return value: %d", rc);
|
||||
}
|
||||
|
||||
@@ -1249,21 +1364,34 @@ pageserver_try_receive(shardno_t shard_no)
|
||||
neon_shard_log(shard_no, LOG, "pageserver_receive disconnect: psql end of copy data: %s", pchomp(PQerrorMessage(pageserver_conn)));
|
||||
pageserver_disconnect(shard_no);
|
||||
resp = NULL;
|
||||
hadron_request_configuration_refresh();
|
||||
}
|
||||
else if (rc == -2)
|
||||
{
|
||||
char *msg = pchomp(PQerrorMessage(pageserver_conn));
|
||||
|
||||
pageserver_disconnect(shard_no);
|
||||
hadron_request_configuration_refresh();
|
||||
neon_shard_log(shard_no, LOG, "pageserver_receive disconnect: could not read COPY data: %s", msg);
|
||||
resp = NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
pageserver_disconnect(shard_no);
|
||||
hadron_request_configuration_refresh();
|
||||
neon_shard_log(shard_no, ERROR, "pageserver_receive disconnect: unexpected PQgetCopyData return value: %d", rc);
|
||||
}
|
||||
|
||||
/*
 * Always poke compute_ctl to request a configuration refresh if we have issues receiving data from a pageserver after
 * successfully connecting to it. It could be an indication that we are connecting to the wrong pageservers (e.g. the PS
 * is in secondary mode or otherwise refuses to respond to our request).
 */
if (rc < 0 && hadron_request_configuration_refresh())
|
||||
{
|
||||
neon_shard_log(shard_no, ERROR, "refresh_configuration request failed, cancelling query");
|
||||
}
|
||||
|
||||
shard->nresponses_received++;
|
||||
return (NeonResponse *) resp;
|
||||
}
|
||||
@@ -1460,6 +1588,16 @@ pg_init_libpagestore(void)
|
||||
PGC_SU_BACKEND,
|
||||
0, /* no flags required */
|
||||
NULL, NULL, NULL);
|
||||
DefineCustomIntVariable("hadron.conf_refresh_reconnect_attempt_threshold",
|
||||
"Threshold of the number of consecutive failed pageserver "
|
||||
"connection attempts (per shard) before signaling "
|
||||
"compute_ctl for a configuration refresh.",
|
||||
NULL,
|
||||
&conf_refresh_reconnect_attempt_threshold,
|
||||
16, 0, INT_MAX,
|
||||
PGC_USERSET,
|
||||
0,
|
||||
NULL, NULL, NULL);
|
||||
|
||||
DefineCustomIntVariable("neon.pageserver_response_log_timeout",
|
||||
"pageserver response log timeout",
|
||||
|
||||
@@ -48,6 +48,7 @@
|
||||
PG_MODULE_MAGIC;
|
||||
void _PG_init(void);
|
||||
|
||||
bool lakebase_mode = false;
|
||||
|
||||
static int running_xacts_overflow_policy;
|
||||
static bool monitor_query_exec_time = false;
|
||||
@@ -583,6 +584,16 @@ _PG_init(void)
|
||||
"neon_superuser",
|
||||
PGC_POSTMASTER, 0, NULL, NULL, NULL);
|
||||
|
||||
DefineCustomBoolVariable(
|
||||
"neon.lakebase_mode",
|
||||
"Is neon running in Lakebase?",
|
||||
NULL,
|
||||
&lakebase_mode,
|
||||
false,
|
||||
PGC_POSTMASTER,
|
||||
0,
|
||||
NULL, NULL, NULL);
|
||||
|
||||
/*
|
||||
* Important: This must happen after other parts of the extension are
|
||||
* loaded, otherwise any settings to GUCs that were set before the
|
||||
|
||||
@@ -21,6 +21,7 @@ extern int wal_acceptor_reconnect_timeout;
|
||||
extern int wal_acceptor_connection_timeout;
|
||||
extern int readahead_getpage_pull_timeout_ms;
|
||||
extern bool disable_wal_prev_lsn_checks;
|
||||
extern bool lakebase_mode;
|
||||
|
||||
extern bool AmPrewarmWorker;
|
||||
|
||||
|
||||
@@ -389,12 +389,21 @@ typedef struct PageserverFeedback
*/
typedef struct WalRateLimiter
{
/* The effective WAL write rate. Could be changed dynamically
based on whether PG has backpressure or not. */
pg_atomic_uint32 effective_max_wal_bytes_per_second;
/* If the value is 1, PG backends will hit backpressure until the time has passed batch_end_time_us. */
pg_atomic_uint32 should_limit;
/* The number of bytes sent in the current second. */
uint64 sent_bytes;
/* The last recorded time in microseconds. */
pg_atomic_uint64 last_recorded_time_us;
/* The timestamp when the write starts in the current batch. A batch is a time interval (usually 1 second) over which we
track and throttle writes. The batch can become larger if PG writes a burst of WAL at once, and we adjust the batch
length to compensate (e.g., if PG writes 10MB at once and the max WAL write rate is 1MB/s, the current batch
becomes 10s). */
pg_atomic_uint64 batch_start_time_us;
/* The timestamp (in the future) at which the current batch should end and accept more writes
(after should_limit is set to 1). */
pg_atomic_uint64 batch_end_time_us;
} WalRateLimiter;
/* END_HADRON */
|
||||
|
||||
@@ -68,6 +68,14 @@ int safekeeper_proto_version = 3;
|
||||
char *safekeeper_conninfo_options = "";
|
||||
/* BEGIN_HADRON */
int databricks_max_wal_mb_per_second = -1;
// During throttling, we limit the effective WAL write rate to 10KB/s.
// PG can still push some WAL to SK, but at a very low rate.
int databricks_throttled_max_wal_bytes_per_second = 10 * 1024;
// The max sleep time of a batch. This makes sure the rate limiter does not
// overshoot too much and block PG for a very long time.
// This is set to 5 minutes for now. PG can send as much as 10MB of WAL to SK in one batch,
// so this effectively caps the write rate to ~30KB/s in the worst case.
static uint64 kRateLimitMaxBatchUSecs = 300 * USECS_PER_SEC;
/* END_HADRON */
|
||||
/* Set to true in the walproposer bgw. */
|
||||
@@ -86,6 +94,7 @@ static HotStandbyFeedback agg_hs_feedback;
|
||||
static void nwp_register_gucs(void);
|
||||
static void assign_neon_safekeepers(const char *newval, void *extra);
|
||||
static uint64 backpressure_lag_impl(void);
|
||||
static uint64 hadron_backpressure_lag_impl(void);
|
||||
static uint64 startup_backpressure_wrap(void);
|
||||
static bool backpressure_throttling_impl(void);
|
||||
static void walprop_register_bgworker(void);
|
||||
@@ -110,9 +119,22 @@ static void rm_safekeeper_event_set(Safekeeper *to_remove, bool is_sk);
|
||||
|
||||
static void CheckGracefulShutdown(WalProposer *wp);
|
||||
|
||||
// HADRON
|
||||
/* BEGIN_HADRON */
|
||||
shardno_t get_num_shards(void);
|
||||
|
||||
static int positive_mb_to_bytes(int mb)
|
||||
{
|
||||
if (mb <= 0)
|
||||
{
|
||||
return mb;
|
||||
}
|
||||
else
|
||||
{
|
||||
return mb * 1024 * 1024;
|
||||
}
|
||||
}
|
||||
/* END_HADRON */
|
||||
|
||||
static void
|
||||
init_walprop_config(bool syncSafekeepers)
|
||||
{
|
||||
@@ -260,6 +282,16 @@ nwp_register_gucs(void)
|
||||
PGC_SUSET,
|
||||
GUC_UNIT_MB,
|
||||
NULL, NULL, NULL);
|
||||
|
||||
DefineCustomIntVariable(
|
||||
"databricks.throttled_max_wal_bytes_per_second",
|
||||
"The maximum WAL bytes per second when PG is being throttled.",
|
||||
NULL,
|
||||
&databricks_throttled_max_wal_bytes_per_second,
|
||||
10 * 1024, 0, INT_MAX,
|
||||
PGC_SUSET,
|
||||
GUC_UNIT_BYTE,
|
||||
NULL, NULL, NULL);
|
||||
/* END_HADRON */
|
||||
}
|
||||
|
||||
@@ -398,19 +430,65 @@ assign_neon_safekeepers(const char *newval, void *extra)
|
||||
pfree(oldval);
|
||||
}
|
||||
|
||||
/* Check if we need to suspend inserts because of lagging replication. */
|
||||
static uint64
|
||||
backpressure_lag_impl(void)
|
||||
/* BEGIN_HADRON */
|
||||
static uint64 hadron_backpressure_lag_impl(void)
|
||||
{
|
||||
struct WalproposerShmemState* state = NULL;
|
||||
uint64 lag = 0;
|
||||
|
||||
/* BEGIN_HADRON */
|
||||
if(max_cluster_size < 0){
|
||||
// if max cluster size is not set, then we don't apply backpressure because we're reconfiguring PG
|
||||
return 0;
|
||||
}
|
||||
/* END_HADRON */
|
||||
|
||||
lag = backpressure_lag_impl();
|
||||
state = GetWalpropShmemState();
|
||||
if ( state != NULL && databricks_max_wal_mb_per_second != -1 )
|
||||
{
|
||||
int old_limit = pg_atomic_read_u32(&state->wal_rate_limiter.effective_max_wal_bytes_per_second);
|
||||
int new_limit = (lag == 0)? positive_mb_to_bytes(databricks_max_wal_mb_per_second) : databricks_throttled_max_wal_bytes_per_second;
|
||||
if( old_limit != new_limit )
|
||||
{
|
||||
uint64 batch_start_time = pg_atomic_read_u64(&state->wal_rate_limiter.batch_start_time_us);
|
||||
uint64 batch_end_time = pg_atomic_read_u64(&state->wal_rate_limiter.batch_end_time_us);
|
||||
// the rate limit has changed, we need to reset the rate limiter's batch end time
|
||||
pg_atomic_write_u32(&state->wal_rate_limiter.effective_max_wal_bytes_per_second, new_limit);
|
||||
pg_atomic_write_u64(&state->wal_rate_limiter.batch_end_time_us, Min(batch_start_time + USECS_PER_SEC, batch_end_time));
|
||||
}
|
||||
if( new_limit == -1 )
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (pg_atomic_read_u32(&state->wal_rate_limiter.should_limit) == true)
|
||||
{
|
||||
TimestampTz now = GetCurrentTimestamp();
|
||||
struct WalRateLimiter *limiter = &state->wal_rate_limiter;
|
||||
uint64 batch_end_time = pg_atomic_read_u64(&limiter->batch_end_time_us);
|
||||
if ( now >= batch_end_time )
|
||||
{
|
||||
/*
 * The backend has passed the batch end time and it's time to push more WAL.
 * If the backends push WAL too fast, the WAL proposer will rate limit them again.
 */
|
||||
uint32 expected = true;
|
||||
pg_atomic_compare_exchange_u32(&state->wal_rate_limiter.should_limit, &expected, false);
|
||||
return 0;
|
||||
}
|
||||
return Max(lag, 1);
|
||||
}
|
||||
// The rate limiter decided not to throttle; return 0.
|
||||
return 0;
|
||||
}
|
||||
|
||||
return lag;
|
||||
}
|
||||
/* END_HADRON */
|
||||
|
||||
/* Check if we need to suspend inserts because of lagging replication. */
|
||||
static uint64
|
||||
backpressure_lag_impl(void)
|
||||
{
|
||||
if (max_replication_apply_lag > 0 || max_replication_flush_lag > 0 || max_replication_write_lag > 0)
|
||||
{
|
||||
XLogRecPtr writePtr;
|
||||
@@ -429,45 +507,47 @@ backpressure_lag_impl(void)
|
||||
LSN_FORMAT_ARGS(flushPtr),
|
||||
LSN_FORMAT_ARGS(applyPtr));
|
||||
|
||||
if ((writePtr != InvalidXLogRecPtr && max_replication_write_lag > 0 && myFlushLsn > writePtr + max_replication_write_lag * MB))
|
||||
if (lakebase_mode)
|
||||
{
|
||||
return (myFlushLsn - writePtr - max_replication_write_lag * MB);
|
||||
}
|
||||
// In case PG does not have the shard map initialized, assume PG always has at least 1 shard.
|
||||
shardno_t num_shards = Max(1, get_num_shards());
|
||||
int tenant_max_replication_apply_lag = num_shards * max_replication_apply_lag;
|
||||
int tenant_max_replication_flush_lag = num_shards * max_replication_flush_lag;
|
||||
int tenant_max_replication_write_lag = num_shards * max_replication_write_lag;
|
||||
|
||||
if ((flushPtr != InvalidXLogRecPtr && max_replication_flush_lag > 0 && myFlushLsn > flushPtr + max_replication_flush_lag * MB))
|
||||
{
|
||||
return (myFlushLsn - flushPtr - max_replication_flush_lag * MB);
|
||||
}
|
||||
if ((writePtr != InvalidXLogRecPtr && tenant_max_replication_write_lag > 0 && myFlushLsn > writePtr + tenant_max_replication_write_lag * MB))
|
||||
{
|
||||
return (myFlushLsn - writePtr - tenant_max_replication_write_lag * MB);
|
||||
}
|
||||
|
||||
if ((applyPtr != InvalidXLogRecPtr && max_replication_apply_lag > 0 && myFlushLsn > applyPtr + max_replication_apply_lag * MB))
|
||||
if ((flushPtr != InvalidXLogRecPtr && tenant_max_replication_flush_lag > 0 && myFlushLsn > flushPtr + tenant_max_replication_flush_lag * MB))
|
||||
{
|
||||
return (myFlushLsn - flushPtr - tenant_max_replication_flush_lag * MB);
|
||||
}
|
||||
|
||||
if ((applyPtr != InvalidXLogRecPtr && tenant_max_replication_apply_lag > 0 && myFlushLsn > applyPtr + tenant_max_replication_apply_lag * MB))
|
||||
{
|
||||
return (myFlushLsn - applyPtr - tenant_max_replication_apply_lag * MB);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
return (myFlushLsn - applyPtr - max_replication_apply_lag * MB);
|
||||
if ((writePtr != InvalidXLogRecPtr && max_replication_write_lag > 0 && myFlushLsn > writePtr + max_replication_write_lag * MB))
|
||||
{
|
||||
return (myFlushLsn - writePtr - max_replication_write_lag * MB);
|
||||
}
|
||||
|
||||
if ((flushPtr != InvalidXLogRecPtr && max_replication_flush_lag > 0 && myFlushLsn > flushPtr + max_replication_flush_lag * MB))
|
||||
{
|
||||
return (myFlushLsn - flushPtr - max_replication_flush_lag * MB);
|
||||
}
|
||||
|
||||
if ((applyPtr != InvalidXLogRecPtr && max_replication_apply_lag > 0 && myFlushLsn > applyPtr + max_replication_apply_lag * MB))
|
||||
{
|
||||
return (myFlushLsn - applyPtr - max_replication_apply_lag * MB);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* BEGIN_HADRON */
|
||||
if (databricks_max_wal_mb_per_second == -1) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
state = GetWalpropShmemState();
|
||||
if (state != NULL && !!pg_atomic_read_u32(&state->wal_rate_limiter.should_limit))
|
||||
{
|
||||
TimestampTz now = GetCurrentTimestamp();
|
||||
struct WalRateLimiter *limiter = &state->wal_rate_limiter;
|
||||
uint64 last_recorded_time = pg_atomic_read_u64(&limiter->last_recorded_time_us);
|
||||
if (now - last_recorded_time > USECS_PER_SEC)
|
||||
{
|
||||
/*
 * More than 1 second has passed since the last recorded time, so it's time to push more WAL.
 * If the backends push WAL too fast, the WAL proposer will rate limit them again.
 */
|
||||
uint32 expected = true;
|
||||
pg_atomic_compare_exchange_u32(&state->wal_rate_limiter.should_limit, &expected, false);
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
/* END_HADRON */
|
||||
return 0;
|
||||
}
|
||||
|
||||
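To make the per-tenant scaling above concrete: in lakebase_mode each per-shard lag limit is multiplied by the shard count before being compared against the local flush LSN. The stand-alone sketch below is not code from this change; the tenant_lag helper and the sample numbers are illustrative assumptions showing how the shard count affects the reported write lag.

/* Illustrative sketch: per-tenant write-lag threshold = per-shard limit * shard count. */
#include <stdint.h>
#include <stdio.h>

#define MB ((uint64_t)1024 * 1024)

/* Returns the backpressure lag in bytes, or 0 if the flush LSN is within the limit. */
static uint64_t tenant_lag(uint64_t my_flush_lsn, uint64_t write_ptr,
                           uint64_t max_write_lag_mb, uint64_t num_shards)
{
    uint64_t limit = num_shards * max_write_lag_mb * MB;
    if (my_flush_lsn > write_ptr + limit)
        return my_flush_lsn - write_ptr - limit;
    return 0;
}

int main(void)
{
    /* With 4 shards and a 500MB per-shard write-lag limit, the tenant-wide limit is 2000MB:
     * a 2100MB gap between the local flush LSN and the pageserver write pointer
     * yields 100MB of reported lag, while the same gap with 8 shards yields none. */
    printf("%llu\n", (unsigned long long) tenant_lag(2100 * MB, 0, 500, 4)); /* 104857600 */
    printf("%llu\n", (unsigned long long) tenant_lag(2100 * MB, 0, 500, 8)); /* 0 */
    return 0;
}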
@@ -482,9 +562,9 @@ startup_backpressure_wrap(void)
|
||||
if (AmStartupProcess() || !IsUnderPostmaster)
|
||||
return 0;
|
||||
|
||||
delay_backend_us = &backpressure_lag_impl;
|
||||
delay_backend_us = &hadron_backpressure_lag_impl;
|
||||
|
||||
return backpressure_lag_impl();
|
||||
return hadron_backpressure_lag_impl();
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -514,8 +594,10 @@ WalproposerShmemInit(void)
|
||||
pg_atomic_init_u64(&walprop_shared->backpressureThrottlingTime, 0);
|
||||
pg_atomic_init_u64(&walprop_shared->currentClusterSize, 0);
|
||||
/* BEGIN_HADRON */
|
||||
pg_atomic_init_u32(&walprop_shared->wal_rate_limiter.effective_max_wal_bytes_per_second, -1);
|
||||
pg_atomic_init_u32(&walprop_shared->wal_rate_limiter.should_limit, 0);
|
||||
pg_atomic_init_u64(&walprop_shared->wal_rate_limiter.last_recorded_time_us, 0);
|
||||
pg_atomic_init_u64(&walprop_shared->wal_rate_limiter.batch_start_time_us, 0);
|
||||
pg_atomic_init_u64(&walprop_shared->wal_rate_limiter.batch_end_time_us, 0);
|
||||
/* END_HADRON */
|
||||
}
|
||||
}
|
||||
@@ -530,8 +612,10 @@ WalproposerShmemInit_SyncSafekeeper(void)
|
||||
pg_atomic_init_u64(&walprop_shared->mineLastElectedTerm, 0);
|
||||
pg_atomic_init_u64(&walprop_shared->backpressureThrottlingTime, 0);
|
||||
/* BEGIN_HADRON */
|
||||
pg_atomic_init_u32(&walprop_shared->wal_rate_limiter.effective_max_wal_bytes_per_second, -1);
|
||||
pg_atomic_init_u32(&walprop_shared->wal_rate_limiter.should_limit, 0);
|
||||
pg_atomic_init_u64(&walprop_shared->wal_rate_limiter.last_recorded_time_us, 0);
|
||||
pg_atomic_init_u64(&walprop_shared->wal_rate_limiter.batch_start_time_us, 0);
|
||||
pg_atomic_init_u64(&walprop_shared->wal_rate_limiter.batch_end_time_us, 0);
|
||||
/* END_HADRON */
|
||||
}
|
||||
|
||||
@@ -563,7 +647,7 @@ backpressure_throttling_impl(void)
|
||||
return retry;
|
||||
|
||||
/* Calculate replicas lag */
|
||||
lag = backpressure_lag_impl();
|
||||
lag = hadron_backpressure_lag_impl();
|
||||
if (lag == 0)
|
||||
return retry;
|
||||
|
||||
@@ -659,7 +743,7 @@ record_pageserver_feedback(PageserverFeedback *ps_feedback, shardno_t num_shards
|
||||
|
||||
SpinLockAcquire(&walprop_shared->mutex);
|
||||
|
||||
// Hadron: Update the num_shards from the source-of-truth (shard map) lazily when we receive
// a new pageserver feedback.
|
||||
walprop_shared->num_shards = Max(walprop_shared->num_shards, num_shards);
|
||||
|
||||
@@ -1479,6 +1563,7 @@ XLogBroadcastWalProposer(WalProposer *wp)
|
||||
XLogRecPtr endptr;
|
||||
struct WalproposerShmemState *state = NULL;
|
||||
TimestampTz now = 0;
|
||||
int effective_max_wal_bytes_per_second = 0;
|
||||
|
||||
/* Start from the last sent position */
|
||||
startptr = sentPtr;
|
||||
@@ -1533,22 +1618,36 @@ XLogBroadcastWalProposer(WalProposer *wp)
|
||||
|
||||
/* BEGIN_HADRON */
|
||||
state = GetWalpropShmemState();
|
||||
if (databricks_max_wal_mb_per_second != -1 && state != NULL)
|
||||
effective_max_wal_bytes_per_second = pg_atomic_read_u32(&state->wal_rate_limiter.effective_max_wal_bytes_per_second);
|
||||
if (effective_max_wal_bytes_per_second != -1 && state != NULL)
|
||||
{
|
||||
uint64 max_wal_bytes = (uint64) databricks_max_wal_mb_per_second * 1024 * 1024;
|
||||
struct WalRateLimiter *limiter = &state->wal_rate_limiter;
|
||||
uint64 last_recorded_time = pg_atomic_read_u64(&limiter->last_recorded_time_us);
|
||||
if (now - last_recorded_time > USECS_PER_SEC)
|
||||
uint64 batch_end_time = pg_atomic_read_u64(&limiter->batch_end_time_us);
|
||||
if ( now >= batch_end_time )
|
||||
{
|
||||
/* Reset the rate limiter */
|
||||
// Reset the rate limiter to start a new batch
|
||||
limiter->sent_bytes = 0;
|
||||
pg_atomic_write_u64(&limiter->last_recorded_time_us, now);
|
||||
pg_atomic_write_u32(&limiter->should_limit, false);
|
||||
pg_atomic_write_u64(&limiter->batch_start_time_us, now);
|
||||
/* tentatively assign the batch end time as 1s from now. This could result in one of the following cases:
|
||||
1. If sent_bytes does not reach effective_max_wal_bytes_per_second in 1s,
|
||||
then we will reset the current batch and clear sent_bytes. No throttling happens.
|
||||
2. Otherwise, we will recompute the end time (below) based on how many bytes are actually written,
|
||||
and throttle PG until the batch end time. */
|
||||
pg_atomic_write_u64(&limiter->batch_end_time_us, now + USECS_PER_SEC);
|
||||
}
|
||||
limiter->sent_bytes += (endptr - startptr);
|
||||
if (limiter->sent_bytes > max_wal_bytes)
|
||||
if (limiter->sent_bytes > effective_max_wal_bytes_per_second)
|
||||
{
|
||||
uint64_t batch_start_time = pg_atomic_read_u64(&limiter->batch_start_time_us);
|
||||
uint64 throttle_usecs = USECS_PER_SEC * limiter->sent_bytes / Max(effective_max_wal_bytes_per_second, 1);
|
||||
if (throttle_usecs > kRateLimitMaxBatchUSecs){
|
||||
elog(LOG, "throttle_usecs %lu is too large, limiting to %lu", throttle_usecs, kRateLimitMaxBatchUSecs);
|
||||
throttle_usecs = kRateLimitMaxBatchUSecs;
|
||||
}
|
||||
|
||||
pg_atomic_write_u32(&limiter->should_limit, true);
|
||||
pg_atomic_write_u64(&limiter->batch_end_time_us, batch_start_time + throttle_usecs);
|
||||
}
|
||||
}
|
||||
/* END_HADRON */
|
||||
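The batch arithmetic above can be summarized as: once a batch has sent more bytes than the effective per-second budget, its end time is pushed out proportionally and capped at kRateLimitMaxBatchUSecs. The stand-alone sketch below illustrates only that computation; the batch_duration_usecs helper and the sample values are assumptions for illustration, not code from this change.

/* Illustrative sketch: how long a batch lasts once it overshoots the per-second budget. */
#include <stdint.h>
#include <stdio.h>

#define USECS_PER_SEC 1000000ULL
static const uint64_t kRateLimitMaxBatchUSecs = 300 * USECS_PER_SEC; /* 5 minutes */

/* Returns the batch duration in microseconds, given the bytes sent in the batch
 * and the effective rate limit in bytes per second. */
static uint64_t batch_duration_usecs(uint64_t sent_bytes, uint64_t max_bytes_per_second)
{
    uint64_t usecs = USECS_PER_SEC * sent_bytes / (max_bytes_per_second ? max_bytes_per_second : 1);
    if (usecs > kRateLimitMaxBatchUSecs)
        usecs = kRateLimitMaxBatchUSecs;   /* cap the sleep so PG is never blocked too long */
    return usecs;
}

int main(void)
{
    /* 10MB written at once against a 1MB/s budget stretches the batch to 10 seconds. */
    printf("%llu\n", (unsigned long long) batch_duration_usecs(10 * 1024 * 1024, 1024 * 1024));
    /* 10MB against the throttled 10KB/s budget would be ~1000s, capped at 300s. */
    printf("%llu\n", (unsigned long long) batch_duration_usecs(10 * 1024 * 1024, 10 * 1024));
    return 0;
}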
@@ -2052,7 +2151,7 @@ walprop_pg_process_safekeeper_feedback(WalProposer *wp, Safekeeper *sk)
|
||||
/* Only one main shard sends non-zero currentClusterSize */
|
||||
if (sk->appendResponse.ps_feedback.currentClusterSize > 0)
|
||||
SetNeonCurrentClusterSize(sk->appendResponse.ps_feedback.currentClusterSize);
|
||||
|
||||
|
||||
if (min_feedback.disk_consistent_lsn != standby_apply_lsn)
|
||||
{
|
||||
standby_apply_lsn = min_feedback.disk_consistent_lsn;
|
||||
|
||||
@@ -25,6 +25,7 @@ use crate::control_plane::messages::MetricsAuxInfo;
|
||||
use crate::error::{ReportableError, UserFacingError};
|
||||
use crate::metrics::{Metrics, NumDbConnectionsGuard};
|
||||
use crate::pqproto::StartupMessageParams;
|
||||
use crate::proxy::connect_compute::TlsNegotiation;
|
||||
use crate::proxy::neon_option;
|
||||
use crate::types::Host;
|
||||
|
||||
@@ -84,6 +85,14 @@ pub(crate) enum ConnectionError {
|
||||
|
||||
#[error("error acquiring resource permit: {0}")]
|
||||
TooManyConnectionAttempts(#[from] ApiLockError),
|
||||
|
||||
#[cfg(test)]
|
||||
#[error("retryable: {retryable}, wakeable: {wakeable}, kind: {kind:?}")]
|
||||
TestError {
|
||||
retryable: bool,
|
||||
wakeable: bool,
|
||||
kind: crate::error::ErrorKind,
|
||||
},
|
||||
}
|
||||
|
||||
impl UserFacingError for ConnectionError {
|
||||
@@ -94,6 +103,8 @@ impl UserFacingError for ConnectionError {
|
||||
"Failed to acquire permit to connect to the database. Too many database connection attempts are currently ongoing.".to_owned()
|
||||
}
|
||||
ConnectionError::TlsError(_) => COULD_NOT_CONNECT.to_owned(),
|
||||
#[cfg(test)]
|
||||
ConnectionError::TestError { .. } => self.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -104,6 +115,8 @@ impl ReportableError for ConnectionError {
|
||||
ConnectionError::TlsError(_) => crate::error::ErrorKind::Compute,
|
||||
ConnectionError::WakeComputeError(e) => e.get_error_kind(),
|
||||
ConnectionError::TooManyConnectionAttempts(e) => e.get_error_kind(),
|
||||
#[cfg(test)]
|
||||
ConnectionError::TestError { kind, .. } => *kind,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -256,6 +269,7 @@ impl ConnectInfo {
|
||||
async fn connect_raw(
|
||||
&self,
|
||||
config: &ComputeConfig,
|
||||
tls: TlsNegotiation,
|
||||
) -> Result<(SocketAddr, MaybeTlsStream<TcpStream, RustlsStream>), TlsError> {
|
||||
let timeout = config.timeout;
|
||||
|
||||
@@ -298,7 +312,7 @@ impl ConnectInfo {
|
||||
match connect_once(&*addrs).await {
|
||||
Ok((sockaddr, stream)) => Ok((
|
||||
sockaddr,
|
||||
tls::connect_tls(stream, self.ssl_mode, config, host).await?,
|
||||
tls::connect_tls(stream, self.ssl_mode, config, host, tls).await?,
|
||||
)),
|
||||
Err(err) => {
|
||||
warn!("couldn't connect to compute node at {host}:{port}: {err}");
|
||||
@@ -329,9 +343,10 @@ impl ConnectInfo {
|
||||
ctx: &RequestContext,
|
||||
aux: &MetricsAuxInfo,
|
||||
config: &ComputeConfig,
|
||||
tls: TlsNegotiation,
|
||||
) -> Result<ComputeConnection, ConnectionError> {
|
||||
let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Compute);
|
||||
let (socket_addr, stream) = self.connect_raw(config).await?;
|
||||
let (socket_addr, stream) = self.connect_raw(config, tls).await?;
|
||||
drop(pause);
|
||||
|
||||
tracing::Span::current().record("compute_id", tracing::field::display(&aux.compute_id));
|
||||
|
||||
@@ -7,6 +7,7 @@ use thiserror::Error;
|
||||
use tokio::io::{AsyncRead, AsyncWrite};
|
||||
|
||||
use crate::pqproto::request_tls;
|
||||
use crate::proxy::connect_compute::TlsNegotiation;
|
||||
use crate::proxy::retry::CouldRetry;
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
@@ -35,6 +36,7 @@ pub async fn connect_tls<S, T>(
|
||||
mode: SslMode,
|
||||
tls: &T,
|
||||
host: &str,
|
||||
negotiation: TlsNegotiation,
|
||||
) -> Result<MaybeTlsStream<S, T::Stream>, TlsError>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin + Send,
|
||||
@@ -49,12 +51,15 @@ where
|
||||
SslMode::Prefer | SslMode::Require => {}
|
||||
}
|
||||
|
||||
if !request_tls(&mut stream).await? {
|
||||
if SslMode::Require == mode {
|
||||
return Err(TlsError::Required);
|
||||
}
|
||||
|
||||
return Ok(MaybeTlsStream::Raw(stream));
|
||||
match negotiation {
|
||||
// No TLS request needed
|
||||
TlsNegotiation::Direct => {}
|
||||
// TLS request successful
|
||||
TlsNegotiation::Postgres if request_tls(&mut stream).await? => {}
|
||||
// TLS request failed but is required
|
||||
TlsNegotiation::Postgres if SslMode::Require == mode => return Err(TlsError::Required),
|
||||
// TLS request failed but is not required
|
||||
TlsNegotiation::Postgres => return Ok(MaybeTlsStream::Raw(stream)),
|
||||
}
|
||||
|
||||
Ok(MaybeTlsStream::Tls(
|
||||
|
||||
@@ -16,8 +16,9 @@ use crate::pglb::ClientRequestError;
|
||||
use crate::pglb::handshake::{HandshakeData, handshake};
|
||||
use crate::pglb::passthrough::ProxyPassthrough;
|
||||
use crate::protocol2::{ConnectHeader, ConnectionInfo, read_proxy_protocol};
|
||||
use crate::proxy::connect_compute::{TcpMechanism, connect_to_compute};
|
||||
use crate::proxy::{ErrorSource, forward_compute_params_to_client, send_client_greeting};
|
||||
use crate::proxy::{
|
||||
ErrorSource, connect_compute, forward_compute_params_to_client, send_client_greeting,
|
||||
};
|
||||
use crate::util::run_until_cancelled;
|
||||
|
||||
pub async fn task_main(
|
||||
@@ -215,14 +216,11 @@ pub(crate) async fn handle_client<S: AsyncRead + AsyncWrite + Unpin + Send>(
|
||||
};
|
||||
auth_info.set_startup_params(¶ms, true);
|
||||
|
||||
let mut node = connect_to_compute(
|
||||
let mut node = connect_compute::connect_to_compute(
|
||||
ctx,
|
||||
&TcpMechanism {
|
||||
locks: &config.connect_compute_locks,
|
||||
},
|
||||
config,
|
||||
&node_info,
|
||||
config.wake_compute_retry_config,
|
||||
&config.connect_to_compute,
|
||||
connect_compute::TlsNegotiation::Postgres,
|
||||
)
|
||||
.or_else(|e| async { Err(stream.throw_error(e, Some(ctx)).await) })
|
||||
.await?;
|
||||
|
||||
@@ -17,7 +17,6 @@ use crate::auth::backend::ComputeUserInfo;
|
||||
use crate::auth::backend::jwt::AuthRule;
|
||||
use crate::auth::{AuthError, IpPattern, check_peer_addr_is_in_list};
|
||||
use crate::cache::{Cached, TimedLru};
|
||||
use crate::config::ComputeConfig;
|
||||
use crate::context::RequestContext;
|
||||
use crate::control_plane::messages::{ControlPlaneErrorMessage, MetricsAuxInfo};
|
||||
use crate::intern::{AccountIdInt, EndpointIdInt, ProjectIdInt};
|
||||
@@ -72,16 +71,6 @@ pub(crate) struct NodeInfo {
|
||||
pub(crate) aux: MetricsAuxInfo,
|
||||
}
|
||||
|
||||
impl NodeInfo {
|
||||
pub(crate) async fn connect(
|
||||
&self,
|
||||
ctx: &RequestContext,
|
||||
config: &ComputeConfig,
|
||||
) -> Result<compute::ComputeConnection, compute::ConnectionError> {
|
||||
self.conn_info.connect(ctx, &self.aux, config).await
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Default, Debug)]
|
||||
pub(crate) struct AccessBlockerFlags {
|
||||
pub public_access_blocked: bool,
|
||||
|
||||
82
proxy/src/proxy/connect_auth.rs
Normal file
@@ -0,0 +1,82 @@
|
||||
use thiserror::Error;
|
||||
|
||||
use crate::auth::Backend;
|
||||
use crate::auth::backend::ComputeUserInfo;
|
||||
use crate::cache::Cache;
|
||||
use crate::compute::{AuthInfo, ComputeConnection, ConnectionError, PostgresError};
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::context::RequestContext;
|
||||
use crate::control_plane::client::ControlPlaneClient;
|
||||
use crate::error::{ReportableError, UserFacingError};
|
||||
use crate::proxy::connect_compute::{TlsNegotiation, connect_to_compute};
|
||||
use crate::proxy::retry::ShouldRetryWakeCompute;
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum AuthError {
|
||||
#[error(transparent)]
|
||||
Auth(#[from] PostgresError),
|
||||
#[error(transparent)]
|
||||
Connect(#[from] ConnectionError),
|
||||
}
|
||||
|
||||
impl UserFacingError for AuthError {
|
||||
fn to_string_client(&self) -> String {
|
||||
match self {
|
||||
AuthError::Auth(postgres_error) => postgres_error.to_string_client(),
|
||||
AuthError::Connect(connection_error) => connection_error.to_string_client(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ReportableError for AuthError {
|
||||
fn get_error_kind(&self) -> crate::error::ErrorKind {
|
||||
match self {
|
||||
AuthError::Auth(postgres_error) => postgres_error.get_error_kind(),
|
||||
AuthError::Connect(connection_error) => connection_error.get_error_kind(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Try to connect to the compute node, retrying if necessary.
|
||||
#[tracing::instrument(skip_all)]
|
||||
pub(crate) async fn connect_to_compute_and_auth(
|
||||
ctx: &RequestContext,
|
||||
config: &ProxyConfig,
|
||||
user_info: &Backend<'_, ComputeUserInfo>,
|
||||
auth_info: AuthInfo,
|
||||
tls: TlsNegotiation,
|
||||
) -> Result<ComputeConnection, AuthError> {
|
||||
let mut attempt = 0;
|
||||
|
||||
// NOTE: This is messy, but should hopefully be detangled with PGLB.
|
||||
// We wanted to separate the concerns of **connect** to compute (a PGLB operation),
|
||||
// from **authenticate** to compute (a NeonKeeper operation).
|
||||
//
|
||||
// This unfortunately removed retry handling for one error case where
|
||||
// the compute was cached, and we connected, but the compute cache was actually stale
|
||||
// and is associated with the wrong endpoint. We detect this when the **authentication** fails.
|
||||
// As such, we retry once here if the `authenticate` function fails and the error is valid to retry.
|
||||
loop {
|
||||
attempt += 1;
|
||||
let mut node = connect_to_compute(ctx, config, user_info, tls).await?;
|
||||
|
||||
let res = auth_info.authenticate(ctx, &mut node).await;
|
||||
match res {
|
||||
Ok(()) => return Ok(node),
|
||||
Err(e) => {
|
||||
if attempt < 2
|
||||
&& let Backend::ControlPlane(cplane, user_info) = user_info
|
||||
&& let ControlPlaneClient::ProxyV1(cplane_proxy_v1) = &**cplane
|
||||
&& e.should_retry_wake_compute()
|
||||
{
|
||||
tracing::warn!(error = ?e, "retrying wake compute");
|
||||
let key = user_info.endpoint_cache_key();
|
||||
cplane_proxy_v1.caches.node_info.invalidate(&key);
|
||||
continue;
|
||||
}
|
||||
|
||||
return Err(e)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,18 +1,15 @@
|
||||
use async_trait::async_trait;
|
||||
use tokio::time;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use crate::compute::{self, COULD_NOT_CONNECT, ComputeConnection};
|
||||
use crate::config::{ComputeConfig, RetryConfig};
|
||||
use crate::config::{ComputeConfig, ProxyConfig, RetryConfig};
|
||||
use crate::context::RequestContext;
|
||||
use crate::control_plane::errors::WakeComputeError;
|
||||
use crate::control_plane::locks::ApiLocks;
|
||||
use crate::control_plane::{self, NodeInfo};
|
||||
use crate::error::ReportableError;
|
||||
use crate::metrics::{
|
||||
ConnectOutcome, ConnectionFailureKind, Metrics, RetriesMetricGroup, RetryType,
|
||||
};
|
||||
use crate::proxy::retry::{CouldRetry, ShouldRetryWakeCompute, retry_after, should_retry};
|
||||
use crate::proxy::retry::{ShouldRetryWakeCompute, retry_after, should_retry};
|
||||
use crate::proxy::wake_compute::{WakeComputeBackend, wake_compute};
|
||||
use crate::types::Host;
|
||||
|
||||
@@ -35,29 +32,32 @@ pub(crate) fn invalidate_cache(node_info: control_plane::CachedNodeInfo) -> Node
|
||||
node_info.invalidate()
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub(crate) trait ConnectMechanism {
|
||||
type Connection;
|
||||
type ConnectError: ReportableError;
|
||||
type Error: From<Self::ConnectError>;
|
||||
async fn connect_once(
|
||||
&self,
|
||||
ctx: &RequestContext,
|
||||
node_info: &control_plane::CachedNodeInfo,
|
||||
config: &ComputeConfig,
|
||||
) -> Result<Self::Connection, Self::ConnectError>;
|
||||
) -> Result<Self::Connection, compute::ConnectionError>;
|
||||
}
|
||||
|
||||
pub(crate) struct TcpMechanism {
|
||||
struct TcpMechanism<'a> {
|
||||
/// connect_to_compute concurrency lock
|
||||
pub(crate) locks: &'static ApiLocks<Host>,
|
||||
locks: &'a ApiLocks<Host>,
|
||||
tls: TlsNegotiation,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ConnectMechanism for TcpMechanism {
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
|
||||
pub enum TlsNegotiation {
|
||||
/// TLS is assumed
|
||||
Direct,
|
||||
/// We must ask for TLS using the postgres SSLRequest message
|
||||
Postgres,
|
||||
}
|
||||
|
||||
impl ConnectMechanism for TcpMechanism<'_> {
|
||||
type Connection = ComputeConnection;
|
||||
type ConnectError = compute::ConnectionError;
|
||||
type Error = compute::ConnectionError;
|
||||
|
||||
#[tracing::instrument(skip_all, fields(
|
||||
pid = tracing::field::Empty,
|
||||
@@ -68,25 +68,47 @@ impl ConnectMechanism for TcpMechanism {
|
||||
ctx: &RequestContext,
|
||||
node_info: &control_plane::CachedNodeInfo,
|
||||
config: &ComputeConfig,
|
||||
) -> Result<ComputeConnection, Self::Error> {
|
||||
) -> Result<ComputeConnection, compute::ConnectionError> {
|
||||
let permit = self.locks.get_permit(&node_info.conn_info.host).await?;
|
||||
permit.release_result(node_info.connect(ctx, config).await)
|
||||
|
||||
permit.release_result(
|
||||
node_info
|
||||
.conn_info
|
||||
.connect(ctx, &node_info.aux, config, self.tls)
|
||||
.await,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Try to connect to the compute node, retrying if necessary.
|
||||
#[tracing::instrument(skip_all)]
|
||||
pub(crate) async fn connect_to_compute<M: ConnectMechanism, B: WakeComputeBackend>(
|
||||
pub(crate) async fn connect_to_compute<B: WakeComputeBackend>(
|
||||
ctx: &RequestContext,
|
||||
config: &ProxyConfig,
|
||||
user_info: &B,
|
||||
tls: TlsNegotiation,
|
||||
) -> Result<ComputeConnection, compute::ConnectionError> {
|
||||
connect_to_compute_inner(
|
||||
ctx,
|
||||
&TcpMechanism {
|
||||
locks: &config.connect_compute_locks,
|
||||
tls,
|
||||
},
|
||||
user_info,
|
||||
config.wake_compute_retry_config,
|
||||
&config.connect_to_compute,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Try to connect to the compute node, retrying if necessary.
|
||||
pub(crate) async fn connect_to_compute_inner<M: ConnectMechanism, B: WakeComputeBackend>(
|
||||
ctx: &RequestContext,
|
||||
mechanism: &M,
|
||||
user_info: &B,
|
||||
wake_compute_retry_config: RetryConfig,
|
||||
compute: &ComputeConfig,
|
||||
) -> Result<M::Connection, M::Error>
|
||||
where
|
||||
M::ConnectError: CouldRetry + ShouldRetryWakeCompute + std::fmt::Debug,
|
||||
M::Error: From<WakeComputeError>,
|
||||
{
|
||||
) -> Result<M::Connection, compute::ConnectionError> {
|
||||
let mut num_retries = 0;
|
||||
let node_info =
|
||||
wake_compute(&mut num_retries, ctx, user_info, wake_compute_retry_config).await?;
|
||||
@@ -120,7 +142,7 @@ where
|
||||
},
|
||||
num_retries.into(),
|
||||
);
|
||||
return Err(err.into());
|
||||
return Err(err);
|
||||
}
|
||||
node_info
|
||||
} else {
|
||||
@@ -161,7 +183,7 @@ where
|
||||
},
|
||||
num_retries.into(),
|
||||
);
|
||||
return Err(e.into());
|
||||
return Err(e);
|
||||
}
|
||||
|
||||
warn!(error = ?e, num_retries, retriable = true, COULD_NOT_CONNECT);
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
pub(crate) mod connect_auth;
|
||||
pub(crate) mod connect_compute;
|
||||
pub(crate) mod retry;
|
||||
pub(crate) mod wake_compute;
|
||||
@@ -23,17 +24,13 @@ use tokio::net::TcpStream;
|
||||
use tokio::sync::oneshot;
|
||||
use tracing::Instrument;
|
||||
|
||||
use crate::cache::Cache;
|
||||
use crate::cancellation::{CancelClosure, CancellationHandler};
|
||||
use crate::compute::{ComputeConnection, PostgresError, RustlsStream};
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::context::RequestContext;
|
||||
use crate::control_plane::client::ControlPlaneClient;
|
||||
pub use crate::pglb::copy_bidirectional::{ErrorSource, copy_bidirectional_client_compute};
|
||||
use crate::pglb::{ClientMode, ClientRequestError};
|
||||
use crate::pqproto::{BeMessage, CancelKeyData, StartupMessageParams};
|
||||
use crate::proxy::connect_compute::{TcpMechanism, connect_to_compute};
|
||||
use crate::proxy::retry::ShouldRetryWakeCompute;
|
||||
use crate::rate_limiter::EndpointRateLimiter;
|
||||
use crate::stream::{PqStream, Stream};
|
||||
use crate::types::EndpointCacheKey;
|
||||
@@ -95,61 +92,24 @@ pub(crate) async fn handle_client<S: AsyncRead + AsyncWrite + Unpin + Send>(
|
||||
let mut auth_info = compute::AuthInfo::with_auth_keys(creds.keys);
|
||||
auth_info.set_startup_params(params, params_compat);
|
||||
|
||||
let mut node;
|
||||
let mut attempt = 0;
|
||||
let connect = TcpMechanism {
|
||||
locks: &config.connect_compute_locks,
|
||||
};
|
||||
let backend = auth::Backend::ControlPlane(cplane, creds.info);
|
||||
|
||||
// NOTE: This is messy, but should hopefully be detangled with PGLB.
|
||||
// We wanted to separate the concerns of **connect** to compute (a PGLB operation),
|
||||
// from **authenticate** to compute (a NeonKeeper operation).
|
||||
//
|
||||
// This unfortunately removed retry handling for one error case where
|
||||
// the compute was cached, and we connected, but the compute cache was actually stale
|
||||
// and is associated with the wrong endpoint. We detect this when the **authentication** fails.
|
||||
// As such, we retry once here if the `authenticate` function fails and the error is valid to retry.
|
||||
loop {
|
||||
attempt += 1;
|
||||
// TODO: callback to pglb
|
||||
let res = connect_auth::connect_to_compute_and_auth(
|
||||
ctx,
|
||||
config,
|
||||
&backend,
|
||||
auth_info,
|
||||
connect_compute::TlsNegotiation::Postgres,
|
||||
)
|
||||
.await;
|
||||
|
||||
// TODO: callback to pglb
|
||||
let res = connect_to_compute(
|
||||
ctx,
|
||||
&connect,
|
||||
&backend,
|
||||
config.wake_compute_retry_config,
|
||||
&config.connect_to_compute,
|
||||
)
|
||||
.await;
|
||||
let mut node = match res {
|
||||
Ok(node) => node,
|
||||
Err(e) => Err(client.throw_error(e, Some(ctx)).await)?,
|
||||
};
|
||||
|
||||
match res {
|
||||
Ok(n) => node = n,
|
||||
Err(e) => return Err(client.throw_error(e, Some(ctx)).await)?,
|
||||
}
|
||||
|
||||
let auth::Backend::ControlPlane(cplane, user_info) = &backend else {
|
||||
unreachable!("ensured above");
|
||||
};
|
||||
|
||||
let res = auth_info.authenticate(ctx, &mut node).await;
|
||||
match res {
|
||||
Ok(()) => {
|
||||
send_client_greeting(ctx, &config.greetings, client);
|
||||
break;
|
||||
}
|
||||
Err(e) if attempt < 2 && e.should_retry_wake_compute() => {
|
||||
tracing::warn!(error = ?e, "retrying wake compute");
|
||||
|
||||
#[allow(irrefutable_let_patterns)]
|
||||
if let ControlPlaneClient::ProxyV1(cplane_proxy_v1) = &**cplane {
|
||||
let key = user_info.endpoint_cache_key();
|
||||
cplane_proxy_v1.caches.node_info.invalidate(&key);
|
||||
}
|
||||
}
|
||||
Err(e) => Err(client.throw_error(e, Some(ctx)).await)?,
|
||||
}
|
||||
}
|
||||
send_client_greeting(ctx, &config.greetings, client);
|
||||
|
||||
let auth::Backend::ControlPlane(_, user_info) = backend else {
|
||||
unreachable!("ensured above");
|
||||
|
||||
@@ -31,18 +31,6 @@ impl CouldRetry for io::Error {
|
||||
}
|
||||
}
|
||||
|
||||
impl CouldRetry for postgres_client::error::DbError {
|
||||
fn could_retry(&self) -> bool {
|
||||
use postgres_client::error::SqlState;
|
||||
matches!(
|
||||
self.code(),
|
||||
&SqlState::CONNECTION_FAILURE
|
||||
| &SqlState::CONNECTION_EXCEPTION
|
||||
| &SqlState::CONNECTION_DOES_NOT_EXIST
|
||||
| &SqlState::SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION,
|
||||
)
|
||||
}
|
||||
}
|
||||
impl ShouldRetryWakeCompute for postgres_client::error::DbError {
|
||||
fn should_retry_wake_compute(&self) -> bool {
|
||||
use postgres_client::error::SqlState;
|
||||
@@ -73,17 +61,6 @@ impl ShouldRetryWakeCompute for postgres_client::error::DbError {
|
||||
}
|
||||
}
|
||||
|
||||
impl CouldRetry for postgres_client::Error {
|
||||
fn could_retry(&self) -> bool {
|
||||
if let Some(io_err) = self.source().and_then(|x| x.downcast_ref()) {
|
||||
io::Error::could_retry(io_err)
|
||||
} else if let Some(db_err) = self.source().and_then(|x| x.downcast_ref()) {
|
||||
postgres_client::error::DbError::could_retry(db_err)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
impl ShouldRetryWakeCompute for postgres_client::Error {
|
||||
fn should_retry_wake_compute(&self) -> bool {
|
||||
if let Some(db_err) = self.source().and_then(|x| x.downcast_ref()) {
|
||||
@@ -102,6 +79,8 @@ impl CouldRetry for compute::ConnectionError {
|
||||
compute::ConnectionError::TlsError(err) => err.could_retry(),
|
||||
compute::ConnectionError::WakeComputeError(err) => err.could_retry(),
|
||||
compute::ConnectionError::TooManyConnectionAttempts(_) => false,
|
||||
#[cfg(test)]
|
||||
compute::ConnectionError::TestError { retryable, .. } => *retryable,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -110,6 +89,8 @@ impl ShouldRetryWakeCompute for compute::ConnectionError {
|
||||
match self {
|
||||
// the cache entry was not checked for validity
|
||||
compute::ConnectionError::TooManyConnectionAttempts(_) => false,
|
||||
#[cfg(test)]
|
||||
compute::ConnectionError::TestError { wakeable, .. } => *wakeable,
|
||||
_ => true,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -24,13 +24,13 @@ use crate::context::RequestContext;
|
||||
use crate::control_plane::client::{ControlPlaneClient, TestControlPlaneClient};
|
||||
use crate::control_plane::messages::{ControlPlaneErrorMessage, Details, MetricsAuxInfo, Status};
|
||||
use crate::control_plane::{self, CachedNodeInfo, NodeInfo, NodeInfoCache};
|
||||
use crate::error::{ErrorKind, ReportableError};
|
||||
use crate::error::ErrorKind;
|
||||
use crate::pglb::ERR_INSECURE_CONNECTION;
|
||||
use crate::pglb::handshake::{HandshakeData, handshake};
|
||||
use crate::pqproto::BeMessage;
|
||||
use crate::proxy::NeonOptions;
|
||||
use crate::proxy::connect_compute::{ConnectMechanism, connect_to_compute};
|
||||
use crate::proxy::retry::{ShouldRetryWakeCompute, retry_after};
|
||||
use crate::proxy::connect_compute::{ConnectMechanism, connect_to_compute_inner};
|
||||
use crate::proxy::retry::retry_after;
|
||||
use crate::stream::{PqStream, Stream};
|
||||
use crate::tls::client_config::compute_client_config_with_certs;
|
||||
use crate::tls::server_config::CertResolver;
|
||||
@@ -430,71 +430,36 @@ impl TestConnectMechanism {
|
||||
#[derive(Debug)]
|
||||
struct TestConnection;
|
||||
|
||||
#[derive(Debug)]
|
||||
struct TestConnectError {
|
||||
retryable: bool,
|
||||
wakeable: bool,
|
||||
kind: crate::error::ErrorKind,
|
||||
}
|
||||
|
||||
impl ReportableError for TestConnectError {
|
||||
fn get_error_kind(&self) -> crate::error::ErrorKind {
|
||||
self.kind
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for TestConnectError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{self:?}")
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for TestConnectError {}
|
||||
|
||||
impl CouldRetry for TestConnectError {
|
||||
fn could_retry(&self) -> bool {
|
||||
self.retryable
|
||||
}
|
||||
}
|
||||
impl ShouldRetryWakeCompute for TestConnectError {
|
||||
fn should_retry_wake_compute(&self) -> bool {
|
||||
self.wakeable
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ConnectMechanism for TestConnectMechanism {
|
||||
type Connection = TestConnection;
|
||||
type ConnectError = TestConnectError;
|
||||
type Error = anyhow::Error;
|
||||
|
||||
async fn connect_once(
|
||||
&self,
|
||||
_ctx: &RequestContext,
|
||||
_node_info: &control_plane::CachedNodeInfo,
|
||||
_config: &ComputeConfig,
|
||||
) -> Result<Self::Connection, Self::ConnectError> {
|
||||
) -> Result<Self::Connection, compute::ConnectionError> {
|
||||
let mut counter = self.counter.lock().unwrap();
|
||||
let action = self.sequence[*counter];
|
||||
*counter += 1;
|
||||
match action {
|
||||
ConnectAction::Connect => Ok(TestConnection),
|
||||
ConnectAction::Retry => Err(TestConnectError {
|
||||
ConnectAction::Retry => Err(compute::ConnectionError::TestError {
|
||||
retryable: true,
|
||||
wakeable: true,
|
||||
kind: ErrorKind::Compute,
|
||||
}),
|
||||
ConnectAction::RetryNoWake => Err(TestConnectError {
|
||||
ConnectAction::RetryNoWake => Err(compute::ConnectionError::TestError {
|
||||
retryable: true,
|
||||
wakeable: false,
|
||||
kind: ErrorKind::Compute,
|
||||
}),
|
||||
ConnectAction::Fail => Err(TestConnectError {
|
||||
ConnectAction::Fail => Err(compute::ConnectionError::TestError {
|
||||
retryable: false,
|
||||
wakeable: true,
|
||||
kind: ErrorKind::Compute,
|
||||
}),
|
||||
ConnectAction::FailNoWake => Err(TestConnectError {
|
||||
ConnectAction::FailNoWake => Err(compute::ConnectionError::TestError {
|
||||
retryable: false,
|
||||
wakeable: false,
|
||||
kind: ErrorKind::Compute,
|
||||
@@ -620,7 +585,7 @@ async fn connect_to_compute_success() {
|
||||
let mechanism = TestConnectMechanism::new(vec![Wake, Connect]);
|
||||
let user_info = helper_create_connect_info(&mechanism);
|
||||
let config = config();
|
||||
connect_to_compute(&ctx, &mechanism, &user_info, config.retry, &config)
|
||||
connect_to_compute_inner(&ctx, &mechanism, &user_info, config.retry, &config)
|
||||
.await
|
||||
.unwrap();
|
||||
mechanism.verify();
|
||||
@@ -634,7 +599,7 @@ async fn connect_to_compute_retry() {
|
||||
let mechanism = TestConnectMechanism::new(vec![Wake, Retry, Wake, Connect]);
|
||||
let user_info = helper_create_connect_info(&mechanism);
|
||||
let config = config();
|
||||
connect_to_compute(&ctx, &mechanism, &user_info, config.retry, &config)
|
||||
connect_to_compute_inner(&ctx, &mechanism, &user_info, config.retry, &config)
|
||||
.await
|
||||
.unwrap();
|
||||
mechanism.verify();
|
||||
@@ -649,7 +614,7 @@ async fn connect_to_compute_non_retry_1() {
|
||||
let mechanism = TestConnectMechanism::new(vec![Wake, Retry, Wake, Fail]);
|
||||
let user_info = helper_create_connect_info(&mechanism);
|
||||
let config = config();
|
||||
connect_to_compute(&ctx, &mechanism, &user_info, config.retry, &config)
|
||||
connect_to_compute_inner(&ctx, &mechanism, &user_info, config.retry, &config)
|
||||
.await
|
||||
.unwrap_err();
|
||||
mechanism.verify();
|
||||
@@ -664,7 +629,7 @@ async fn connect_to_compute_non_retry_2() {
|
||||
let mechanism = TestConnectMechanism::new(vec![Wake, Fail, Wake, Connect]);
|
||||
let user_info = helper_create_connect_info(&mechanism);
|
||||
let config = config();
|
||||
connect_to_compute(&ctx, &mechanism, &user_info, config.retry, &config)
|
||||
connect_to_compute_inner(&ctx, &mechanism, &user_info, config.retry, &config)
|
||||
.await
|
||||
.unwrap();
|
||||
mechanism.verify();
|
||||
@@ -686,7 +651,7 @@ async fn connect_to_compute_non_retry_3() {
|
||||
backoff_factor: 2.0,
|
||||
};
|
||||
let config = config();
|
||||
connect_to_compute(
|
||||
connect_to_compute_inner(
|
||||
&ctx,
|
||||
&mechanism,
|
||||
&user_info,
|
||||
@@ -707,7 +672,7 @@ async fn wake_retry() {
|
||||
let mechanism = TestConnectMechanism::new(vec![WakeRetry, Wake, Connect]);
|
||||
let user_info = helper_create_connect_info(&mechanism);
|
||||
let config = config();
|
||||
connect_to_compute(&ctx, &mechanism, &user_info, config.retry, &config)
|
||||
connect_to_compute_inner(&ctx, &mechanism, &user_info, config.retry, &config)
|
||||
.await
|
||||
.unwrap();
|
||||
mechanism.verify();
|
||||
@@ -722,7 +687,7 @@ async fn wake_non_retry() {
|
||||
let mechanism = TestConnectMechanism::new(vec![WakeRetry, WakeFail]);
|
||||
let user_info = helper_create_connect_info(&mechanism);
|
||||
let config = config();
|
||||
connect_to_compute(&ctx, &mechanism, &user_info, config.retry, &config)
|
||||
connect_to_compute_inner(&ctx, &mechanism, &user_info, config.retry, &config)
|
||||
.await
|
||||
.unwrap_err();
|
||||
mechanism.verify();
|
||||
@@ -741,7 +706,7 @@ async fn fail_but_wake_invalidates_cache() {
|
||||
let user = helper_create_connect_info(&mech);
|
||||
let cfg = config();
|
||||
|
||||
connect_to_compute(&ctx, &mech, &user, cfg.retry, &cfg)
|
||||
connect_to_compute_inner(&ctx, &mech, &user, cfg.retry, &cfg)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@@ -762,7 +727,7 @@ async fn fail_no_wake_skips_cache_invalidation() {
|
||||
let user = helper_create_connect_info(&mech);
|
||||
let cfg = config();
|
||||
|
||||
connect_to_compute(&ctx, &mech, &user, cfg.retry, &cfg)
|
||||
connect_to_compute_inner(&ctx, &mech, &user, cfg.retry, &cfg)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@@ -783,7 +748,7 @@ async fn retry_but_wake_invalidates_cache() {
|
||||
let user_info = helper_create_connect_info(&mechanism);
|
||||
let cfg = config();
|
||||
|
||||
connect_to_compute(&ctx, &mechanism, &user_info, cfg.retry, &cfg)
|
||||
connect_to_compute_inner(&ctx, &mechanism, &user_info, cfg.retry, &cfg)
|
||||
.await
|
||||
.unwrap();
|
||||
mechanism.verify();
|
||||
@@ -806,7 +771,7 @@ async fn retry_no_wake_skips_invalidation() {
|
||||
let user_info = helper_create_connect_info(&mechanism);
|
||||
let cfg = config();
|
||||
|
||||
connect_to_compute(&ctx, &mechanism, &user_info, cfg.retry, &cfg)
|
||||
connect_to_compute_inner(&ctx, &mechanism, &user_info, cfg.retry, &cfg)
|
||||
.await
|
||||
.unwrap_err();
|
||||
mechanism.verify();
|
||||
@@ -829,7 +794,7 @@ async fn retry_no_wake_error_fast() {
|
||||
let user_info = helper_create_connect_info(&mechanism);
|
||||
let cfg = config();
|
||||
|
||||
connect_to_compute(&ctx, &mechanism, &user_info, cfg.retry, &cfg)
|
||||
connect_to_compute_inner(&ctx, &mechanism, &user_info, cfg.retry, &cfg)
|
||||
.await
|
||||
.unwrap_err();
|
||||
mechanism.verify();
|
||||
@@ -852,7 +817,7 @@ async fn retry_cold_wake_skips_invalidation() {
|
||||
let user_info = helper_create_connect_info(&mechanism);
|
||||
let cfg = config();
|
||||
|
||||
connect_to_compute(&ctx, &mechanism, &user_info, cfg.retry, &cfg)
|
||||
connect_to_compute_inner(&ctx, &mechanism, &user_info, cfg.retry, &cfg)
|
||||
.await
|
||||
.unwrap();
|
||||
mechanism.verify();
|
||||
|
||||
@@ -1,46 +1,40 @@
|
||||
use std::io;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use ed25519_dalek::SigningKey;
|
||||
use hyper_util::rt::{TokioExecutor, TokioIo, TokioTimer};
|
||||
use jose_jwk::jose_b64;
|
||||
use postgres_client::config::SslMode;
|
||||
use postgres_client::maybe_tls_stream::MaybeTlsStream;
|
||||
use rand_core::OsRng;
|
||||
use rustls::pki_types::{DnsName, ServerName};
|
||||
use tokio::net::{TcpStream, lookup_host};
|
||||
use tokio_rustls::TlsConnector;
|
||||
use tracing::field::display;
|
||||
use tracing::{debug, info};
|
||||
|
||||
use super::AsyncRW;
|
||||
use super::conn_pool::poll_client;
|
||||
use super::conn_pool_lib::{Client, ConnInfo, EndpointConnPool, GlobalConnPool};
|
||||
use super::http_conn_pool::{self, HttpConnPool, Send, poll_http2_client};
|
||||
use super::http_conn_pool::{self, HttpConnPool, LocalProxyClient, poll_http2_client};
|
||||
use super::local_conn_pool::{self, EXT_NAME, EXT_SCHEMA, EXT_VERSION, LocalConnPool};
|
||||
use crate::auth::backend::local::StaticAuthRules;
|
||||
use crate::auth::backend::{ComputeCredentialKeys, ComputeCredentials, ComputeUserInfo};
|
||||
use crate::auth::backend::{ComputeCredentials, ComputeUserInfo};
|
||||
use crate::auth::{self, AuthError};
|
||||
use crate::compute;
|
||||
use crate::compute_ctl::{
|
||||
ComputeCtlError, ExtensionInstallRequest, Privilege, SetRoleGrantsRequest,
|
||||
};
|
||||
use crate::config::{ComputeConfig, ProxyConfig};
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::context::RequestContext;
|
||||
use crate::control_plane::CachedNodeInfo;
|
||||
use crate::control_plane::client::ApiLockError;
|
||||
use crate::control_plane::errors::{GetAuthInfoError, WakeComputeError};
|
||||
use crate::control_plane::locks::ApiLocks;
|
||||
use crate::error::{ErrorKind, ReportableError, UserFacingError};
|
||||
use crate::intern::EndpointIdInt;
|
||||
use crate::proxy::connect_compute::ConnectMechanism;
|
||||
use crate::proxy::retry::{CouldRetry, ShouldRetryWakeCompute};
|
||||
use crate::pqproto::StartupMessageParams;
|
||||
use crate::proxy::{connect_auth, connect_compute};
|
||||
use crate::rate_limiter::EndpointRateLimiter;
|
||||
use crate::types::{EndpointId, Host, LOCAL_PROXY_SUFFIX};
|
||||
use crate::types::{EndpointId, LOCAL_PROXY_SUFFIX};
|
||||
|
||||
pub(crate) struct PoolingBackend {
|
||||
pub(crate) http_conn_pool: Arc<GlobalConnPool<Send, HttpConnPool<Send>>>,
|
||||
pub(crate) http_conn_pool:
|
||||
Arc<GlobalConnPool<LocalProxyClient, HttpConnPool<LocalProxyClient>>>,
|
||||
pub(crate) local_pool: Arc<LocalConnPool<postgres_client::Client>>,
|
||||
pub(crate) pool:
|
||||
Arc<GlobalConnPool<postgres_client::Client, EndpointConnPool<postgres_client::Client>>>,
|
||||
@@ -185,20 +179,42 @@ impl PoolingBackend {
|
||||
tracing::Span::current().record("conn_id", display(conn_id));
|
||||
info!(%conn_id, "pool: opening a new connection '{conn_info}'");
|
||||
let backend = self.auth_backend.as_ref().map(|()| keys.info);
|
||||
crate::proxy::connect_compute::connect_to_compute(
|
||||
|
||||
let mut params = StartupMessageParams::default();
|
||||
params.insert("database", &conn_info.dbname);
|
||||
params.insert("user", &conn_info.user_info.user);
|
||||
|
||||
let mut auth_info = compute::AuthInfo::with_auth_keys(keys.keys);
|
||||
auth_info.set_startup_params(¶ms, true);
|
||||
|
||||
let node = connect_auth::connect_to_compute_and_auth(
|
||||
ctx,
|
||||
&TokioMechanism {
|
||||
conn_id,
|
||||
conn_info,
|
||||
pool: self.pool.clone(),
|
||||
locks: &self.config.connect_compute_locks,
|
||||
keys: keys.keys,
|
||||
},
|
||||
self.config,
|
||||
&backend,
|
||||
self.config.wake_compute_retry_config,
|
||||
&self.config.connect_to_compute,
|
||||
auth_info,
|
||||
connect_compute::TlsNegotiation::Postgres,
|
||||
)
|
||||
.await
|
||||
.await?;
|
||||
|
||||
let (client, connection) = postgres_client::connect::managed(
|
||||
node.stream,
|
||||
Some(node.socket_addr.ip()),
|
||||
postgres_client::config::Host::Tcp(node.hostname.to_string()),
|
||||
node.socket_addr.port(),
|
||||
node.ssl_mode,
|
||||
Some(self.config.connect_to_compute.timeout),
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(poll_client(
|
||||
self.pool.clone(),
|
||||
ctx,
|
||||
conn_info,
|
||||
client,
|
||||
connection,
|
||||
conn_id,
|
||||
node.aux,
|
||||
))
|
||||
}
|
||||
|
||||
// Wake up the destination if needed
|
||||
@@ -210,7 +226,7 @@ impl PoolingBackend {
|
||||
&self,
|
||||
ctx: &RequestContext,
|
||||
conn_info: ConnInfo,
|
||||
) -> Result<http_conn_pool::Client<Send>, HttpConnError> {
|
||||
) -> Result<http_conn_pool::Client<LocalProxyClient>, HttpConnError> {
|
||||
debug!("pool: looking for an existing connection");
|
||||
if let Ok(Some(client)) = self.http_conn_pool.get(ctx, &conn_info) {
|
||||
return Ok(client);
|
||||
@@ -227,19 +243,38 @@ impl PoolingBackend {
|
||||
)),
|
||||
options: conn_info.user_info.options.clone(),
|
||||
});
|
||||
crate::proxy::connect_compute::connect_to_compute(
|
||||
|
||||
let node = connect_compute::connect_to_compute(
|
||||
ctx,
|
||||
&HyperMechanism {
|
||||
conn_id,
|
||||
conn_info,
|
||||
pool: self.http_conn_pool.clone(),
|
||||
locks: &self.config.connect_compute_locks,
|
||||
},
|
||||
self.config,
|
||||
&backend,
|
||||
self.config.wake_compute_retry_config,
|
||||
&self.config.connect_to_compute,
|
||||
connect_compute::TlsNegotiation::Direct,
|
||||
)
|
||||
.await
|
||||
.await?;
|
||||
|
||||
let stream = match node.stream.into_framed().into_inner() {
|
||||
MaybeTlsStream::Raw(s) => Box::pin(s) as AsyncRW,
|
||||
MaybeTlsStream::Tls(s) => Box::pin(s) as AsyncRW,
|
||||
};
|
||||
|
||||
let (client, connection) = hyper::client::conn::http2::Builder::new(TokioExecutor::new())
|
||||
.timer(TokioTimer::new())
|
||||
.keep_alive_interval(Duration::from_secs(20))
|
||||
.keep_alive_while_idle(true)
|
||||
.keep_alive_timeout(Duration::from_secs(5))
|
||||
.handshake(TokioIo::new(stream))
|
||||
.await
|
||||
.map_err(LocalProxyConnError::H2)?;
|
||||
|
||||
Ok(poll_http2_client(
|
||||
self.http_conn_pool.clone(),
|
||||
ctx,
|
||||
&conn_info,
|
||||
client,
|
||||
connection,
|
||||
conn_id,
|
||||
node.aux.clone(),
|
||||
))
|
||||
}
|
||||
|
||||
/// Connect to postgres over localhost.
|
||||
@@ -379,6 +414,8 @@ fn create_random_jwk() -> (SigningKey, jose_jwk::Key) {
|
||||
pub(crate) enum HttpConnError {
|
||||
#[error("pooled connection closed at inconsistent state")]
|
||||
ConnectionClosedAbruptly(#[from] tokio::sync::watch::error::SendError<uuid::Uuid>),
|
||||
#[error("could not connect to compute")]
|
||||
ConnectError(#[from] compute::ConnectionError),
|
||||
#[error("could not connect to postgres in compute")]
|
||||
PostgresConnectionError(#[from] postgres_client::Error),
|
||||
#[error("could not connect to local-proxy in compute")]
|
||||
@@ -398,10 +435,19 @@ pub(crate) enum HttpConnError {
|
||||
TooManyConnectionAttempts(#[from] ApiLockError),
|
||||
}
|
||||
|
||||
impl From<connect_auth::AuthError> for HttpConnError {
|
||||
fn from(value: connect_auth::AuthError) -> Self {
|
||||
match value {
|
||||
connect_auth::AuthError::Auth(compute::PostgresError::Postgres(error)) => {
|
||||
Self::PostgresConnectionError(error)
|
||||
}
|
||||
connect_auth::AuthError::Connect(error) => Self::ConnectError(error),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub(crate) enum LocalProxyConnError {
|
||||
#[error("error with connection to local-proxy")]
|
||||
Io(#[source] std::io::Error),
|
||||
#[error("could not establish h2 connection")]
|
||||
H2(#[from] hyper::Error),
|
||||
}
|
||||
@@ -409,6 +455,7 @@ pub(crate) enum LocalProxyConnError {
|
||||
impl ReportableError for HttpConnError {
|
||||
fn get_error_kind(&self) -> ErrorKind {
|
||||
match self {
|
||||
HttpConnError::ConnectError(_) => ErrorKind::Compute,
|
||||
HttpConnError::ConnectionClosedAbruptly(_) => ErrorKind::Compute,
|
||||
HttpConnError::PostgresConnectionError(p) => {
|
||||
if p.as_db_error().is_some() {
|
||||
@@ -433,6 +480,7 @@ impl ReportableError for HttpConnError {
|
||||
impl UserFacingError for HttpConnError {
|
||||
fn to_string_client(&self) -> String {
|
||||
match self {
|
||||
HttpConnError::ConnectError(p) => p.to_string_client(),
|
||||
HttpConnError::ConnectionClosedAbruptly(_) => self.to_string(),
|
||||
HttpConnError::PostgresConnectionError(p) => p.to_string(),
|
||||
HttpConnError::LocalProxyConnectionError(p) => p.to_string(),
|
||||
@@ -448,36 +496,9 @@ impl UserFacingError for HttpConnError {
|
||||
}
|
||||
}
|
||||
|
||||
impl CouldRetry for HttpConnError {
|
||||
fn could_retry(&self) -> bool {
|
||||
match self {
|
||||
HttpConnError::PostgresConnectionError(e) => e.could_retry(),
|
||||
HttpConnError::LocalProxyConnectionError(e) => e.could_retry(),
|
||||
HttpConnError::ComputeCtl(_) => false,
|
||||
HttpConnError::ConnectionClosedAbruptly(_) => false,
|
||||
HttpConnError::JwtPayloadError(_) => false,
|
||||
HttpConnError::GetAuthInfo(_) => false,
|
||||
HttpConnError::AuthError(_) => false,
|
||||
HttpConnError::WakeCompute(_) => false,
|
||||
HttpConnError::TooManyConnectionAttempts(_) => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
impl ShouldRetryWakeCompute for HttpConnError {
|
||||
fn should_retry_wake_compute(&self) -> bool {
|
||||
match self {
|
||||
HttpConnError::PostgresConnectionError(e) => e.should_retry_wake_compute(),
|
||||
// we never checked cache validity
|
||||
HttpConnError::TooManyConnectionAttempts(_) => false,
|
||||
_ => true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ReportableError for LocalProxyConnError {
|
||||
fn get_error_kind(&self) -> ErrorKind {
|
||||
match self {
|
||||
LocalProxyConnError::Io(_) => ErrorKind::Compute,
|
||||
LocalProxyConnError::H2(_) => ErrorKind::Compute,
|
||||
}
|
||||
}
|
||||
@@ -488,209 +509,3 @@ impl UserFacingError for LocalProxyConnError {
|
||||
"Could not establish HTTP connection to the database".to_string()
|
||||
}
|
||||
}
|
||||
|
||||
impl CouldRetry for LocalProxyConnError {
|
||||
fn could_retry(&self) -> bool {
|
||||
match self {
|
||||
LocalProxyConnError::Io(_) => false,
|
||||
LocalProxyConnError::H2(_) => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
impl ShouldRetryWakeCompute for LocalProxyConnError {
|
||||
fn should_retry_wake_compute(&self) -> bool {
|
||||
match self {
|
||||
LocalProxyConnError::Io(_) => false,
|
||||
LocalProxyConnError::H2(_) => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct TokioMechanism {
|
||||
pool: Arc<GlobalConnPool<postgres_client::Client, EndpointConnPool<postgres_client::Client>>>,
|
||||
conn_info: ConnInfo,
|
||||
conn_id: uuid::Uuid,
|
||||
keys: ComputeCredentialKeys,
|
||||
|
||||
/// connect_to_compute concurrency lock
|
||||
locks: &'static ApiLocks<Host>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ConnectMechanism for TokioMechanism {
|
||||
type Connection = Client<postgres_client::Client>;
|
||||
type ConnectError = HttpConnError;
|
||||
type Error = HttpConnError;
|
||||
|
||||
async fn connect_once(
|
||||
&self,
|
||||
ctx: &RequestContext,
|
||||
node_info: &CachedNodeInfo,
|
||||
compute_config: &ComputeConfig,
|
||||
) -> Result<Self::Connection, Self::ConnectError> {
|
||||
let permit = self.locks.get_permit(&node_info.conn_info.host).await?;
|
||||
|
||||
let mut config = node_info.conn_info.to_postgres_client_config();
|
||||
let config = config
|
||||
.user(&self.conn_info.user_info.user)
|
||||
.dbname(&self.conn_info.dbname)
|
||||
.connect_timeout(compute_config.timeout);
|
||||
|
||||
if let ComputeCredentialKeys::AuthKeys(auth_keys) = self.keys {
|
||||
config.auth_keys(auth_keys);
|
||||
}
|
||||
|
||||
let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Compute);
|
||||
let res = config.connect(compute_config).await;
|
||||
drop(pause);
|
||||
let (client, connection) = permit.release_result(res)?;
|
||||
|
||||
tracing::Span::current().record("pid", tracing::field::display(client.get_process_id()));
|
||||
tracing::Span::current().record(
|
||||
"compute_id",
|
||||
tracing::field::display(&node_info.aux.compute_id),
|
||||
);
|
||||
|
||||
if let Some(query_id) = ctx.get_testodrome_id() {
|
||||
info!("latency={}, query_id={}", ctx.get_proxy_latency(), query_id);
|
||||
}
|
||||
|
||||
Ok(poll_client(
|
||||
self.pool.clone(),
|
||||
ctx,
|
||||
self.conn_info.clone(),
|
||||
client,
|
||||
connection,
|
||||
self.conn_id,
|
||||
node_info.aux.clone(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
struct HyperMechanism {
|
||||
pool: Arc<GlobalConnPool<Send, HttpConnPool<Send>>>,
|
||||
conn_info: ConnInfo,
|
||||
conn_id: uuid::Uuid,
|
||||
|
||||
/// connect_to_compute concurrency lock
|
||||
locks: &'static ApiLocks<Host>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ConnectMechanism for HyperMechanism {
|
||||
type Connection = http_conn_pool::Client<Send>;
|
||||
type ConnectError = HttpConnError;
|
||||
type Error = HttpConnError;
|
||||
|
||||
async fn connect_once(
|
||||
&self,
|
||||
ctx: &RequestContext,
|
||||
node_info: &CachedNodeInfo,
|
||||
config: &ComputeConfig,
|
||||
) -> Result<Self::Connection, Self::ConnectError> {
|
||||
let host_addr = node_info.conn_info.host_addr;
|
||||
let host = &node_info.conn_info.host;
|
||||
let permit = self.locks.get_permit(host).await?;
|
||||
|
||||
let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Compute);
|
||||
|
||||
let tls = if node_info.conn_info.ssl_mode == SslMode::Disable {
|
||||
None
|
||||
} else {
|
||||
Some(&config.tls)
|
||||
};
|
||||
|
||||
let port = node_info.conn_info.port;
|
||||
let res = connect_http2(host_addr, host, port, config.timeout, tls).await;
|
||||
drop(pause);
|
||||
let (client, connection) = permit.release_result(res)?;
|
||||
|
||||
tracing::Span::current().record(
|
||||
"compute_id",
|
||||
tracing::field::display(&node_info.aux.compute_id),
|
||||
);
|
||||
|
||||
if let Some(query_id) = ctx.get_testodrome_id() {
|
||||
info!("latency={}, query_id={}", ctx.get_proxy_latency(), query_id);
|
||||
}
|
||||
|
||||
Ok(poll_http2_client(
|
||||
self.pool.clone(),
|
||||
ctx,
|
||||
&self.conn_info,
|
||||
client,
|
||||
connection,
|
||||
self.conn_id,
|
||||
node_info.aux.clone(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
async fn connect_http2(
|
||||
host_addr: Option<IpAddr>,
|
||||
host: &str,
|
||||
port: u16,
|
||||
timeout: Duration,
|
||||
tls: Option<&Arc<rustls::ClientConfig>>,
|
||||
) -> Result<(http_conn_pool::Send, http_conn_pool::Connect), LocalProxyConnError> {
|
||||
let addrs = match host_addr {
|
||||
Some(addr) => vec![SocketAddr::new(addr, port)],
|
||||
None => lookup_host((host, port))
|
||||
.await
|
||||
.map_err(LocalProxyConnError::Io)?
|
||||
.collect(),
|
||||
};
|
||||
let mut last_err = None;
|
||||
|
||||
let mut addrs = addrs.into_iter();
|
||||
let stream = loop {
|
||||
let Some(addr) = addrs.next() else {
|
||||
return Err(last_err.unwrap_or_else(|| {
|
||||
LocalProxyConnError::Io(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"could not resolve any addresses",
|
||||
))
|
||||
}));
|
||||
};
|
||||
|
||||
match tokio::time::timeout(timeout, TcpStream::connect(addr)).await {
|
||||
Ok(Ok(stream)) => {
|
||||
stream.set_nodelay(true).map_err(LocalProxyConnError::Io)?;
|
||||
break stream;
|
||||
}
|
||||
Ok(Err(e)) => {
|
||||
last_err = Some(LocalProxyConnError::Io(e));
|
||||
}
|
||||
Err(e) => {
|
||||
last_err = Some(LocalProxyConnError::Io(io::Error::new(
|
||||
io::ErrorKind::TimedOut,
|
||||
e,
|
||||
)));
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let stream = if let Some(tls) = tls {
|
||||
let host = DnsName::try_from(host)
|
||||
.map_err(io::Error::other)
|
||||
.map_err(LocalProxyConnError::Io)?
|
||||
.to_owned();
|
||||
let stream = TlsConnector::from(tls.clone())
|
||||
.connect(ServerName::DnsName(host), stream)
|
||||
.await
|
||||
.map_err(LocalProxyConnError::Io)?;
|
||||
Box::pin(stream) as AsyncRW
|
||||
} else {
|
||||
Box::pin(stream) as AsyncRW
|
||||
};
|
||||
|
||||
let (client, connection) = hyper::client::conn::http2::Builder::new(TokioExecutor::new())
|
||||
.timer(TokioTimer::new())
|
||||
.keep_alive_interval(Duration::from_secs(20))
|
||||
.keep_alive_while_idle(true)
|
||||
.keep_alive_timeout(Duration::from_secs(5))
|
||||
.handshake(TokioIo::new(stream))
|
||||
.await?;
|
||||
|
||||
Ok((client, connection))
|
||||
}
|
||||
|
||||
@@ -190,6 +190,9 @@ mod tests {
|
||||
fn get_process_id(&self) -> i32 {
|
||||
0
|
||||
}
|
||||
fn reset(&mut self) -> Result<(), postgres_client::Error> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn create_inner() -> ClientInnerCommon<MockClient> {
|
||||
|
||||
@@ -7,10 +7,9 @@ use std::time::Duration;
|
||||
|
||||
use clashmap::ClashMap;
|
||||
use parking_lot::RwLock;
|
||||
use postgres_client::ReadyForQueryStatus;
|
||||
use rand::Rng;
|
||||
use smol_str::ToSmolStr;
|
||||
use tracing::{Span, debug, info};
|
||||
use tracing::{Span, debug, info, warn};
|
||||
|
||||
use super::backend::HttpConnError;
|
||||
use super::conn_pool::ClientDataRemote;
|
||||
@@ -188,7 +187,7 @@ impl<C: ClientInnerExt> EndpointConnPool<C> {
|
||||
self.pools.get_mut(&db_user)
|
||||
}
|
||||
|
||||
pub(crate) fn put(pool: &RwLock<Self>, conn_info: &ConnInfo, client: ClientInnerCommon<C>) {
|
||||
pub(crate) fn put(pool: &RwLock<Self>, conn_info: &ConnInfo, mut client: ClientInnerCommon<C>) {
|
||||
let conn_id = client.get_conn_id();
|
||||
let (max_conn, conn_count, pool_name) = {
|
||||
let pool = pool.read();
|
||||
@@ -201,12 +200,17 @@ impl<C: ClientInnerExt> EndpointConnPool<C> {
|
||||
};
|
||||
|
||||
if client.inner.is_closed() {
|
||||
info!(%conn_id, "{}: throwing away connection '{conn_info}' because connection is closed", pool_name);
|
||||
info!(%conn_id, "{pool_name}: throwing away connection '{conn_info}' because connection is closed");
|
||||
return;
|
||||
}
|
||||
|
||||
if let Err(error) = client.inner.reset() {
|
||||
warn!(?error, %conn_id, "{pool_name}: throwing away connection '{conn_info}' because connection could not be reset");
|
||||
return;
|
||||
}
|
||||
|
||||
if conn_count >= max_conn {
|
||||
info!(%conn_id, "{}: throwing away connection '{conn_info}' because pool is full", pool_name);
|
||||
info!(%conn_id, "{pool_name}: throwing away connection '{conn_info}' because pool is full");
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -691,6 +695,7 @@ impl<C: ClientInnerExt> Deref for Client<C> {
|
||||
pub(crate) trait ClientInnerExt: Sync + Send + 'static {
|
||||
fn is_closed(&self) -> bool;
|
||||
fn get_process_id(&self) -> i32;
|
||||
fn reset(&mut self) -> Result<(), postgres_client::Error>;
|
||||
}
|
||||
|
||||
impl ClientInnerExt for postgres_client::Client {
|
||||
@@ -701,15 +706,13 @@ impl ClientInnerExt for postgres_client::Client {
|
||||
fn get_process_id(&self) -> i32 {
|
||||
self.get_process_id()
|
||||
}
|
||||
|
||||
fn reset(&mut self) -> Result<(), postgres_client::Error> {
|
||||
self.reset_session_background()
|
||||
}
|
||||
}
|
||||
|
||||
impl<C: ClientInnerExt> Discard<'_, C> {
|
||||
pub(crate) fn check_idle(&mut self, status: ReadyForQueryStatus) {
|
||||
let conn_info = &self.conn_info;
|
||||
if status != ReadyForQueryStatus::Idle && std::mem::take(self.pool).strong_count() > 0 {
|
||||
info!("pool: throwing away connection '{conn_info}' because connection is not idle");
|
||||
}
|
||||
}
|
||||
pub(crate) fn discard(&mut self) {
|
||||
let conn_info = &self.conn_info;
|
||||
if std::mem::take(self.pool).strong_count() > 0 {
|
||||
|
||||
@@ -23,8 +23,8 @@ use crate::protocol2::ConnectionInfoExtra;
|
||||
use crate::types::EndpointCacheKey;
|
||||
use crate::usage_metrics::{Ids, MetricCounter, USAGE_METRICS};
|
||||
|
||||
pub(crate) type Send = http2::SendRequest<BoxBody<Bytes, hyper::Error>>;
|
||||
pub(crate) type Connect =
|
||||
pub(crate) type LocalProxyClient = http2::SendRequest<BoxBody<Bytes, hyper::Error>>;
|
||||
pub(crate) type LocalProxyConnection =
|
||||
http2::Connection<TokioIo<AsyncRW>, BoxBody<Bytes, hyper::Error>, TokioExecutor>;
|
||||
|
||||
#[derive(Clone)]
|
||||
@@ -189,14 +189,14 @@ impl<C: ClientInnerExt + Clone> GlobalConnPool<C, HttpConnPool<C>> {
|
||||
}
|
||||
|
||||
pub(crate) fn poll_http2_client(
|
||||
global_pool: Arc<GlobalConnPool<Send, HttpConnPool<Send>>>,
|
||||
global_pool: Arc<GlobalConnPool<LocalProxyClient, HttpConnPool<LocalProxyClient>>>,
|
||||
ctx: &RequestContext,
|
||||
conn_info: &ConnInfo,
|
||||
client: Send,
|
||||
connection: Connect,
|
||||
client: LocalProxyClient,
|
||||
connection: LocalProxyConnection,
|
||||
conn_id: uuid::Uuid,
|
||||
aux: MetricsAuxInfo,
|
||||
) -> Client<Send> {
|
||||
) -> Client<LocalProxyClient> {
|
||||
let conn_gauge = Metrics::get().proxy.db_connections.guard(ctx.protocol());
|
||||
let session_id = ctx.session_id();
|
||||
|
||||
@@ -285,7 +285,7 @@ impl<C: ClientInnerExt + Clone> Client<C> {
|
||||
}
|
||||
}
|
||||
|
||||
impl ClientInnerExt for Send {
|
||||
impl ClientInnerExt for LocalProxyClient {
|
||||
fn is_closed(&self) -> bool {
|
||||
self.is_closed()
|
||||
}
|
||||
@@ -294,4 +294,10 @@ impl ClientInnerExt for Send {
|
||||
// ideally throw something meaningful
|
||||
-1
|
||||
}
|
||||
|
||||
fn reset(&mut self) -> Result<(), postgres_client::Error> {
|
||||
// We use HTTP/2.0 to talk to local proxy. HTTP is stateless,
|
||||
// so there's nothing to reset.
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -269,11 +269,6 @@ impl ClientInnerCommon<postgres_client::Client> {
|
||||
local_data.jti += 1;
|
||||
let token = resign_jwt(&local_data.key, payload, local_data.jti)?;
|
||||
|
||||
self.inner
|
||||
.discard_all()
|
||||
.await
|
||||
.map_err(SqlOverHttpError::InternalPostgres)?;
|
||||
|
||||
// initiates the auth session
|
||||
// this is safe from query injections as the JWT format is free of any escape characters.
|
||||
let query = format!("select auth.jwt_session_init('{token}')");
|
||||
|
||||
@@ -46,7 +46,7 @@ use super::backend::{HttpConnError, LocalProxyConnError, PoolingBackend};
|
||||
use super::conn_pool::AuthData;
|
||||
use super::conn_pool_lib::ConnInfo;
|
||||
use super::error::{ConnInfoError, Credentials, HttpCodeError, ReadPayloadError};
|
||||
use super::http_conn_pool::{self, Send};
|
||||
use super::http_conn_pool::{self, LocalProxyClient};
|
||||
use super::http_util::{
|
||||
ALLOW_POOL, CONN_STRING, NEON_REQUEST_ID, RAW_TEXT_OUTPUT, TXN_ISOLATION_LEVEL, TXN_READ_ONLY,
|
||||
get_conn_info, json_response, uuid_to_header_value,
|
||||
@@ -145,7 +145,7 @@ impl DbSchemaCache {
|
||||
endpoint_id: &EndpointCacheKey,
|
||||
auth_header: &HeaderValue,
|
||||
connection_string: &str,
|
||||
client: &mut http_conn_pool::Client<Send>,
|
||||
client: &mut http_conn_pool::Client<LocalProxyClient>,
|
||||
ctx: &RequestContext,
|
||||
config: &'static ProxyConfig,
|
||||
) -> Result<Arc<(ApiConfig, DbSchemaOwned)>, RestError> {
|
||||
@@ -190,7 +190,7 @@ impl DbSchemaCache {
|
||||
&self,
|
||||
auth_header: &HeaderValue,
|
||||
connection_string: &str,
|
||||
client: &mut http_conn_pool::Client<Send>,
|
||||
client: &mut http_conn_pool::Client<LocalProxyClient>,
|
||||
ctx: &RequestContext,
|
||||
config: &'static ProxyConfig,
|
||||
) -> Result<(ApiConfig, DbSchemaOwned), RestError> {
|
||||
@@ -430,7 +430,7 @@ struct BatchQueryData<'a> {
|
||||
}
|
||||
|
||||
async fn make_local_proxy_request<S: DeserializeOwned>(
|
||||
client: &mut http_conn_pool::Client<Send>,
|
||||
client: &mut http_conn_pool::Client<LocalProxyClient>,
|
||||
headers: impl IntoIterator<Item = (&HeaderName, HeaderValue)>,
|
||||
body: QueryData<'_>,
|
||||
max_len: usize,
|
||||
@@ -461,7 +461,7 @@ async fn make_local_proxy_request<S: DeserializeOwned>(
|
||||
}
|
||||
|
||||
async fn make_raw_local_proxy_request(
|
||||
client: &mut http_conn_pool::Client<Send>,
|
||||
client: &mut http_conn_pool::Client<LocalProxyClient>,
|
||||
headers: impl IntoIterator<Item = (&HeaderName, HeaderValue)>,
|
||||
body: String,
|
||||
) -> Result<Response<Incoming>, RestError> {
|
||||
|
||||
@@ -735,9 +735,7 @@ impl QueryData {
|
||||
|
||||
match batch_result {
|
||||
// The query successfully completed.
|
||||
Ok(status) => {
|
||||
discard.check_idle(status);
|
||||
|
||||
Ok(_) => {
|
||||
let json_output = String::from_utf8(json_buf).expect("json should be valid utf8");
|
||||
Ok(json_output)
|
||||
}
|
||||
@@ -793,7 +791,7 @@ impl BatchQueryData {
|
||||
{
|
||||
Ok(json_output) => {
|
||||
info!("commit");
|
||||
let status = transaction
|
||||
transaction
|
||||
.commit()
|
||||
.await
|
||||
.inspect_err(|_| {
|
||||
@@ -802,7 +800,6 @@ impl BatchQueryData {
|
||||
discard.discard();
|
||||
})
|
||||
.map_err(SqlOverHttpError::Postgres)?;
|
||||
discard.check_idle(status);
|
||||
json_output
|
||||
}
|
||||
Err(SqlOverHttpError::Cancelled(_)) => {
|
||||
@@ -815,17 +812,6 @@ impl BatchQueryData {
|
||||
return Err(SqlOverHttpError::Cancelled(SqlOverHttpCancel::Postgres));
|
||||
}
|
||||
Err(err) => {
|
||||
info!("rollback");
|
||||
let status = transaction
|
||||
.rollback()
|
||||
.await
|
||||
.inspect_err(|_| {
|
||||
// if we cannot rollback - for now don't return connection to pool
|
||||
// TODO: get a query status from the error
|
||||
discard.discard();
|
||||
})
|
||||
.map_err(SqlOverHttpError::Postgres)?;
|
||||
discard.check_idle(status);
|
||||
return Err(err);
|
||||
}
|
||||
};
|
||||
@@ -1012,12 +998,6 @@ impl Client {
|
||||
}
|
||||
|
||||
impl Discard<'_> {
|
||||
fn check_idle(&mut self, status: ReadyForQueryStatus) {
|
||||
match self {
|
||||
Discard::Remote(discard) => discard.check_idle(status),
|
||||
Discard::Local(discard) => discard.check_idle(status),
|
||||
}
|
||||
}
|
||||
fn discard(&mut self) {
|
||||
match self {
|
||||
Discard::Remote(discard) => discard.discard(),
|
||||
|
||||
@@ -249,6 +249,10 @@ impl IntentState {
|
||||
}
|
||||
|
||||
pub(crate) fn push_secondary(&mut self, scheduler: &mut Scheduler, new_secondary: NodeId) {
|
||||
// Every assertion here should probably have a corresponding check in
// `validate_optimization` unless it is an invariant that should never be violated. Note
// that the lock is not held between planning optimizations and applying them, so you have
// to assume that any valid state transition of the intent state may have occurred.
|
||||
assert!(!self.secondary.contains(&new_secondary));
|
||||
assert!(self.attached != Some(new_secondary));
|
||||
scheduler.update_node_ref_counts(
|
||||
@@ -1335,8 +1339,9 @@ impl TenantShard {
|
||||
true
|
||||
}
|
||||
|
||||
/// Check that the desired modifications to the intent state are compatible with
|
||||
/// the current intent state
|
||||
/// Check that the desired modifications to the intent state are compatible with the current
|
||||
/// intent state. Note that the lock is not held between planning optimizations and applying
|
||||
/// them, so any valid state transition of the intent state may have occurred.
|
||||
fn validate_optimization(&self, optimization: &ScheduleOptimization) -> bool {
|
||||
match optimization.action {
|
||||
ScheduleOptimizationAction::MigrateAttachment(MigrateAttachment {
|
||||
@@ -1352,6 +1357,9 @@ impl TenantShard {
|
||||
}) => {
|
||||
// It's legal to remove a secondary that is not present in the intent state
|
||||
!self.intent.secondary.contains(&new_node_id)
|
||||
// Ensure the secondary hasn't already been promoted to attached by a concurrent
|
||||
// optimization/migration.
|
||||
&& self.intent.attached != Some(new_node_id)
|
||||
}
|
||||
ScheduleOptimizationAction::CreateSecondary(new_node_id) => {
|
||||
!self.intent.secondary.contains(&new_node_id)
|
||||
|
||||
@@ -87,9 +87,10 @@ class EndpointHttpClient(requests.Session):
|
||||
def prewarmed():
|
||||
json = self.prewarm_lfc_status()
|
||||
status, err = json["status"], json.get("error")
|
||||
assert status == "completed", f"{status}, {err=}"
|
||||
assert status in ["failed", "completed", "skipped"], f"{status}, {err=}"
|
||||
|
||||
wait_until(prewarmed, timeout=60)
|
||||
assert self.prewarm_lfc_status()["status"] != "failed"
|
||||
|
||||
def offload_lfc_status(self) -> dict[str, str]:
|
||||
res = self.get(self.offload_url)
|
||||
@@ -105,19 +106,19 @@ class EndpointHttpClient(requests.Session):
|
||||
def offloaded():
|
||||
json = self.offload_lfc_status()
|
||||
status, err = json["status"], json.get("error")
|
||||
assert status == "completed", f"{status}, {err=}"
|
||||
assert status in ["failed", "completed"], f"{status}, {err=}"
|
||||
|
||||
wait_until(offloaded)
|
||||
assert self.offload_lfc_status()["status"] != "failed"
|
||||
|
||||
def promote(self, safekeepers_lsn: dict[str, Any], disconnect: bool = False):
|
||||
def promote(self, promote_spec: dict[str, Any], disconnect: bool = False):
|
||||
url = f"http://localhost:{self.external_port}/promote"
|
||||
if disconnect:
|
||||
try: # send first request to start promote and disconnect
|
||||
self.post(url, data=safekeepers_lsn, timeout=0.001)
|
||||
self.post(url, json=promote_spec, timeout=0.001)
|
||||
except ReadTimeout:
|
||||
pass # wait on second request which returns on promotion finish
|
||||
res = self.post(url, data=safekeepers_lsn)
|
||||
res.raise_for_status()
|
||||
res = self.post(url, json=promote_spec)
|
||||
json: dict[str, str] = res.json()
|
||||
return json
|
||||
|
||||
|
||||
@@ -212,11 +212,13 @@ class NeonLocalCli(AbstractNeonCli):
|
||||
pg_version,
|
||||
]
|
||||
if conf is not None:
|
||||
args.extend(
|
||||
chain.from_iterable(
|
||||
product(["-c"], (f"{key}:{value}" for key, value in conf.items()))
|
||||
)
|
||||
)
|
||||
for key, value in conf.items():
|
||||
if isinstance(value, bool):
|
||||
args.extend(
|
||||
["-c", f"{key}:{str(value).lower()}"]
|
||||
) # only accepts true/false not True/False
|
||||
else:
|
||||
args.extend(["-c", f"{key}:{value}"])
|
||||
|
||||
if set_default:
|
||||
args.append("--set-default")
|
||||
@@ -588,7 +590,9 @@ class NeonLocalCli(AbstractNeonCli):
|
||||
]
|
||||
extra_env_vars = env or {}
|
||||
if basebackup_request_tries is not None:
|
||||
extra_env_vars["NEON_COMPUTE_TESTING_BASEBACKUP_TRIES"] = str(basebackup_request_tries)
|
||||
extra_env_vars["NEON_COMPUTE_TESTING_BASEBACKUP_RETRIES"] = str(
|
||||
basebackup_request_tries
|
||||
)
|
||||
if remote_ext_base_url is not None:
|
||||
args.extend(["--remote-ext-base-url", remote_ext_base_url])
|
||||
|
||||
@@ -624,6 +628,7 @@ class NeonLocalCli(AbstractNeonCli):
|
||||
pageserver_id: int | None = None,
|
||||
safekeepers: list[int] | None = None,
|
||||
check_return_code=True,
|
||||
timeout_sec: float | None = None,
|
||||
) -> subprocess.CompletedProcess[str]:
|
||||
args = ["endpoint", "reconfigure", endpoint_id]
|
||||
if tenant_id is not None:
|
||||
@@ -632,7 +637,16 @@ class NeonLocalCli(AbstractNeonCli):
|
||||
args.extend(["--pageserver-id", str(pageserver_id)])
|
||||
if safekeepers is not None:
|
||||
args.extend(["--safekeepers", (",".join(map(str, safekeepers)))])
|
||||
return self.raw_cli(args, check_return_code=check_return_code)
|
||||
return self.raw_cli(args, check_return_code=check_return_code, timeout=timeout_sec)
|
||||
|
||||
def endpoint_refresh_configuration(
|
||||
self,
|
||||
endpoint_id: str,
|
||||
) -> subprocess.CompletedProcess[str]:
|
||||
args = ["endpoint", "refresh-configuration", endpoint_id]
|
||||
res = self.raw_cli(args)
|
||||
res.check_returncode()
|
||||
return res
|
||||
|
||||
def endpoint_stop(
|
||||
self,
|
||||
@@ -658,6 +672,22 @@ class NeonLocalCli(AbstractNeonCli):
|
||||
lsn: Lsn | None = None if lsn_str == "null" else Lsn(lsn_str)
|
||||
return lsn, proc
|
||||
|
||||
def endpoint_update_pageservers(
|
||||
self,
|
||||
endpoint_id: str,
|
||||
pageserver_id: int | None = None,
|
||||
) -> subprocess.CompletedProcess[str]:
|
||||
args = [
|
||||
"endpoint",
|
||||
"update-pageservers",
|
||||
endpoint_id,
|
||||
]
|
||||
if pageserver_id is not None:
|
||||
args.extend(["--pageserver-id", str(pageserver_id)])
|
||||
res = self.raw_cli(args)
|
||||
res.check_returncode()
|
||||
return res
|
||||
|
||||
def mappings_map_branch(
|
||||
self, name: str, tenant_id: TenantId, timeline_id: TimelineId
|
||||
) -> subprocess.CompletedProcess[str]:
|
||||
|
||||
@@ -3912,6 +3912,41 @@ class NeonProxy(PgProtocol):
|
||||
assert response.status_code == expected_code, f"response: {response.json()}"
|
||||
return response.json()
|
||||
|
||||
def http_multiquery(self, *queries, **kwargs):
|
||||
# TODO maybe use default values if not provided
|
||||
user = quote(kwargs["user"])
|
||||
password = quote(kwargs["password"])
|
||||
expected_code = kwargs.get("expected_code")
|
||||
timeout = kwargs.get("timeout")
|
||||
|
||||
json_queries = []
|
||||
for query in queries:
|
||||
if type(query) is str:
|
||||
json_queries.append({"query": query})
|
||||
else:
|
||||
[query, params] = query
|
||||
json_queries.append({"query": query, "params": params})
|
||||
|
||||
queries_str = [j["query"] for j in json_queries]
|
||||
log.info(f"Executing http queries: {queries_str}")
|
||||
|
||||
connstr = f"postgresql://{user}:{password}@{self.domain}:{self.proxy_port}/postgres"
|
||||
response = requests.post(
|
||||
f"https://{self.domain}:{self.external_http_port}/sql",
|
||||
data=json.dumps({"queries": json_queries}),
|
||||
headers={
|
||||
"Content-Type": "application/sql",
|
||||
"Neon-Connection-String": connstr,
|
||||
"Neon-Pool-Opt-In": "true",
|
||||
},
|
||||
verify=str(self.test_output_dir / "proxy.crt"),
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
if expected_code is not None:
|
||||
assert response.status_code == expected_code, f"response: {response.json()}"
|
||||
return response.json()
|
||||
|
||||
async def http2_query(self, query, args, **kwargs):
|
||||
# TODO maybe use default values if not provided
|
||||
user = kwargs["user"]
|
||||
@@ -4763,9 +4798,10 @@ class Endpoint(PgProtocol, LogUtils):
|
||||
m = re.search(r"=\s*(\S+)", line)
|
||||
assert m is not None, f"malformed config line {line}"
|
||||
size = m.group(1)
|
||||
assert size_to_bytes(size) >= size_to_bytes("1MB"), (
|
||||
"LFC size cannot be set less than 1MB"
|
||||
)
|
||||
if size_to_bytes(size) > 0:
|
||||
assert size_to_bytes(size) >= size_to_bytes("1MB"), (
|
||||
"LFC size cannot be set less than 1MB"
|
||||
)
|
||||
lfc_path_escaped = str(lfc_path).replace("'", "''")
|
||||
config_lines = [
|
||||
f"neon.file_cache_path = '{lfc_path_escaped}'",
|
||||
@@ -4898,15 +4934,38 @@ class Endpoint(PgProtocol, LogUtils):
|
||||
def is_running(self):
|
||||
return self._running._value > 0
|
||||
|
||||
def reconfigure(self, pageserver_id: int | None = None, safekeepers: list[int] | None = None):
|
||||
def reconfigure(
|
||||
self,
|
||||
pageserver_id: int | None = None,
|
||||
safekeepers: list[int] | None = None,
|
||||
timeout_sec: float = 120,
|
||||
):
|
||||
assert self.endpoint_id is not None
|
||||
# If `safekeepers` is not None, remember them as active and use
# them in the following commands.
|
||||
if safekeepers is not None:
|
||||
self.active_safekeepers = safekeepers
|
||||
self.env.neon_cli.endpoint_reconfigure(
|
||||
self.endpoint_id, self.tenant_id, pageserver_id, self.active_safekeepers
|
||||
)
|
||||
|
||||
start_time = time.time()
|
||||
while True:
|
||||
try:
|
||||
self.env.neon_cli.endpoint_reconfigure(
|
||||
self.endpoint_id,
|
||||
self.tenant_id,
|
||||
pageserver_id,
|
||||
self.active_safekeepers,
|
||||
timeout_sec=timeout_sec,
|
||||
)
|
||||
return
|
||||
except RuntimeError as e:
|
||||
if time.time() - start_time > timeout_sec:
|
||||
raise e
|
||||
log.warning(f"Reconfigure failed with error: {e}. Retrying...")
|
||||
time.sleep(5)
|
||||
|
||||
def refresh_configuration(self):
|
||||
assert self.endpoint_id is not None
|
||||
self.env.neon_cli.endpoint_refresh_configuration(self.endpoint_id)
|
||||
|
||||
def respec(self, **kwargs: Any) -> None:
|
||||
"""Update the endpoint.json file used by control_plane."""
|
||||
@@ -4920,6 +4979,10 @@ class Endpoint(PgProtocol, LogUtils):
|
||||
log.debug(json.dumps(dict(data_dict, **kwargs)))
|
||||
json.dump(dict(data_dict, **kwargs), file, indent=4)
|
||||
|
||||
def get_compute_spec(self) -> dict[str, Any]:
|
||||
out = json.loads((Path(self.endpoint_path()) / "config.json").read_text())["spec"]
|
||||
return cast("dict[str, Any]", out)
|
||||
|
||||
def respec_deep(self, **kwargs: Any) -> None:
|
||||
"""
|
||||
Update the endpoint.json file taking into account nested keys.
|
||||
@@ -4950,6 +5013,10 @@ class Endpoint(PgProtocol, LogUtils):
|
||||
log.debug("Updating compute config to: %s", json.dumps(config, indent=4))
|
||||
json.dump(config, file, indent=4)
|
||||
|
||||
def update_pageservers_in_config(self, pageserver_id: int | None = None):
|
||||
assert self.endpoint_id is not None
|
||||
self.env.neon_cli.endpoint_update_pageservers(self.endpoint_id, pageserver_id)
|
||||
|
||||
def wait_for_migrations(self, wait_for: int = NUM_COMPUTE_MIGRATIONS) -> None:
|
||||
"""
|
||||
Wait for all compute migrations to be run. Remember that migrations only
|
||||
|
||||
@@ -78,6 +78,9 @@ class Workload:
|
||||
"""
|
||||
if self._endpoint is not None:
|
||||
with ENDPOINT_LOCK:
|
||||
# It's important that we update config.json before issuing the reconfigure request to make sure
|
||||
# that PG-initiated spec refresh doesn't mess things up by reverting to the old spec.
|
||||
self._endpoint.update_pageservers_in_config()
|
||||
self._endpoint.reconfigure()
|
||||
|
||||
def endpoint(self, pageserver_id: int | None = None) -> Endpoint:
|
||||
@@ -97,10 +100,10 @@ class Workload:
|
||||
self._endpoint.start(pageserver_id=pageserver_id)
|
||||
self._configured_pageserver = pageserver_id
|
||||
else:
|
||||
if self._configured_pageserver != pageserver_id:
|
||||
self._configured_pageserver = pageserver_id
|
||||
self._endpoint.reconfigure(pageserver_id=pageserver_id)
|
||||
self._endpoint_config = pageserver_id
|
||||
# It's important that we update config.json before issuing the reconfigure request to make sure
|
||||
# that PG-initiated spec refresh doesn't mess things up by reverting to the old spec.
|
||||
self._endpoint.update_pageservers_in_config(pageserver_id=pageserver_id)
|
||||
self._endpoint.reconfigure(pageserver_id=pageserver_id)
|
||||
|
||||
connstring = self._endpoint.safe_psql(
|
||||
"SELECT setting FROM pg_settings WHERE name='neon.pageserver_connstring'"
|
||||
|
||||
@@ -0,0 +1,152 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Generate TPS and latency charts from BenchBase TPC-C results CSV files.
|
||||
|
||||
This script reads a CSV file containing BenchBase results and generates two charts:
|
||||
1. TPS (requests per second) over time
|
||||
2. P95 and P99 latencies over time
|
||||
|
||||
Both charts are combined in a single SVG file.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import matplotlib.pyplot as plt # type: ignore[import-not-found]
|
||||
import pandas as pd # type: ignore[import-untyped]
|
||||
|
||||
|
||||
def load_results_csv(csv_file_path):
|
||||
"""Load BenchBase results CSV file into a pandas DataFrame."""
|
||||
try:
|
||||
df = pd.read_csv(csv_file_path)
|
||||
|
||||
# Validate required columns exist
|
||||
required_columns = [
|
||||
"Time (seconds)",
|
||||
"Throughput (requests/second)",
|
||||
"95th Percentile Latency (millisecond)",
|
||||
"99th Percentile Latency (millisecond)",
|
||||
]
|
||||
|
||||
missing_columns = [col for col in required_columns if col not in df.columns]
|
||||
if missing_columns:
|
||||
print(f"Error: Missing required columns: {missing_columns}")
|
||||
sys.exit(1)
|
||||
|
||||
return df
|
||||
|
||||
except FileNotFoundError:
|
||||
print(f"Error: CSV file not found: {csv_file_path}")
|
||||
sys.exit(1)
|
||||
except pd.errors.EmptyDataError:
|
||||
print(f"Error: CSV file is empty: {csv_file_path}")
|
||||
sys.exit(1)
|
||||
except Exception as e:
|
||||
print(f"Error reading CSV file: {e}")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def generate_charts(df, input_filename, output_svg_path, title_suffix=None):
|
||||
"""Generate combined TPS and latency charts and save as SVG."""
|
||||
|
||||
# Get the filename without extension for chart titles
|
||||
file_label = Path(input_filename).stem
|
||||
|
||||
# Build title ending with optional suffix
|
||||
if title_suffix:
|
||||
title_ending = f"{title_suffix} - {file_label}"
|
||||
else:
|
||||
title_ending = file_label
|
||||
|
||||
# Create figure with two subplots
|
||||
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 10))
|
||||
|
||||
# Chart 1: Time vs TPS
|
||||
ax1.plot(
|
||||
df["Time (seconds)"],
|
||||
df["Throughput (requests/second)"],
|
||||
linewidth=1,
|
||||
color="blue",
|
||||
alpha=0.7,
|
||||
)
|
||||
ax1.set_xlabel("Time (seconds)")
|
||||
ax1.set_ylabel("TPS (Requests Per Second)")
|
||||
ax1.set_title(f"Benchbase TPC-C Like Throughput (TPS) - {title_ending}")
|
||||
ax1.grid(True, alpha=0.3)
|
||||
ax1.set_xlim(0, df["Time (seconds)"].max())
|
||||
|
||||
# Chart 2: Time vs P95 and P99 Latencies
|
||||
ax2.plot(
|
||||
df["Time (seconds)"],
|
||||
df["95th Percentile Latency (millisecond)"],
|
||||
linewidth=1,
|
||||
color="orange",
|
||||
alpha=0.7,
|
||||
label="Latency P95",
|
||||
)
|
||||
ax2.plot(
|
||||
df["Time (seconds)"],
|
||||
df["99th Percentile Latency (millisecond)"],
|
||||
linewidth=1,
|
||||
color="red",
|
||||
alpha=0.7,
|
||||
label="Latency P99",
|
||||
)
|
||||
ax2.set_xlabel("Time (seconds)")
|
||||
ax2.set_ylabel("Latency (ms)")
|
||||
ax2.set_title(f"Benchbase TPC-C Like Latency - {title_ending}")
|
||||
ax2.grid(True, alpha=0.3)
|
||||
ax2.set_xlim(0, df["Time (seconds)"].max())
|
||||
ax2.legend()
|
||||
|
||||
plt.tight_layout()
|
||||
|
||||
# Save as SVG
|
||||
try:
|
||||
plt.savefig(output_svg_path, format="svg", dpi=300, bbox_inches="tight")
|
||||
print(f"Charts saved to: {output_svg_path}")
|
||||
except Exception as e:
|
||||
print(f"Error saving SVG file: {e}")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def main():
|
||||
"""Main function to parse arguments and generate charts."""
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Generate TPS and latency charts from BenchBase TPC-C results CSV"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--input-csv", type=str, required=True, help="Path to the input CSV results file"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--output-svg", type=str, required=True, help="Path for the output SVG chart file"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--title-suffix",
|
||||
type=str,
|
||||
required=False,
|
||||
help="Optional suffix to add to chart titles (e.g., 'Warmup', 'Benchmark Phase')",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
# Validate input file exists
|
||||
if not Path(args.input_csv).exists():
|
||||
print(f"Error: Input CSV file does not exist: {args.input_csv}")
|
||||
sys.exit(1)
|
||||
|
||||
# Create output directory if it doesn't exist
|
||||
output_path = Path(args.output_svg)
|
||||
output_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Load data and generate charts
|
||||
df = load_results_csv(args.input_csv)
|
||||
generate_charts(df, args.input_csv, args.output_svg, args.title_suffix)
|
||||
|
||||
print(f"Successfully generated charts from {len(df)} data points")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
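# Example invocation (hypothetical script name and paths, for illustration only):
#   python generate_benchbase_charts.py \
#     --input-csv results_warmup/results.csv \
#     --output-svg charts/warmup.svg \
#     --title-suffix "Warmup"
# This reads the per-second BenchBase results CSV and writes a single SVG with the
# TPS chart on top and the P95/P99 latency chart below it.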
|
||||
@@ -0,0 +1,339 @@
|
||||
import argparse
|
||||
import html
|
||||
import math
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
CONFIGS_DIR = Path("../configs")
|
||||
SCRIPTS_DIR = Path("../scripts")
|
||||
|
||||
# Constants
|
||||
## TODO increase times after testing
|
||||
WARMUP_TIME_SECONDS = 1200 # 20 minutes
|
||||
BENCHMARK_TIME_SECONDS = 3600 # 1 hour
|
||||
RAMP_STEP_TIME_SECONDS = 300 # 5 minutes
|
||||
BASE_TERMINALS = 130
|
||||
TERMINALS_PER_WAREHOUSE = 0.2
|
||||
OPTIMAL_RATE_FACTOR = 0.7 # 70% of max rate
|
||||
BATCH_SIZE = 1000
|
||||
LOADER_THREADS = 4
|
||||
TRANSACTION_WEIGHTS = "45,43,4,4,4" # NewOrder, Payment, OrderStatus, Delivery, StockLevel
|
||||
# Ramp-up rate multipliers
|
||||
RAMP_RATE_FACTORS = [1.5, 1.1, 0.9, 0.7, 0.6, 0.4, 0.6, 0.7, 0.9, 1.1]
|
||||
|
||||
# Templates for XML configs
|
||||
WARMUP_XML = """<?xml version="1.0"?>
|
||||
<parameters>
|
||||
<type>POSTGRES</type>
|
||||
<driver>org.postgresql.Driver</driver>
|
||||
<url>jdbc:postgresql://{hostname}/neondb?sslmode=require&ApplicationName=tpcc&reWriteBatchedInserts=true</url>
|
||||
<username>neondb_owner</username>
|
||||
<password>{password}</password>
|
||||
<reconnectOnConnectionFailure>true</reconnectOnConnectionFailure>
|
||||
<isolation>TRANSACTION_READ_COMMITTED</isolation>
|
||||
<batchsize>{batch_size}</batchsize>
|
||||
<scalefactor>{warehouses}</scalefactor>
|
||||
<loaderThreads>0</loaderThreads>
|
||||
<terminals>{terminals}</terminals>
|
||||
<works>
|
||||
<work>
|
||||
<time>{warmup_time}</time>
|
||||
<weights>{transaction_weights}</weights>
|
||||
<rate>unlimited</rate>
|
||||
<arrival>POISSON</arrival>
|
||||
<distribution>ZIPFIAN</distribution>
|
||||
</work>
|
||||
</works>
|
||||
<transactiontypes>
|
||||
<transactiontype><name>NewOrder</name></transactiontype>
|
||||
<transactiontype><name>Payment</name></transactiontype>
|
||||
<transactiontype><name>OrderStatus</name></transactiontype>
|
||||
<transactiontype><name>Delivery</name></transactiontype>
|
||||
<transactiontype><name>StockLevel</name></transactiontype>
|
||||
</transactiontypes>
|
||||
</parameters>
|
||||
"""
|
||||
|
||||
MAX_RATE_XML = """<?xml version="1.0"?>
|
||||
<parameters>
|
||||
<type>POSTGRES</type>
|
||||
<driver>org.postgresql.Driver</driver>
|
||||
<url>jdbc:postgresql://{hostname}/neondb?sslmode=require&ApplicationName=tpcc&reWriteBatchedInserts=true</url>
|
||||
<username>neondb_owner</username>
|
||||
<password>{password}</password>
|
||||
<reconnectOnConnectionFailure>true</reconnectOnConnectionFailure>
|
||||
<isolation>TRANSACTION_READ_COMMITTED</isolation>
|
||||
<batchsize>{batch_size}</batchsize>
|
||||
<scalefactor>{warehouses}</scalefactor>
|
||||
<loaderThreads>0</loaderThreads>
|
||||
<terminals>{terminals}</terminals>
|
||||
<works>
|
||||
<work>
|
||||
<time>{benchmark_time}</time>
|
||||
<weights>{transaction_weights}</weights>
|
||||
<rate>unlimited</rate>
|
||||
<arrival>POISSON</arrival>
|
||||
<distribution>ZIPFIAN</distribution>
|
||||
</work>
|
||||
</works>
|
||||
<transactiontypes>
|
||||
<transactiontype><name>NewOrder</name></transactiontype>
|
||||
<transactiontype><name>Payment</name></transactiontype>
|
||||
<transactiontype><name>OrderStatus</name></transactiontype>
|
||||
<transactiontype><name>Delivery</name></transactiontype>
|
||||
<transactiontype><name>StockLevel</name></transactiontype>
|
||||
</transactiontypes>
|
||||
</parameters>
|
||||
"""
|
||||
|
||||
OPT_RATE_XML = """<?xml version="1.0"?>
|
||||
<parameters>
|
||||
<type>POSTGRES</type>
|
||||
<driver>org.postgresql.Driver</driver>
|
||||
<url>jdbc:postgresql://{hostname}/neondb?sslmode=require&ApplicationName=tpcc&reWriteBatchedInserts=true</url>
|
||||
<username>neondb_owner</username>
|
||||
<password>{password}</password>
|
||||
<reconnectOnConnectionFailure>true</reconnectOnConnectionFailure>
|
||||
<isolation>TRANSACTION_READ_COMMITTED</isolation>
|
||||
<batchsize>{batch_size}</batchsize>
|
||||
<scalefactor>{warehouses}</scalefactor>
|
||||
<loaderThreads>0</loaderThreads>
|
||||
<terminals>{terminals}</terminals>
|
||||
<works>
|
||||
<work>
|
||||
<time>{benchmark_time}</time>
|
||||
<rate>{opt_rate}</rate>
|
||||
<weights>{transaction_weights}</weights>
|
||||
<arrival>POISSON</arrival>
|
||||
<distribution>ZIPFIAN</distribution>
|
||||
</work>
|
||||
</works>
|
||||
<transactiontypes>
|
||||
<transactiontype><name>NewOrder</name></transactiontype>
|
||||
<transactiontype><name>Payment</name></transactiontype>
|
||||
<transactiontype><name>OrderStatus</name></transactiontype>
|
||||
<transactiontype><name>Delivery</name></transactiontype>
|
||||
<transactiontype><name>StockLevel</name></transactiontype>
|
||||
</transactiontypes>
|
||||
</parameters>
|
||||
"""
|
||||
|
||||
RAMP_UP_XML = """<?xml version="1.0"?>
|
||||
<parameters>
|
||||
<type>POSTGRES</type>
|
||||
<driver>org.postgresql.Driver</driver>
|
||||
<url>jdbc:postgresql://{hostname}/neondb?sslmode=require&ApplicationName=tpcc&reWriteBatchedInserts=true</url>
|
||||
<username>neondb_owner</username>
|
||||
<password>{password}</password>
|
||||
<reconnectOnConnectionFailure>true</reconnectOnConnectionFailure>
|
||||
<isolation>TRANSACTION_READ_COMMITTED</isolation>
|
||||
<batchsize>{batch_size}</batchsize>
|
||||
<scalefactor>{warehouses}</scalefactor>
|
||||
<loaderThreads>0</loaderThreads>
|
||||
<terminals>{terminals}</terminals>
|
||||
<works>
|
||||
{works}
|
||||
</works>
|
||||
<transactiontypes>
|
||||
<transactiontype><name>NewOrder</name></transactiontype>
|
||||
<transactiontype><name>Payment</name></transactiontype>
|
||||
<transactiontype><name>OrderStatus</name></transactiontype>
|
||||
<transactiontype><name>Delivery</name></transactiontype>
|
||||
<transactiontype><name>StockLevel</name></transactiontype>
|
||||
</transactiontypes>
|
||||
</parameters>
|
||||
"""
|
||||
|
||||
WORK_TEMPLATE = f""" <work>\n <time>{RAMP_STEP_TIME_SECONDS}</time>\n <rate>{{rate}}</rate>\n <weights>{TRANSACTION_WEIGHTS}</weights>\n <arrival>POISSON</arrival>\n <distribution>ZIPFIAN</distribution>\n </work>\n"""
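# For reference, with the constants above (RAMP_STEP_TIME_SECONDS = 300,
# TRANSACTION_WEIGHTS = "45,43,4,4,4") one rendered <work> entry for a hypothetical
# rate of 1200 TPS looks roughly like this (indentation approximate):
#
#     <work>
#         <time>300</time>
#         <rate>1200</rate>
#         <weights>45,43,4,4,4</weights>
#         <arrival>POISSON</arrival>
#         <distribution>ZIPFIAN</distribution>
#     </work>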
|
||||
|
||||
# Templates for shell scripts
|
||||
EXECUTE_SCRIPT = """# Create results directories
|
||||
mkdir -p results_warmup
|
||||
mkdir -p results_{suffix}
|
||||
chmod 777 results_warmup results_{suffix}
|
||||
|
||||
# Run warmup phase
|
||||
docker run --network=host --rm \
|
||||
-v $(pwd)/configs:/configs \
|
||||
-v $(pwd)/results_warmup:/results \
|
||||
{docker_image}\
|
||||
-b tpcc \
|
||||
-c /configs/execute_{warehouses}_warehouses_warmup.xml \
|
||||
-d /results \
|
||||
--create=false --load=false --execute=true
|
||||
|
||||
# Run benchmark phase
|
||||
docker run --network=host --rm \
|
||||
-v $(pwd)/configs:/configs \
|
||||
-v $(pwd)/results_{suffix}:/results \
|
||||
{docker_image}\
|
||||
-b tpcc \
|
||||
-c /configs/execute_{warehouses}_warehouses_{suffix}.xml \
|
||||
-d /results \
|
||||
--create=false --load=false --execute=true\n"""
|
||||
|
||||
LOAD_XML = """<?xml version="1.0"?>
|
||||
<parameters>
|
||||
<type>POSTGRES</type>
|
||||
<driver>org.postgresql.Driver</driver>
|
||||
<url>jdbc:postgresql://{hostname}/neondb?sslmode=require&ApplicationName=tpcc&reWriteBatchedInserts=true</url>
|
||||
<username>neondb_owner</username>
|
||||
<password>{password}</password>
|
||||
<reconnectOnConnectionFailure>true</reconnectOnConnectionFailure>
|
||||
<isolation>TRANSACTION_READ_COMMITTED</isolation>
|
||||
<batchsize>{batch_size}</batchsize>
|
||||
<scalefactor>{warehouses}</scalefactor>
|
||||
<loaderThreads>{loader_threads}</loaderThreads>
|
||||
</parameters>
|
||||
"""
|
||||
|
||||
LOAD_SCRIPT = """# Create results directory for loading
|
||||
mkdir -p results_load
|
||||
chmod 777 results_load
|
||||
|
||||
docker run --network=host --rm \
|
||||
-v $(pwd)/configs:/configs \
|
||||
-v $(pwd)/results_load:/results \
|
||||
{docker_image}\
|
||||
-b tpcc \
|
||||
-c /configs/load_{warehouses}_warehouses.xml \
|
||||
-d /results \
|
||||
--create=true --load=true --execute=false\n"""
|
||||
|
||||
|
||||
def write_file(path, content):
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
try:
|
||||
with open(path, "w") as f:
|
||||
f.write(content)
|
||||
except OSError as e:
|
||||
print(f"Error writing {path}: {e}")
|
||||
sys.exit(1)
|
||||
# If it's a shell script, set executable permission
|
||||
if str(path).endswith(".sh"):
|
||||
os.chmod(path, 0o755)
|
||||
|
||||
|
||||
def escape_xml_password(password):
|
||||
"""Escape XML special characters in password."""
|
||||
return html.escape(password, quote=True)
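# Example (hypothetical password, for illustration only):
#   escape_xml_password("p&ss<word>") == "p&amp;ss&lt;word&gt;"
# so the value can be embedded safely inside the <password> element of the XML configs.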
|
||||
|
||||
|
||||
def get_docker_arch_tag(runner_arch):
|
||||
"""Map GitHub Actions runner.arch to Docker image architecture tag."""
|
||||
arch_mapping = {"X64": "amd64", "ARM64": "arm64"}
|
||||
return arch_mapping.get(runner_arch, "amd64") # Default to amd64
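# Example: get_docker_arch_tag("ARM64") returns "arm64"; any unrecognized value
# falls back to "amd64".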
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="Generate BenchBase workload configs and scripts.")
|
||||
parser.add_argument("--warehouses", type=int, required=True, help="Number of warehouses")
|
||||
parser.add_argument("--max-rate", type=int, required=True, help="Max rate (TPS)")
|
||||
parser.add_argument("--hostname", type=str, required=True, help="Database hostname")
|
||||
parser.add_argument("--password", type=str, required=True, help="Database password")
|
||||
parser.add_argument(
|
||||
"--runner-arch", type=str, required=True, help="GitHub Actions runner architecture"
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
warehouses = args.warehouses
|
||||
max_rate = args.max_rate
|
||||
hostname = args.hostname
|
||||
password = args.password
|
||||
runner_arch = args.runner_arch
|
||||
|
||||
# Escape password for safe XML insertion
|
||||
escaped_password = escape_xml_password(password)
|
||||
|
||||
# Get the appropriate Docker architecture tag
|
||||
docker_arch = get_docker_arch_tag(runner_arch)
|
||||
docker_image = f"ghcr.io/neondatabase-labs/benchbase-postgres:latest-{docker_arch}"
|
||||
|
||||
opt_rate = math.ceil(max_rate * OPTIMAL_RATE_FACTOR)
|
||||
# Calculate terminals as the base terminal count plus TERMINALS_PER_WAREHOUSE per warehouse, rounded up
|
||||
terminals = math.ceil(BASE_TERMINALS + warehouses * TERMINALS_PER_WAREHOUSE)
|
||||
ramp_rates = [math.ceil(max_rate * factor) for factor in RAMP_RATE_FACTORS]
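# Worked example (hypothetical inputs, not taken from any particular run):
# with warehouses=200 and max_rate=1000 the values above become
#   opt_rate   = ceil(1000 * 0.7)      = 700
#   terminals  = ceil(130 + 200 * 0.2) = 170
#   ramp_rates = [1500, 1100, 900, 700, 600, 400, 600, 700, 900, 1100]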
|
||||
|
||||
# Write configs
|
||||
write_file(
|
||||
CONFIGS_DIR / f"execute_{warehouses}_warehouses_warmup.xml",
|
||||
WARMUP_XML.format(
|
||||
warehouses=warehouses,
|
||||
hostname=hostname,
|
||||
password=escaped_password,
|
||||
terminals=terminals,
|
||||
batch_size=BATCH_SIZE,
|
||||
warmup_time=WARMUP_TIME_SECONDS,
|
||||
transaction_weights=TRANSACTION_WEIGHTS,
|
||||
),
|
||||
)
|
||||
write_file(
|
||||
CONFIGS_DIR / f"execute_{warehouses}_warehouses_max_rate.xml",
|
||||
MAX_RATE_XML.format(
|
||||
warehouses=warehouses,
|
||||
hostname=hostname,
|
||||
password=escaped_password,
|
||||
terminals=terminals,
|
||||
batch_size=BATCH_SIZE,
|
||||
benchmark_time=BENCHMARK_TIME_SECONDS,
|
||||
transaction_weights=TRANSACTION_WEIGHTS,
|
||||
),
|
||||
)
|
||||
write_file(
|
||||
CONFIGS_DIR / f"execute_{warehouses}_warehouses_opt_rate.xml",
|
||||
OPT_RATE_XML.format(
|
||||
warehouses=warehouses,
|
||||
opt_rate=opt_rate,
|
||||
hostname=hostname,
|
||||
password=escaped_password,
|
||||
terminals=terminals,
|
||||
batch_size=BATCH_SIZE,
|
||||
benchmark_time=BENCHMARK_TIME_SECONDS,
|
||||
transaction_weights=TRANSACTION_WEIGHTS,
|
||||
),
|
||||
)
|
||||
|
||||
ramp_works = "".join([WORK_TEMPLATE.format(rate=rate) for rate in ramp_rates])
|
||||
write_file(
|
||||
CONFIGS_DIR / f"execute_{warehouses}_warehouses_ramp_up.xml",
|
||||
RAMP_UP_XML.format(
|
||||
warehouses=warehouses,
|
||||
works=ramp_works,
|
||||
hostname=hostname,
|
||||
password=escaped_password,
|
||||
terminals=terminals,
|
||||
batch_size=BATCH_SIZE,
|
||||
),
|
||||
)
|
||||
|
||||
# Loader config
|
||||
write_file(
|
||||
CONFIGS_DIR / f"load_{warehouses}_warehouses.xml",
|
||||
LOAD_XML.format(
|
||||
warehouses=warehouses,
|
||||
hostname=hostname,
|
||||
password=escaped_password,
|
||||
batch_size=BATCH_SIZE,
|
||||
loader_threads=LOADER_THREADS,
|
||||
),
|
||||
)
|
||||
|
||||
# Write scripts
|
||||
for suffix in ["max_rate", "opt_rate", "ramp_up"]:
|
||||
script = EXECUTE_SCRIPT.format(
|
||||
warehouses=warehouses, suffix=suffix, docker_image=docker_image
|
||||
)
|
||||
write_file(SCRIPTS_DIR / f"execute_{warehouses}_warehouses_{suffix}.sh", script)
|
||||
|
||||
# Loader script
|
||||
write_file(
|
||||
SCRIPTS_DIR / f"load_{warehouses}_warehouses.sh",
|
||||
LOAD_SCRIPT.format(warehouses=warehouses, docker_image=docker_image),
|
||||
)
|
||||
|
||||
print(f"Generated configs and scripts for {warehouses} warehouses and max rate {max_rate}.")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -0,0 +1,591 @@
|
||||
#!/usr/bin/env python3
|
||||
# ruff: noqa
|
||||
# This file is excluded from ruff because the GitHub runner uses Python 3.9, while ruff
# runs under newer Python 3.12 and suggests changes that are incompatible with Python 3.9.
|
||||
"""
|
||||
Upload BenchBase TPC-C results from summary.json and results.csv files to perf_test_results database.
|
||||
|
||||
This script extracts metrics from BenchBase *.summary.json and *.results.csv files and uploads them
|
||||
to a PostgreSQL database table for performance tracking and analysis.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd # type: ignore[import-untyped]
|
||||
import psycopg2
|
||||
|
||||
|
||||
def load_summary_json(json_file_path):
|
||||
"""Load summary.json file and return parsed data."""
|
||||
try:
|
||||
with open(json_file_path) as f:
|
||||
return json.load(f)
|
||||
except FileNotFoundError:
|
||||
print(f"Error: Summary JSON file not found: {json_file_path}")
|
||||
sys.exit(1)
|
||||
except json.JSONDecodeError as e:
|
||||
print(f"Error: Invalid JSON in file {json_file_path}: {e}")
|
||||
sys.exit(1)
|
||||
except Exception as e:
|
||||
print(f"Error loading JSON file {json_file_path}: {e}")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def get_metric_info(metric_name):
|
||||
"""Get metric unit and report type for a given metric name."""
|
||||
metrics_config = {
|
||||
"Throughput": {"unit": "req/s", "report_type": "higher_is_better"},
|
||||
"Goodput": {"unit": "req/s", "report_type": "higher_is_better"},
|
||||
"Measured Requests": {"unit": "requests", "report_type": "higher_is_better"},
|
||||
"95th Percentile Latency": {"unit": "µs", "report_type": "lower_is_better"},
|
||||
"Maximum Latency": {"unit": "µs", "report_type": "lower_is_better"},
|
||||
"Median Latency": {"unit": "µs", "report_type": "lower_is_better"},
|
||||
"Minimum Latency": {"unit": "µs", "report_type": "lower_is_better"},
|
||||
"25th Percentile Latency": {"unit": "µs", "report_type": "lower_is_better"},
|
||||
"90th Percentile Latency": {"unit": "µs", "report_type": "lower_is_better"},
|
||||
"99th Percentile Latency": {"unit": "µs", "report_type": "lower_is_better"},
|
||||
"75th Percentile Latency": {"unit": "µs", "report_type": "lower_is_better"},
|
||||
"Average Latency": {"unit": "µs", "report_type": "lower_is_better"},
|
||||
}
|
||||
|
||||
return metrics_config.get(metric_name, {"unit": "", "report_type": "higher_is_better"})
|
||||
|
||||
|
||||
def extract_metrics(summary_data):
|
||||
"""Extract relevant metrics from summary JSON data."""
|
||||
metrics = []
|
||||
|
||||
# Direct top-level metrics
|
||||
direct_metrics = {
|
||||
"Throughput (requests/second)": "Throughput",
|
||||
"Goodput (requests/second)": "Goodput",
|
||||
"Measured Requests": "Measured Requests",
|
||||
}
|
||||
|
||||
for json_key, clean_name in direct_metrics.items():
|
||||
if json_key in summary_data:
|
||||
metrics.append((clean_name, summary_data[json_key]))
|
||||
|
||||
# Latency metrics from nested "Latency Distribution" object
|
||||
if "Latency Distribution" in summary_data:
|
||||
latency_data = summary_data["Latency Distribution"]
|
||||
latency_metrics = {
|
||||
"95th Percentile Latency (microseconds)": "95th Percentile Latency",
|
||||
"Maximum Latency (microseconds)": "Maximum Latency",
|
||||
"Median Latency (microseconds)": "Median Latency",
|
||||
"Minimum Latency (microseconds)": "Minimum Latency",
|
||||
"25th Percentile Latency (microseconds)": "25th Percentile Latency",
|
||||
"90th Percentile Latency (microseconds)": "90th Percentile Latency",
|
||||
"99th Percentile Latency (microseconds)": "99th Percentile Latency",
|
||||
"75th Percentile Latency (microseconds)": "75th Percentile Latency",
|
||||
"Average Latency (microseconds)": "Average Latency",
|
||||
}
|
||||
|
||||
for json_key, clean_name in latency_metrics.items():
|
||||
if json_key in latency_data:
|
||||
metrics.append((clean_name, latency_data[json_key]))
|
||||
|
||||
return metrics
|
||||
|
||||
|
||||
def build_labels(summary_data, project_id):
|
||||
"""Build labels JSON object from summary data and project info."""
|
||||
labels = {}
|
||||
|
||||
# Extract required label keys from summary data
|
||||
label_keys = [
|
||||
"DBMS Type",
|
||||
"DBMS Version",
|
||||
"Benchmark Type",
|
||||
"Final State",
|
||||
"isolation",
|
||||
"scalefactor",
|
||||
"terminals",
|
||||
]
|
||||
|
||||
for key in label_keys:
|
||||
if key in summary_data:
|
||||
labels[key] = summary_data[key]
|
||||
|
||||
# Add project_id from workflow
|
||||
labels["project_id"] = project_id
|
||||
|
||||
return labels
|
||||
|
||||
|
||||
def build_suit_name(scalefactor, terminals, run_type, min_cu, max_cu):
|
||||
"""Build the suit name according to specification."""
|
||||
return f"benchbase-tpc-c-{scalefactor}-{terminals}-{run_type}-{min_cu}-{max_cu}"
|
||||
|
||||
|
||||
def convert_timestamp_to_utc(timestamp_ms):
|
||||
"""Convert millisecond timestamp to PostgreSQL-compatible UTC timestamp."""
|
||||
try:
|
||||
dt = datetime.fromtimestamp(timestamp_ms / 1000.0, tz=timezone.utc)
|
||||
return dt.isoformat()
|
||||
except (ValueError, TypeError) as e:
|
||||
print(f"Warning: Could not convert timestamp {timestamp_ms}: {e}")
|
||||
return datetime.now(timezone.utc).isoformat()
|
||||
|
||||
|
||||
def insert_metrics(conn, metrics_data):
|
||||
"""Insert metrics data into the perf_test_results table."""
|
||||
insert_query = """
|
||||
INSERT INTO perf_test_results
|
||||
(suit, revision, platform, metric_name, metric_value, metric_unit,
|
||||
metric_report_type, recorded_at_timestamp, labels)
|
||||
VALUES (%(suit)s, %(revision)s, %(platform)s, %(metric_name)s, %(metric_value)s,
|
||||
%(metric_unit)s, %(metric_report_type)s, %(recorded_at_timestamp)s, %(labels)s)
|
||||
"""
|
||||
|
||||
try:
|
||||
with conn.cursor() as cursor:
|
||||
cursor.executemany(insert_query, metrics_data)
|
||||
conn.commit()
|
||||
print(f"Successfully inserted {len(metrics_data)} metrics into perf_test_results")
|
||||
|
||||
# Log some sample data for verification
|
||||
if metrics_data:
|
||||
print(
|
||||
f"Sample metric: {metrics_data[0]['metric_name']} = {metrics_data[0]['metric_value']} {metrics_data[0]['metric_unit']}"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error inserting metrics into database: {e}")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def create_benchbase_results_details_table(conn):
|
||||
"""Create benchbase_results_details table if it doesn't exist."""
|
||||
create_table_query = """
|
||||
CREATE TABLE IF NOT EXISTS benchbase_results_details (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
suit TEXT,
|
||||
revision CHAR(40),
|
||||
platform TEXT,
|
||||
recorded_at_timestamp TIMESTAMP WITH TIME ZONE,
|
||||
requests_per_second NUMERIC,
|
||||
average_latency_ms NUMERIC,
|
||||
minimum_latency_ms NUMERIC,
|
||||
p25_latency_ms NUMERIC,
|
||||
median_latency_ms NUMERIC,
|
||||
p75_latency_ms NUMERIC,
|
||||
p90_latency_ms NUMERIC,
|
||||
p95_latency_ms NUMERIC,
|
||||
p99_latency_ms NUMERIC,
|
||||
maximum_latency_ms NUMERIC
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS benchbase_results_details_recorded_at_timestamp_idx
|
||||
ON benchbase_results_details USING BRIN (recorded_at_timestamp);
|
||||
CREATE INDEX IF NOT EXISTS benchbase_results_details_suit_idx
|
||||
ON benchbase_results_details USING BTREE (suit text_pattern_ops);
|
||||
"""

    try:
        with conn.cursor() as cursor:
            cursor.execute(create_table_query)
            conn.commit()
            print("Successfully created/verified benchbase_results_details table")
    except Exception as e:
        print(f"Error creating benchbase_results_details table: {e}")
        sys.exit(1)


def process_csv_results(csv_file_path, start_timestamp_ms, suit, revision, platform):
    """Process CSV results and return data for database insertion."""
    try:
        # Read CSV file
        df = pd.read_csv(csv_file_path)

        # Validate required columns exist
        required_columns = [
            "Time (seconds)",
            "Throughput (requests/second)",
            "Average Latency (millisecond)",
            "Minimum Latency (millisecond)",
            "25th Percentile Latency (millisecond)",
            "Median Latency (millisecond)",
            "75th Percentile Latency (millisecond)",
            "90th Percentile Latency (millisecond)",
            "95th Percentile Latency (millisecond)",
            "99th Percentile Latency (millisecond)",
            "Maximum Latency (millisecond)",
        ]

        missing_columns = [col for col in required_columns if col not in df.columns]
        if missing_columns:
            print(f"Error: Missing required columns in CSV: {missing_columns}")
            return []

        csv_data = []

        for _, row in df.iterrows():
            # Calculate timestamp: start_timestamp_ms + (time_seconds * 1000)
            time_seconds = row["Time (seconds)"]
            row_timestamp_ms = start_timestamp_ms + (time_seconds * 1000)
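            # Worked example (hypothetical values): start_timestamp_ms=1700000000000
            # and "Time (seconds)"=30 give row_timestamp_ms=1700000030000, so each
            # per-interval sample keeps its absolute wall-clock position.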

            # Convert to UTC timestamp
            row_timestamp = datetime.fromtimestamp(
                row_timestamp_ms / 1000.0, tz=timezone.utc
            ).isoformat()

            csv_row = {
                "suit": suit,
                "revision": revision,
                "platform": platform,
                "recorded_at_timestamp": row_timestamp,
                "requests_per_second": float(row["Throughput (requests/second)"]),
                "average_latency_ms": float(row["Average Latency (millisecond)"]),
                "minimum_latency_ms": float(row["Minimum Latency (millisecond)"]),
                "p25_latency_ms": float(row["25th Percentile Latency (millisecond)"]),
                "median_latency_ms": float(row["Median Latency (millisecond)"]),
                "p75_latency_ms": float(row["75th Percentile Latency (millisecond)"]),
                "p90_latency_ms": float(row["90th Percentile Latency (millisecond)"]),
                "p95_latency_ms": float(row["95th Percentile Latency (millisecond)"]),
                "p99_latency_ms": float(row["99th Percentile Latency (millisecond)"]),
                "maximum_latency_ms": float(row["Maximum Latency (millisecond)"]),
            }
            csv_data.append(csv_row)

        print(f"Processed {len(csv_data)} rows from CSV file")
        return csv_data

    except FileNotFoundError:
        print(f"Error: CSV file not found: {csv_file_path}")
        return []
    except Exception as e:
        print(f"Error processing CSV file {csv_file_path}: {e}")
        return []


def insert_csv_results(conn, csv_data):
    """Insert CSV results into benchbase_results_details table."""
    if not csv_data:
        print("No CSV data to insert")
        return

    insert_query = """
    INSERT INTO benchbase_results_details
    (suit, revision, platform, recorded_at_timestamp, requests_per_second,
     average_latency_ms, minimum_latency_ms, p25_latency_ms, median_latency_ms,
     p75_latency_ms, p90_latency_ms, p95_latency_ms, p99_latency_ms, maximum_latency_ms)
    VALUES (%(suit)s, %(revision)s, %(platform)s, %(recorded_at_timestamp)s, %(requests_per_second)s,
            %(average_latency_ms)s, %(minimum_latency_ms)s, %(p25_latency_ms)s, %(median_latency_ms)s,
            %(p75_latency_ms)s, %(p90_latency_ms)s, %(p95_latency_ms)s, %(p99_latency_ms)s, %(maximum_latency_ms)s)
    """

    try:
        with conn.cursor() as cursor:
            cursor.executemany(insert_query, csv_data)
            conn.commit()
            print(
                f"Successfully inserted {len(csv_data)} detailed results into benchbase_results_details"
            )

            # Log some sample data for verification
            sample = csv_data[0]
            print(
                f"Sample detail: {sample['requests_per_second']} req/s at {sample['recorded_at_timestamp']}"
            )

    except Exception as e:
        print(f"Error inserting CSV results into database: {e}")
        sys.exit(1)


def parse_load_log(log_file_path, scalefactor):
    """Parse load log file and extract load metrics."""
    try:
        with open(log_file_path) as f:
            log_content = f.read()

        # Regex patterns to match the timestamp lines
        loading_pattern = r"\[INFO \] (\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}),\d{3}.*Loading data into TPCC database"
        finished_pattern = r"\[INFO \] (\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}),\d{3}.*Finished loading data into TPCC database"

        loading_match = re.search(loading_pattern, log_content)
        finished_match = re.search(finished_pattern, log_content)

        if not loading_match or not finished_match:
            print(f"Warning: Could not find loading timestamps in log file {log_file_path}")
            return None

        # Parse timestamps
        loading_time = datetime.strptime(loading_match.group(1), "%Y-%m-%d %H:%M:%S")
        finished_time = datetime.strptime(finished_match.group(1), "%Y-%m-%d %H:%M:%S")

        # Calculate duration in seconds
        duration_seconds = (finished_time - loading_time).total_seconds()

        # Calculate load throughput in MB/s: scalefactor is the warehouse count,
        # and 10 warehouses is approx. 1 GB (1024 MB) of data
        load_throughput = (scalefactor * 1024 / 10.0) / duration_seconds
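        # Worked example (hypothetical numbers): scalefactor=500 warehouses is about
        # 500 * 1024 / 10 = 51200 MB of data; if the load phase takes 1700 s, the
        # reported load throughput is roughly 30.1 MB/s.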

        # Convert end time to UTC timestamp for database
        finished_time_utc = finished_time.replace(tzinfo=timezone.utc).isoformat()

        print(f"Load metrics: Duration={duration_seconds}s, Throughput={load_throughput:.2f} MB/s")

        return {
            "duration_seconds": duration_seconds,
            "throughput_mb_per_sec": load_throughput,
            "end_timestamp": finished_time_utc,
        }

    except FileNotFoundError:
        print(f"Warning: Load log file not found: {log_file_path}")
        return None
    except Exception as e:
        print(f"Error parsing load log file {log_file_path}: {e}")
        return None


def insert_load_metrics(conn, load_metrics, suit, revision, platform, labels_json):
    """Insert load metrics into perf_test_results table."""
    if not load_metrics:
        print("No load metrics to insert")
        return

    load_metrics_data = [
        {
            "suit": suit,
            "revision": revision,
            "platform": platform,
            "metric_name": "load_duration_seconds",
            "metric_value": load_metrics["duration_seconds"],
            "metric_unit": "seconds",
            "metric_report_type": "lower_is_better",
            "recorded_at_timestamp": load_metrics["end_timestamp"],
            "labels": labels_json,
        },
        {
            "suit": suit,
            "revision": revision,
            "platform": platform,
            "metric_name": "load_throughput",
            "metric_value": load_metrics["throughput_mb_per_sec"],
            "metric_unit": "MB/second",
            "metric_report_type": "higher_is_better",
            "recorded_at_timestamp": load_metrics["end_timestamp"],
            "labels": labels_json,
        },
    ]

    insert_query = """
    INSERT INTO perf_test_results
    (suit, revision, platform, metric_name, metric_value, metric_unit,
     metric_report_type, recorded_at_timestamp, labels)
    VALUES (%(suit)s, %(revision)s, %(platform)s, %(metric_name)s, %(metric_value)s,
            %(metric_unit)s, %(metric_report_type)s, %(recorded_at_timestamp)s, %(labels)s)
    """

    try:
        with conn.cursor() as cursor:
            cursor.executemany(insert_query, load_metrics_data)
            conn.commit()
            print(f"Successfully inserted {len(load_metrics_data)} load metrics into perf_test_results")

    except Exception as e:
        print(f"Error inserting load metrics into database: {e}")
        sys.exit(1)


def main():
    """Main function to parse arguments and upload results."""
    parser = argparse.ArgumentParser(
        description="Upload BenchBase TPC-C results to perf_test_results database"
    )
    parser.add_argument(
        "--summary-json", type=str, required=False, help="Path to the summary.json file"
    )
    parser.add_argument(
        "--run-type",
        type=str,
        required=True,
        choices=["warmup", "opt-rate", "ramp-up", "load"],
        help="Type of benchmark run",
    )
    parser.add_argument("--min-cu", type=float, required=True, help="Minimum compute units")
    parser.add_argument("--max-cu", type=float, required=True, help="Maximum compute units")
    parser.add_argument("--project-id", type=str, required=True, help="Neon project ID")
    parser.add_argument(
        "--revision", type=str, required=True, help="Git commit hash (40 characters)"
    )
    parser.add_argument(
        "--connection-string", type=str, required=True, help="PostgreSQL connection string"
    )
    parser.add_argument(
        "--results-csv",
        type=str,
        required=False,
        help="Path to the results.csv file for detailed metrics upload",
    )
    parser.add_argument(
        "--load-log",
        type=str,
        required=False,
        help="Path to the load log file for load phase metrics",
    )
    parser.add_argument(
        "--warehouses",
        type=int,
        required=False,
        help="Number of warehouses (scalefactor) for load metrics calculation",
    )

    args = parser.parse_args()

    # Validate inputs
    if args.summary_json and not Path(args.summary_json).exists():
        print(f"Error: Summary JSON file does not exist: {args.summary_json}")
        sys.exit(1)

    if not args.summary_json and not args.load_log:
        print("Error: Either summary JSON or load log file must be provided")
        sys.exit(1)

    if len(args.revision) != 40:
        print(f"Warning: Revision should be 40 characters, got {len(args.revision)}")

    # Load and process summary data if provided
    summary_data = None
    metrics = []

    if args.summary_json:
        summary_data = load_summary_json(args.summary_json)
        metrics = extract_metrics(summary_data)
        if not metrics:
            print("Warning: No metrics found in summary JSON")

    # Build common data for all metrics
    if summary_data:
        scalefactor = summary_data.get("scalefactor", "unknown")
        terminals = summary_data.get("terminals", "unknown")
        labels = build_labels(summary_data, args.project_id)
    else:
        # For load-only processing, use warehouses argument as scalefactor
        scalefactor = args.warehouses if args.warehouses else "unknown"
        terminals = "unknown"
        labels = {"project_id": args.project_id}

    suit = build_suit_name(scalefactor, terminals, args.run_type, args.min_cu, args.max_cu)
    platform = f"prod-us-east-2-{args.project_id}"

    # Convert timestamp - only needed for summary metrics and CSV processing
    current_timestamp_ms = None
    start_timestamp_ms = None
    recorded_at = None

    if summary_data:
        current_timestamp_ms = summary_data.get("Current Timestamp (milliseconds)")
        start_timestamp_ms = summary_data.get("Start timestamp (milliseconds)")

        if current_timestamp_ms:
            recorded_at = convert_timestamp_to_utc(current_timestamp_ms)
        else:
            print("Warning: No timestamp found in JSON, using current time")
            recorded_at = datetime.now(timezone.utc).isoformat()

        if not start_timestamp_ms:
            print("Warning: No start timestamp found in JSON, CSV upload may be incorrect")
            start_timestamp_ms = (
                current_timestamp_ms or datetime.now(timezone.utc).timestamp() * 1000
            )

    # Print Grafana dashboard link for cross-service endpoint debugging
    if start_timestamp_ms and current_timestamp_ms:
        grafana_url = (
            f"https://neonprod.grafana.net/d/cdya0okb81zwga/cross-service-endpoint-debugging"
            f"?orgId=1&from={int(start_timestamp_ms)}&to={int(current_timestamp_ms)}"
            f"&timezone=utc&var-env=prod&var-input_project_id={args.project_id}"
        )
        print(f'Cross service endpoint dashboard for "{args.run_type}" phase: {grafana_url}')

    # Prepare metrics data for database insertion (only if we have summary metrics)
    metrics_data = []
    if metrics and recorded_at:
        for metric_name, metric_value in metrics:
            metric_info = get_metric_info(metric_name)

            row = {
                "suit": suit,
                "revision": args.revision,
                "platform": platform,
                "metric_name": metric_name,
                "metric_value": float(metric_value),  # Ensure numeric type
                "metric_unit": metric_info["unit"],
                "metric_report_type": metric_info["report_type"],
                "recorded_at_timestamp": recorded_at,
                "labels": json.dumps(labels),  # Convert to JSON string for JSONB column
            }
            metrics_data.append(row)

    print(f"Prepared {len(metrics_data)} summary metrics for upload to database")
    print(f"Suit: {suit}")
    print(f"Platform: {platform}")

    # Connect to database and insert metrics
    try:
        conn = psycopg2.connect(args.connection_string)

        # Insert summary metrics into perf_test_results (if any)
        if metrics_data:
            insert_metrics(conn, metrics_data)
        else:
            print("No summary metrics to upload")

        # Process and insert detailed CSV results if provided
        if args.results_csv:
            print(f"Processing detailed CSV results from: {args.results_csv}")

            # Create table if it doesn't exist
            create_benchbase_results_details_table(conn)

            # Process CSV data
            csv_data = process_csv_results(
                args.results_csv, start_timestamp_ms, suit, args.revision, platform
            )

            # Insert CSV data
            if csv_data:
                insert_csv_results(conn, csv_data)
            else:
                print("No CSV data to upload")
        else:
            print("No CSV file provided, skipping detailed results upload")

        # Process and insert load metrics if provided
        if args.load_log:
            print(f"Processing load metrics from: {args.load_log}")

            # Parse load log and extract metrics
            load_metrics = parse_load_log(args.load_log, scalefactor)

            # Insert load metrics
            if load_metrics:
                insert_load_metrics(
                    conn, load_metrics, suit, args.revision, platform, json.dumps(labels)
                )
            else:
                print("No load metrics to upload")
        else:
            print("No load log file provided, skipping load metrics upload")

        conn.close()
        print("Database upload completed successfully")

    except psycopg2.Error as e:
        print(f"Database connection/query error: {e}")
        sys.exit(1)
    except Exception as e:
        print(f"Unexpected error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
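# Example invocation (hypothetical script name, paths, project id, and env var; in
# practice the GitHub workflow passes these arguments):
#   python upload_benchbase_results.py \
#       --summary-json results/summary.json \
#       --results-csv results/results.csv \
#       --run-type opt-rate --min-cu 2.0 --max-cu 8.0 \
#       --project-id shiny-example-123456 \
#       --revision 0123456789abcdef0123456789abcdef01234567 \
#       --connection-string "$PERF_TEST_RESULT_CONNSTR"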
@@ -80,9 +80,7 @@ def test_perf_simple_many_relations_reldir(
|
||||
"""
|
||||
Test creating many relations in a single database.
|
||||
"""
|
||||
env = neon_env_builder.init_start(
|
||||
initial_tenant_conf={"rel_size_v2_enabled": "true" if reldir != "v1" else "false"}
|
||||
)
|
||||
env = neon_env_builder.init_start(initial_tenant_conf={"rel_size_v2_enabled": reldir != "v1"})
|
||||
ep = env.endpoints.create_start(
|
||||
"main",
|
||||
config_lines=[
|
||||
|
||||
@@ -26,7 +26,7 @@ def test_compute_pageserver_connection_stress(neon_env_builder: NeonEnvBuilder):
|
||||
# Enable failpoint before starting everything else up so that we exercise the retry
|
||||
# on fetching basebackup
|
||||
pageserver_http = env.pageserver.http_client()
|
||||
pageserver_http.configure_failpoints(("simulated-bad-compute-connection", "50%return(15)"))
|
||||
pageserver_http.configure_failpoints(("simulated-bad-compute-connection", "20%return(15)"))
|
||||
|
||||
env.create_branch("test_compute_pageserver_connection_stress")
|
||||
endpoint = env.endpoints.create_start("test_compute_pageserver_connection_stress")
|
||||
|
||||
@@ -3,14 +3,35 @@ from __future__ import annotations
|
||||
import asyncio
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import pytest
|
||||
from fixtures.log_helper import log
|
||||
from fixtures.neon_fixtures import NeonEnvBuilder
|
||||
from fixtures.remote_storage import RemoteStorageKind
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fixtures.neon_fixtures import NeonEnvBuilder
|
||||
from fixtures.neon_fixtures import Endpoint, NeonEnvBuilder
|
||||
|
||||
|
||||
def test_change_pageserver(neon_env_builder: NeonEnvBuilder):
|
||||
def reconfigure_endpoint(endpoint: Endpoint, pageserver_id: int, use_explicit_reconfigure: bool):
|
||||
# It's important that we always update config.json before issuing any reconfigure requests
|
||||
# to make sure that PG-initiated config refresh doesn't mess things up by reverting to the old config.
|
||||
endpoint.update_pageservers_in_config(pageserver_id=pageserver_id)
|
||||
|
||||
# PG will automatically refresh its configuration if it detects connectivity issues with pageservers.
|
||||
# We also allow the test to explicitly request a reconfigure so that the test can be sure that the
|
||||
# endpoint is running with the latest configuration.
|
||||
#
|
||||
# Note that explicit reconfiguration is not required for the system to function or for this test to pass.
|
||||
# It is kept for reference as this is how this test used to work before the capability of initiating
|
||||
# configuration refreshes was added to compute nodes.
|
||||
if use_explicit_reconfigure:
|
||||
endpoint.reconfigure(pageserver_id=pageserver_id)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("use_explicit_reconfigure_for_failover", [False, True])
|
||||
def test_change_pageserver(
|
||||
neon_env_builder: NeonEnvBuilder, use_explicit_reconfigure_for_failover: bool
|
||||
):
|
||||
"""
|
||||
A relatively low level test of reconfiguring a compute's pageserver at runtime. Usually this
|
||||
is all done via the storage controller, but this test will disable the storage controller's compute
|
||||
@@ -72,7 +93,10 @@ def test_change_pageserver(neon_env_builder: NeonEnvBuilder):
|
||||
execute("SELECT count(*) FROM foo")
|
||||
assert fetchone() == (100000,)
|
||||
|
||||
endpoint.reconfigure(pageserver_id=alt_pageserver_id)
|
||||
# Reconfigure the endpoint to use the alt pageserver. We issue an explicit reconfigure request here
|
||||
# regardless of test mode as this is testing the externally driven reconfiguration scenario, not the
|
||||
# compute-initiated reconfiguration scenario upon detecting failures.
|
||||
reconfigure_endpoint(endpoint, pageserver_id=alt_pageserver_id, use_explicit_reconfigure=True)
|
||||
|
||||
# Verify that the neon.pageserver_connstring GUC is set to the correct thing
|
||||
execute("SELECT setting FROM pg_settings WHERE name='neon.pageserver_connstring'")
|
||||
@@ -100,6 +124,12 @@ def test_change_pageserver(neon_env_builder: NeonEnvBuilder):
|
||||
env.storage_controller.node_configure(env.pageservers[1].id, {"availability": "Offline"})
|
||||
env.storage_controller.reconcile_until_idle()
|
||||
|
||||
reconfigure_endpoint(
|
||||
endpoint,
|
||||
pageserver_id=env.pageservers[0].id,
|
||||
use_explicit_reconfigure=use_explicit_reconfigure_for_failover,
|
||||
)
|
||||
|
||||
endpoint.reconfigure(pageserver_id=env.pageservers[0].id)
|
||||
|
||||
execute("SELECT count(*) FROM foo")
|
||||
@@ -116,7 +146,11 @@ def test_change_pageserver(neon_env_builder: NeonEnvBuilder):
|
||||
await asyncio.sleep(
|
||||
1
|
||||
) # Sleep for 1 second just to make sure we actually started our count(*) query
|
||||
endpoint.reconfigure(pageserver_id=env.pageservers[1].id)
|
||||
reconfigure_endpoint(
|
||||
endpoint,
|
||||
pageserver_id=env.pageservers[1].id,
|
||||
use_explicit_reconfigure=use_explicit_reconfigure_for_failover,
|
||||
)
|
||||
|
||||
def execute_count():
|
||||
execute("SELECT count(*) FROM FOO")
|
||||
|
||||
@@ -58,7 +58,7 @@ PREEMPT_GC_COMPACTION_TENANT_CONF = {
|
||||
"compaction_upper_limit": 6,
|
||||
"lsn_lease_length": "0s",
|
||||
# Enable gc-compaction
|
||||
"gc_compaction_enabled": "true",
|
||||
"gc_compaction_enabled": True,
|
||||
"gc_compaction_initial_threshold_kb": 1024, # At a small threshold
|
||||
"gc_compaction_ratio_percent": 1,
|
||||
# No PiTR interval and small GC horizon
|
||||
@@ -540,7 +540,7 @@ def test_pageserver_gc_compaction_trigger(neon_env_builder: NeonEnvBuilder):
|
||||
"pitr_interval": "0s",
|
||||
"gc_horizon": f"{1024 * 16}",
|
||||
"lsn_lease_length": "0s",
|
||||
"gc_compaction_enabled": "true",
|
||||
"gc_compaction_enabled": True,
|
||||
"gc_compaction_initial_threshold_kb": "16",
|
||||
"gc_compaction_ratio_percent": "50",
|
||||
# Do not generate image layers with create_image_layers
|
||||
|
||||
369
test_runner/regress/test_compute_termination.py
Normal file
@@ -0,0 +1,369 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import threading
|
||||
import time
|
||||
from http.server import BaseHTTPRequestHandler, HTTPServer
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import requests
|
||||
from fixtures.log_helper import log
|
||||
from typing_extensions import override
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Any
|
||||
|
||||
from fixtures.common_types import TenantId, TimelineId
|
||||
from fixtures.neon_fixtures import NeonEnv
|
||||
from fixtures.port_distributor import PortDistributor
|
||||
|
||||
|
||||
def launch_compute_ctl(
|
||||
env: NeonEnv,
|
||||
endpoint_name: str,
|
||||
external_http_port: int,
|
||||
internal_http_port: int,
|
||||
pg_port: int,
|
||||
control_plane_port: int,
|
||||
) -> subprocess.Popen[str]:
|
||||
"""
|
||||
Helper function to launch compute_ctl process with common configuration.
|
||||
Returns the Popen process object.
|
||||
"""
|
||||
# Create endpoint directory structure following the standard pattern
|
||||
endpoint_path = env.repo_dir / "endpoints" / endpoint_name
|
||||
|
||||
# Clean up any existing endpoint directory to avoid conflicts
|
||||
if endpoint_path.exists():
|
||||
shutil.rmtree(endpoint_path)
|
||||
|
||||
endpoint_path.mkdir(mode=0o755, parents=True, exist_ok=True)
|
||||
|
||||
# pgdata path - compute_ctl will create this directory during basebackup
|
||||
pgdata_path = endpoint_path / "pgdata"
|
||||
|
||||
# Create log file in endpoint directory
|
||||
log_file = endpoint_path / "compute.log"
|
||||
log_handle = open(log_file, "w")
|
||||
|
||||
# Start compute_ctl pointing to our control plane
|
||||
compute_ctl_path = env.neon_binpath / "compute_ctl"
|
||||
connstr = f"postgresql://cloud_admin@localhost:{pg_port}/postgres"
|
||||
|
||||
# Find postgres binary path
|
||||
pg_bin_path = env.pg_distrib_dir / env.pg_version.v_prefixed / "bin" / "postgres"
|
||||
pg_lib_path = env.pg_distrib_dir / env.pg_version.v_prefixed / "lib"
|
||||
|
||||
env_vars = {
|
||||
"INSTANCE_ID": "lakebase-instance-id",
|
||||
"LD_LIBRARY_PATH": str(pg_lib_path), # Linux, etc.
|
||||
"DYLD_LIBRARY_PATH": str(pg_lib_path), # macOS
|
||||
}
|
||||
|
||||
cmd = [
|
||||
str(compute_ctl_path),
|
||||
"--external-http-port",
|
||||
str(external_http_port),
|
||||
"--internal-http-port",
|
||||
str(internal_http_port),
|
||||
"--pgdata",
|
||||
str(pgdata_path),
|
||||
"--connstr",
|
||||
connstr,
|
||||
"--pgbin",
|
||||
str(pg_bin_path),
|
||||
"--compute-id",
|
||||
endpoint_name, # Use endpoint_name as compute-id
|
||||
"--control-plane-uri",
|
||||
f"http://127.0.0.1:{control_plane_port}",
|
||||
"--lakebase-mode",
|
||||
"true",
|
||||
]
|
||||
|
||||
print(f"Launching compute_ctl with command: {cmd}")
|
||||
|
||||
# Start compute_ctl
|
||||
process = subprocess.Popen(
|
||||
cmd,
|
||||
env=env_vars,
|
||||
stdout=log_handle,
|
||||
stderr=subprocess.STDOUT, # Combine stderr with stdout
|
||||
text=True,
|
||||
)
|
||||
|
||||
return process
|
||||
|
||||
|
||||
def wait_for_compute_status(
|
||||
compute_process: subprocess.Popen[str],
|
||||
http_port: int,
|
||||
expected_status: str,
|
||||
timeout_seconds: int = 10,
|
||||
) -> None:
|
||||
"""
|
||||
Wait for compute_ctl to reach the expected status.
|
||||
Raises an exception if timeout is reached or process exits unexpectedly.
|
||||
"""
|
||||
start_time = time.time()
|
||||
while time.time() - start_time < timeout_seconds:
|
||||
try:
|
||||
# Try to connect to the HTTP endpoint
|
||||
response = requests.get(f"http://localhost:{http_port}/status", timeout=0.5)
|
||||
if response.status_code == 200:
|
||||
status_json = response.json()
|
||||
# Check if it's in expected status
|
||||
if status_json.get("status") == expected_status:
|
||||
return
|
||||
except (requests.ConnectionError, requests.Timeout):
|
||||
pass
|
||||
|
||||
# Check if process has exited
|
||||
if compute_process.poll() is not None:
|
||||
raise Exception(
|
||||
f"compute_ctl exited unexpectedly with code {compute_process.returncode}."
|
||||
)
|
||||
|
||||
time.sleep(0.5)
|
||||
|
||||
# Timeout reached
|
||||
compute_process.terminate()
|
||||
raise Exception(
|
||||
f"compute_ctl failed to reach {expected_status} status within {timeout_seconds} seconds."
|
||||
)
|
||||
|
||||
|
||||
class EmptySpecHandler(BaseHTTPRequestHandler):
|
||||
"""HTTP handler that returns an Empty compute spec response"""
|
||||
|
||||
def do_GET(self):
|
||||
if self.path.startswith("/compute/api/v2/computes/") and self.path.endswith("/spec"):
|
||||
# Return empty status which will put compute in Empty state
|
||||
response: dict[str, Any] = {
|
||||
"status": "empty",
|
||||
"spec": None,
|
||||
"compute_ctl_config": {"jwks": {"keys": []}},
|
||||
}
|
||||
self.send_response(200)
|
||||
self.send_header("Content-Type", "application/json")
|
||||
self.end_headers()
|
||||
self.wfile.write(json.dumps(response).encode())
|
||||
else:
|
||||
self.send_error(404)
|
||||
|
||||
@override
|
||||
def log_message(self, format: str, *args: Any):
|
||||
# Suppress request logging
|
||||
pass
|
||||
|
||||
|
||||
def test_compute_terminate_empty(neon_simple_env: NeonEnv, port_distributor: PortDistributor):
|
||||
"""
|
||||
Test that terminating a compute in Empty status works correctly.
|
||||
|
||||
This tests the bug fix where terminating an Empty compute would hang
|
||||
waiting for a non-existent postgres process to terminate.
|
||||
"""
|
||||
env = neon_simple_env
|
||||
|
||||
# Get ports for our test
|
||||
control_plane_port = port_distributor.get_port()
|
||||
external_http_port = port_distributor.get_port()
|
||||
internal_http_port = port_distributor.get_port()
|
||||
pg_port = port_distributor.get_port()
|
||||
|
||||
# Start a simple HTTP server that will serve the Empty spec
|
||||
server = HTTPServer(("127.0.0.1", control_plane_port), EmptySpecHandler)
|
||||
server_thread = threading.Thread(target=server.serve_forever)
|
||||
server_thread.daemon = True
|
||||
server_thread.start()
|
||||
|
||||
compute_process = None
|
||||
try:
|
||||
# Start compute_ctl with ephemeral tenant ID
|
||||
compute_process = launch_compute_ctl(
|
||||
env,
|
||||
"test-empty-compute",
|
||||
external_http_port,
|
||||
internal_http_port,
|
||||
pg_port,
|
||||
control_plane_port,
|
||||
)
|
||||
|
||||
# Wait for compute_ctl to start and report "empty" status
|
||||
wait_for_compute_status(compute_process, external_http_port, "empty")
|
||||
|
||||
# Now send terminate request
|
||||
response = requests.post(f"http://localhost:{external_http_port}/terminate")
|
||||
|
||||
# Verify that the termination request sends back a 200 OK response and is not abruptly terminated.
|
||||
assert response.status_code == 200, (
|
||||
f"Expected 200 OK, got {response.status_code}: {response.text}"
|
||||
)
|
||||
|
||||
# Wait for compute_ctl to exit
|
||||
exit_code = compute_process.wait(timeout=10)
|
||||
assert exit_code == 0, f"compute_ctl exited with non-zero code: {exit_code}"
|
||||
|
||||
finally:
|
||||
# Clean up
|
||||
server.shutdown()
|
||||
if compute_process and compute_process.poll() is None:
|
||||
compute_process.terminate()
|
||||
compute_process.wait()
|
||||
|
||||
|
||||
class SwitchableConfigHandler(BaseHTTPRequestHandler):
|
||||
"""HTTP handler that can switch between normal compute configs and compute configs without specs"""
|
||||
|
||||
return_empty_spec: bool = False
|
||||
tenant_id: TenantId | None = None
|
||||
timeline_id: TimelineId | None = None
|
||||
pageserver_port: int | None = None
|
||||
safekeeper_connstrs: list[str] | None = None
|
||||
|
||||
def do_GET(self):
|
||||
if self.path.startswith("/compute/api/v2/computes/") and self.path.endswith("/spec"):
|
||||
if self.return_empty_spec:
|
||||
# Return empty status
|
||||
response: dict[str, object | None] = {
|
||||
"status": "empty",
|
||||
"spec": None,
|
||||
"compute_ctl_config": {
|
||||
"jwks": {"keys": []},
|
||||
},
|
||||
}
|
||||
else:
|
||||
# Return normal attached spec
|
||||
response = {
|
||||
"status": "attached",
|
||||
"spec": {
|
||||
"format_version": 1.0,
|
||||
"cluster": {
|
||||
"roles": [],
|
||||
"databases": [],
|
||||
"postgresql_conf": "shared_preload_libraries='neon'",
|
||||
},
|
||||
"tenant_id": str(self.tenant_id) if self.tenant_id else "",
|
||||
"timeline_id": str(self.timeline_id) if self.timeline_id else "",
|
||||
"pageserver_connstring": f"postgres://no_user@localhost:{self.pageserver_port}"
|
||||
if self.pageserver_port
|
||||
else "",
|
||||
"safekeeper_connstrings": self.safekeeper_connstrs or [],
|
||||
"mode": "Primary",
|
||||
"skip_pg_catalog_updates": True,
|
||||
"reconfigure_concurrency": 1,
|
||||
"suspend_timeout_seconds": -1,
|
||||
},
|
||||
"compute_ctl_config": {
|
||||
"jwks": {"keys": []},
|
||||
},
|
||||
}
|
||||
self.send_response(200)
|
||||
self.send_header("Content-Type", "application/json")
|
||||
self.end_headers()
|
||||
self.wfile.write(json.dumps(response).encode())
|
||||
else:
|
||||
self.send_error(404)
|
||||
|
||||
@override
|
||||
def log_message(self, format: str, *args: Any):
|
||||
# Suppress request logging
|
||||
pass
|
||||
|
||||
|
||||
def test_compute_empty_spec_during_refresh_configuration(
|
||||
neon_simple_env: NeonEnv, port_distributor: PortDistributor
|
||||
):
|
||||
"""
|
||||
Test that compute exits when it receives an empty spec during refresh configuration state.
|
||||
|
||||
This test:
|
||||
1. Start compute with a normal spec
|
||||
2. Change the spec handler to return empty spec
|
||||
3. Trigger some condition to force compute to refresh configuration
|
||||
4. Verify that compute_ctl exits
|
||||
"""
|
||||
env = neon_simple_env
|
||||
|
||||
# Get ports for our test
|
||||
control_plane_port = port_distributor.get_port()
|
||||
external_http_port = port_distributor.get_port()
|
||||
internal_http_port = port_distributor.get_port()
|
||||
pg_port = port_distributor.get_port()
|
||||
|
||||
# Set up handler class variables
|
||||
SwitchableConfigHandler.tenant_id = env.initial_tenant
|
||||
SwitchableConfigHandler.timeline_id = env.initial_timeline
|
||||
SwitchableConfigHandler.pageserver_port = env.pageserver.service_port.pg
|
||||
# Convert comma-separated string to list
|
||||
safekeeper_connstrs = env.get_safekeeper_connstrs()
|
||||
if safekeeper_connstrs:
|
||||
SwitchableConfigHandler.safekeeper_connstrs = safekeeper_connstrs.split(",")
|
||||
else:
|
||||
SwitchableConfigHandler.safekeeper_connstrs = []
|
||||
SwitchableConfigHandler.return_empty_spec = False # Start with normal spec
|
||||
|
||||
# Start HTTP server with switchable spec handler
|
||||
server = HTTPServer(("127.0.0.1", control_plane_port), SwitchableConfigHandler)
|
||||
server_thread = threading.Thread(target=server.serve_forever)
|
||||
server_thread.daemon = True
|
||||
server_thread.start()
|
||||
|
||||
compute_process = None
|
||||
try:
|
||||
# Start compute_ctl with tenant and timeline IDs
|
||||
# Use a unique endpoint name to avoid conflicts
|
||||
endpoint_name = f"test-refresh-compute-{os.getpid()}"
|
||||
compute_process = launch_compute_ctl(
|
||||
env,
|
||||
endpoint_name,
|
||||
external_http_port,
|
||||
internal_http_port,
|
||||
pg_port,
|
||||
control_plane_port,
|
||||
)
|
||||
|
||||
# Wait for compute_ctl to start and report "running" status
|
||||
wait_for_compute_status(compute_process, external_http_port, "running", timeout_seconds=30)
|
||||
|
||||
log.info("Compute is running. Now returning empty spec and trigger configuration refresh.")
|
||||
|
||||
# Switch spec fetch handler to return empty spec
|
||||
SwitchableConfigHandler.return_empty_spec = True
|
||||
|
||||
# Trigger a configuration refresh
|
||||
try:
|
||||
requests.post(f"http://localhost:{internal_http_port}/refresh_configuration")
|
||||
except requests.RequestException as e:
|
||||
log.info(f"Call to /refresh_configuration failed: {e}")
|
||||
log.info(
|
||||
"Ignoring the error, assuming that compute_ctl is already refreshing or has exited"
|
||||
)
|
||||
|
||||
# Wait for compute_ctl to exit (it should exit when it gets an empty spec during refresh)
|
||||
exit_start_time = time.time()
|
||||
while time.time() - exit_start_time < 30:
|
||||
if compute_process.poll() is not None:
|
||||
# Process exited
|
||||
break
|
||||
time.sleep(0.5)
|
||||
|
||||
# Verify that compute_ctl exited
|
||||
exit_code = compute_process.poll()
|
||||
if exit_code is None:
|
||||
compute_process.terminate()
|
||||
raise Exception("compute_ctl did not exit after receiving empty spec.")
|
||||
|
||||
# The exit code might not be 0 in this case since it's an unexpected termination
|
||||
# but we mainly care that it did exit
|
||||
assert exit_code is not None, "compute_ctl should have exited"
|
||||
|
||||
finally:
|
||||
# Clean up
|
||||
server.shutdown()
|
||||
if compute_process and compute_process.poll() is None:
|
||||
compute_process.terminate()
|
||||
compute_process.wait()
|
||||
137
test_runner/regress/test_hadron_ps_connectivity_metrics.py
Normal file
@@ -0,0 +1,137 @@
|
||||
import json
|
||||
import shutil
|
||||
|
||||
from fixtures.common_types import TenantShardId
|
||||
from fixtures.log_helper import log
|
||||
from fixtures.metrics import parse_metrics
|
||||
from fixtures.neon_fixtures import Endpoint, NeonEnvBuilder, NeonPageserver
|
||||
from requests.exceptions import ConnectionError
|
||||
|
||||
|
||||
# Helper function to attempt reconfiguration of the compute to point to a new pageserver. Note that in these tests,
|
||||
# we don't expect the reconfiguration attempts to go through, as we will be pointing the compute at a "wrong" pageserver.
|
||||
def _attempt_reconfiguration(endpoint: Endpoint, new_pageserver_id: int, timeout_sec: float):
|
||||
try:
|
||||
endpoint.reconfigure(pageserver_id=new_pageserver_id, timeout_sec=timeout_sec)
|
||||
except Exception as e:
|
||||
log.info(f"reconfiguration failed with exception {e}")
|
||||
pass
|
||||
|
||||
|
||||
def read_misrouted_metric_value(pageserver: NeonPageserver) -> float:
|
||||
return (
|
||||
pageserver.http_client()
|
||||
.get_metrics()
|
||||
.query_one("pageserver_misrouted_pagestream_requests_total")
|
||||
.value
|
||||
)
|
||||
|
||||
|
||||
def read_request_error_metric_value(endpoint: Endpoint) -> float:
|
||||
return (
|
||||
parse_metrics(endpoint.http_client().metrics())
|
||||
.query_one("pg_cctl_pagestream_request_errors_total")
|
||||
.value
|
||||
)
|
||||
|
||||
|
||||
def test_misrouted_to_secondary(
|
||||
neon_env_builder: NeonEnvBuilder,
|
||||
):
|
||||
"""
|
||||
Tests that the following metrics are incremented when compute tries to talk to a secondary pageserver:
|
||||
- On pageserver receiving the request: pageserver_misrouted_pagestream_requests_total
|
||||
- On compute: pg_cctl_pagestream_request_errors_total
|
||||
"""
|
||||
neon_env_builder.num_pageservers = 2
|
||||
env = neon_env_builder.init_configs()
|
||||
env.broker.start()
|
||||
env.storage_controller.start()
|
||||
for ps in env.pageservers:
|
||||
ps.start()
|
||||
for sk in env.safekeepers:
|
||||
sk.start()
|
||||
|
||||
# Create a tenant that has one primary and one secondary. Due to primary/secondary placement constraints,
|
||||
# the primary and secondary pageservers will be different.
|
||||
tenant_id, _ = env.create_tenant(shard_count=1, placement_policy=json.dumps({"Attached": 1}))
|
||||
endpoint = env.endpoints.create(
|
||||
"main", tenant_id=tenant_id, config_lines=["neon.lakebase_mode = true"]
|
||||
)
|
||||
endpoint.respec(skip_pg_catalog_updates=False)
|
||||
endpoint.start()
|
||||
|
||||
# Get the primary pageserver serving the zero shard of the tenant, and detach it from the primary pageserver.
|
||||
# This test operation configures tenant directly on the pageserver/does not go through the storage controller,
|
||||
# so the compute does not get any notifications and will keep pointing at the detached pageserver.
|
||||
tenant_zero_shard = TenantShardId(tenant_id, shard_number=0, shard_count=1)
|
||||
|
||||
primary_ps = env.get_tenant_pageserver(tenant_zero_shard)
|
||||
secondary_ps = (
|
||||
env.pageservers[1] if primary_ps.id == env.pageservers[0].id else env.pageservers[0]
|
||||
)
|
||||
|
||||
# Now try to point the compute at the pageserver that is acting as secondary for the tenant. Test that the metrics
|
||||
# on both compute_ctl and the pageserver register the misrouted requests following the reconfiguration attempt.
|
||||
assert read_misrouted_metric_value(secondary_ps) == 0
|
||||
assert read_request_error_metric_value(endpoint) == 0
|
||||
_attempt_reconfiguration(endpoint, new_pageserver_id=secondary_ps.id, timeout_sec=2.0)
|
||||
assert read_misrouted_metric_value(secondary_ps) > 0
|
||||
try:
|
||||
assert read_request_error_metric_value(endpoint) > 0
|
||||
except ConnectionError:
|
||||
# When configuring PG to use misconfigured pageserver, PG will cancel the query after certain number of failed
|
||||
# reconfigure attempts. This will cause compute_ctl to exit.
|
||||
log.info("Cannot connect to PG, ignoring")
|
||||
pass
|
||||
|
||||
|
||||
def test_misrouted_to_ps_not_hosting_tenant(
|
||||
neon_env_builder: NeonEnvBuilder,
|
||||
):
|
||||
"""
|
||||
Tests that the following metrics are incremented when compute tries to talk to a pageserver that does not host the tenant:
|
||||
- On pageserver receiving the request: pageserver_misrouted_pagestream_requests_total
|
||||
- On compute: pg_cctl_pagestream_request_errors_total
|
||||
"""
|
||||
neon_env_builder.num_pageservers = 2
|
||||
env = neon_env_builder.init_configs()
|
||||
env.broker.start()
|
||||
env.storage_controller.start(handle_ps_local_disk_loss=False)
|
||||
for ps in env.pageservers:
|
||||
ps.start()
|
||||
for sk in env.safekeepers:
|
||||
sk.start()
|
||||
|
||||
tenant_id, _ = env.create_tenant(shard_count=1)
|
||||
endpoint = env.endpoints.create(
|
||||
"main", tenant_id=tenant_id, config_lines=["neon.lakebase_mode = true"]
|
||||
)
|
||||
endpoint.respec(skip_pg_catalog_updates=False)
|
||||
endpoint.start()
|
||||
|
||||
tenant_ps_id = env.get_tenant_pageserver(
|
||||
TenantShardId(tenant_id, shard_number=0, shard_count=1)
|
||||
).id
|
||||
non_hosting_ps = (
|
||||
env.pageservers[1] if tenant_ps_id == env.pageservers[0].id else env.pageservers[0]
|
||||
)
|
||||
|
||||
# Clear the disk of the non-hosting PS to make sure that it indeed doesn't have any information about the tenant.
|
||||
non_hosting_ps.stop(immediate=True)
|
||||
shutil.rmtree(non_hosting_ps.tenant_dir())
|
||||
non_hosting_ps.start()
|
||||
|
||||
# Now try to point the compute to the non-hosting pageserver. Test that the metrics
|
||||
# on both compute_ctl and the pageserver register the misrouted requests following the reconfiguration attempt.
|
||||
assert read_misrouted_metric_value(non_hosting_ps) == 0
|
||||
assert read_request_error_metric_value(endpoint) == 0
|
||||
_attempt_reconfiguration(endpoint, new_pageserver_id=non_hosting_ps.id, timeout_sec=2.0)
|
||||
assert read_misrouted_metric_value(non_hosting_ps) > 0
|
||||
try:
|
||||
assert read_request_error_metric_value(endpoint) > 0
|
||||
except ConnectionError:
|
||||
# When configuring PG to use misconfigured pageserver, PG will cancel the query after certain number of failed
|
||||
# reconfigure attempts. This will cause compute_ctl to exit.
|
||||
log.info("Cannot connect to PG, ignoring")
|
||||
pass
|
||||
@@ -133,6 +133,9 @@ def test_hot_standby_gc(neon_env_builder: NeonEnvBuilder, pause_apply: bool):
|
||||
tenant_conf = {
|
||||
# set PITR interval to be small, so we can do GC
|
||||
"pitr_interval": "0 s",
|
||||
# we want to control gc and checkpoint frequency precisely
|
||||
"gc_period": "0s",
|
||||
"compaction_period": "0s",
|
||||
# this test tests standby_horizon leases feature
|
||||
"standby_horizon_lease_length": "10s",
|
||||
}
|
||||
@@ -193,6 +196,23 @@ def test_hot_standby_gc(neon_env_builder: NeonEnvBuilder, pause_apply: bool):
|
||||
client = pageserver.http_client()
|
||||
client.timeline_checkpoint(tenant_shard_id, timeline_id)
|
||||
client.timeline_compact(tenant_shard_id, timeline_id)
|
||||
# Wait for standby horizon to get propagated.
|
||||
# This shouldn't be necessary, but the current mechanism for
|
||||
# standby_horizon propagation is imperfect. Detailed
|
||||
# description in https://databricks.atlassian.net/browse/LKB-2499
|
||||
while True:
|
||||
val = client.get_metric_value(
|
||||
"pageserver_standby_horizon",
|
||||
{
|
||||
"tenant_id": str(tenant_shard_id.tenant_id),
|
||||
"shard_id": str(tenant_shard_id.shard_index),
|
||||
"timeline_id": str(timeline_id),
|
||||
},
|
||||
)
|
||||
log.info("waiting for next standby_horizon push from safekeeper, {val=}")
|
||||
if val != 0:
|
||||
break
|
||||
time.sleep(0.1)
|
||||
client.timeline_gc(tenant_shard_id, timeline_id, 0)
|
||||
|
||||
# Re-execute the query. The GetPage requests that this
|
||||
|
||||
@@ -164,6 +164,25 @@ def test_lfc_prewarm(neon_simple_env: NeonEnv, method: PrewarmMethod):
|
||||
check_prewarmed(method, client, desired)
|
||||
|
||||
|
||||
@pytest.mark.skipif(not USE_LFC, reason="LFC is disabled, skipping")
|
||||
def test_lfc_prewarm_empty(neon_simple_env: NeonEnv):
|
||||
"""
|
||||
Test that there are no errors when trying to offload or prewarm an endpoint without a cache using compute_ctl.
An endpoint without a cache is simulated by turning off LFC manually, but in the cloud/ setup this is
also reproduced on fresh endpoints.
|
||||
"""
|
||||
env = neon_simple_env
|
||||
ep = env.endpoints.create_start("main", config_lines=["neon.file_cache_size_limit=0"])
|
||||
client = ep.http_client()
|
||||
conn = ep.connect()
|
||||
cur = conn.cursor()
|
||||
cur.execute("create schema neon; create extension neon with schema neon")
|
||||
method = PrewarmMethod.COMPUTE_CTL
|
||||
offload_lfc(method, client, cur)
|
||||
prewarm_endpoint(method, client, cur, None)
|
||||
assert client.prewarm_lfc_status()["status"] == "skipped"
|
||||
|
||||
|
||||
# autoprewarm isn't needed as we prewarm manually
|
||||
WORKLOAD_VALUES = METHOD_VALUES[:-1]
|
||||
WORKLOAD_IDS = METHOD_IDS[:-1]
|
||||
|
||||
@@ -16,7 +16,7 @@ def test_ondemand_download_pg_xact(neon_env_builder: NeonEnvBuilder, shard_count
|
||||
neon_env_builder.num_pageservers = shard_count
|
||||
|
||||
tenant_conf = {
|
||||
"lazy_slru_download": "true",
|
||||
"lazy_slru_download": True,
|
||||
# set PITR interval to be small, so we can do GC
|
||||
"pitr_interval": "0 s",
|
||||
}
|
||||
@@ -82,7 +82,7 @@ def test_ondemand_download_replica(neon_env_builder: NeonEnvBuilder, shard_count
|
||||
neon_env_builder.num_pageservers = shard_count
|
||||
|
||||
tenant_conf = {
|
||||
"lazy_slru_download": "true",
|
||||
"lazy_slru_download": True,
|
||||
}
|
||||
env = neon_env_builder.init_start(
|
||||
initial_tenant_conf=tenant_conf, initial_tenant_shard_count=shard_count
|
||||
@@ -141,7 +141,7 @@ def test_ondemand_download_after_wal_switch(neon_env_builder: NeonEnvBuilder):
|
||||
"""
|
||||
|
||||
tenant_conf = {
|
||||
"lazy_slru_download": "true",
|
||||
"lazy_slru_download": True,
|
||||
}
|
||||
env = neon_env_builder.init_start(initial_tenant_conf=tenant_conf)
|
||||
|
||||
|
||||
@@ -395,23 +395,6 @@ def test_max_wal_rate(neon_simple_env: NeonEnv):
|
||||
tuples = endpoint.safe_psql("SELECT backpressure_throttling_time();")
|
||||
assert tuples[0][0] == 0, "Backpressure throttling detected"
|
||||
|
||||
# 0 MB/s max_wal_rate. WAL proposer can still push some WALs but will be super slow.
|
||||
endpoint.safe_psql_many(
|
||||
[
|
||||
"ALTER SYSTEM SET databricks.max_wal_mb_per_second = 0;",
|
||||
"SELECT pg_reload_conf();",
|
||||
]
|
||||
)
|
||||
|
||||
# Write ~10 KB data should hit backpressure.
|
||||
with endpoint.cursor(dbname=DBNAME) as cur:
|
||||
cur.execute("SET databricks.max_wal_mb_per_second = 0;")
|
||||
for _ in range(0, 10):
|
||||
cur.execute("INSERT INTO usertable SELECT random(), repeat('a', 1000);")
|
||||
|
||||
tuples = endpoint.safe_psql("SELECT backpressure_throttling_time();")
|
||||
assert tuples[0][0] > 0, "No backpressure throttling detected"
|
||||
|
||||
# 1 MB/s max_wal_rate.
|
||||
endpoint.safe_psql_many(
|
||||
[
|
||||
|
||||
@@ -17,9 +17,6 @@ if TYPE_CHECKING:
|
||||
from typing import Any
|
||||
|
||||
|
||||
GET_CONNECTION_PID_QUERY = "SELECT pid FROM pg_stat_activity WHERE state = 'active'"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_http_pool_begin_1(static_proxy: NeonProxy):
|
||||
static_proxy.safe_psql("create user http_auth with password 'http' superuser")
|
||||
@@ -479,7 +476,7 @@ def test_sql_over_http_pool(static_proxy: NeonProxy):
|
||||
|
||||
def get_pid(status: int, pw: str, user="http_auth") -> Any:
|
||||
return static_proxy.http_query(
|
||||
GET_CONNECTION_PID_QUERY,
|
||||
"SELECT pg_backend_pid() as pid",
|
||||
[],
|
||||
user=user,
|
||||
password=pw,
|
||||
@@ -513,6 +510,35 @@ def test_sql_over_http_pool(static_proxy: NeonProxy):
|
||||
assert "password authentication failed for user" in res["message"]
|
||||
|
||||
|
||||
def test_sql_over_http_pool_settings(static_proxy: NeonProxy):
|
||||
static_proxy.safe_psql("create user http_auth with password 'http' superuser")
|
||||
|
||||
def multiquery(*queries) -> Any:
|
||||
results = static_proxy.http_multiquery(
|
||||
*queries,
|
||||
user="http_auth",
|
||||
password="http",
|
||||
expected_code=200,
|
||||
)
|
||||
|
||||
return [result["rows"] for result in results["results"]]
|
||||
|
||||
[[intervalstyle]] = static_proxy.safe_psql("SHOW IntervalStyle")
|
||||
assert intervalstyle == "postgres", "'postgres' is the default IntervalStyle in postgres"
|
||||
|
||||
result = multiquery("select '0 seconds'::interval as interval")
|
||||
assert result[0][0]["interval"] == "00:00:00", "interval is expected in postgres format"
|
||||
|
||||
result = multiquery(
|
||||
"SET IntervalStyle = 'iso_8601'",
|
||||
"select '0 seconds'::interval as interval",
|
||||
)
|
||||
assert result[1][0]["interval"] == "PT0S", "interval is expected in ISO-8601 format"
|
||||
|
||||
result = multiquery("select '0 seconds'::interval as interval")
|
||||
assert result[0][0]["interval"] == "00:00:00", "interval is expected in postgres format"
|
||||
|
||||
|
||||
def test_sql_over_http_urlencoding(static_proxy: NeonProxy):
|
||||
static_proxy.safe_psql("create user \"http+auth$$\" with password '%+$^&*@!' superuser")
|
||||
|
||||
@@ -544,23 +570,37 @@ def test_http_pool_begin(static_proxy: NeonProxy):
|
||||
query(200, "SELECT 1;") # Query that should succeed regardless of the transaction
|
||||
|
||||
|
||||
def test_sql_over_http_pool_idle(static_proxy: NeonProxy):
|
||||
def test_sql_over_http_pool_tx_reuse(static_proxy: NeonProxy):
|
||||
static_proxy.safe_psql("create user http_auth2 with password 'http' superuser")
|
||||
|
||||
def query(status: int, query: str) -> Any:
|
||||
def query(status: int, query: str, *args) -> Any:
|
||||
return static_proxy.http_query(
|
||||
query,
|
||||
[],
|
||||
args,
|
||||
user="http_auth2",
|
||||
password="http",
|
||||
expected_code=status,
|
||||
)
|
||||
|
||||
pid1 = query(200, GET_CONNECTION_PID_QUERY)["rows"][0]["pid"]
|
||||
def query_pid_txid() -> Any:
|
||||
result = query(
|
||||
200,
|
||||
"SELECT pg_backend_pid() as pid, pg_current_xact_id() as txid",
|
||||
)
|
||||
|
||||
return result["rows"][0]
|
||||
|
||||
res0 = query_pid_txid()
|
||||
|
||||
time.sleep(0.02)
|
||||
query(200, "BEGIN")
|
||||
pid2 = query(200, GET_CONNECTION_PID_QUERY)["rows"][0]["pid"]
|
||||
assert pid1 != pid2
|
||||
|
||||
res1 = query_pid_txid()
|
||||
res2 = query_pid_txid()
|
||||
|
||||
assert res0["pid"] == res1["pid"], "connection should be reused"
|
||||
assert res0["pid"] == res2["pid"], "connection should be reused"
|
||||
assert res1["txid"] != res2["txid"], "txid should be different"
|
||||
|
||||
|
||||
@pytest.mark.timeout(60)
|
||||
|
||||
@@ -90,6 +90,7 @@ def test_replica_promote(neon_simple_env: NeonEnv, method: PromoteMethod):
|
||||
secondary_cur.execute("select count(*) from t")
|
||||
assert secondary_cur.fetchone() == (100,)
|
||||
|
||||
primary_spec = primary.get_compute_spec()
|
||||
primary_endpoint_id = primary.endpoint_id
|
||||
stop_and_check_lsn(primary, expected_primary_lsn)
|
||||
|
||||
@@ -99,10 +100,9 @@ def test_replica_promote(neon_simple_env: NeonEnv, method: PromoteMethod):
|
||||
if method == PromoteMethod.COMPUTE_CTL:
|
||||
client = secondary.http_client()
|
||||
client.prewarm_lfc(primary_endpoint_id)
|
||||
# control plane knows safekeepers, simulate it by querying primary
|
||||
assert (lsn := primary.terminate_flush_lsn)
|
||||
safekeepers_lsn = {"safekeepers": safekeepers, "wal_flush_lsn": lsn}
|
||||
assert client.promote(safekeepers_lsn)["status"] == "completed"
|
||||
promote_spec = {"spec": primary_spec, "wal_flush_lsn": str(lsn)}
|
||||
assert client.promote(promote_spec)["status"] == "completed"
|
||||
else:
|
||||
promo_cur.execute(f"alter system set neon.safekeepers='{safekeepers}'")
|
||||
promo_cur.execute("select pg_reload_conf()")
|
||||
@@ -131,21 +131,35 @@ def test_replica_promote(neon_simple_env: NeonEnv, method: PromoteMethod):
|
||||
|
||||
lsn_triple = get_lsn_triple(new_primary_cur)
|
||||
log.info(f"Secondary: LSN after workload is {lsn_triple}")
|
||||
expected_promoted_lsn = Lsn(lsn_triple[2])
|
||||
expected_lsn = Lsn(lsn_triple[2])
|
||||
|
||||
with secondary.connect() as conn, conn.cursor() as new_primary_cur:
|
||||
new_primary_cur.execute("select payload from t")
|
||||
assert new_primary_cur.fetchall() == [(it,) for it in range(1, 201)]
|
||||
|
||||
if method == PromoteMethod.COMPUTE_CTL:
|
||||
# compute_ctl's /promote switches replica type to Primary so it syncs
|
||||
# safekeepers on finish
|
||||
stop_and_check_lsn(secondary, expected_promoted_lsn)
|
||||
# compute_ctl's /promote switches replica type to Primary so it syncs safekeepers on finish
|
||||
stop_and_check_lsn(secondary, expected_lsn)
|
||||
else:
|
||||
# on testing postgres, we don't update replica type, secondaries don't
|
||||
# sync so lsn should be None
|
||||
# on testing postgres, we don't update replica type, secondaries don't sync so lsn should be None
|
||||
stop_and_check_lsn(secondary, None)
|
||||
|
||||
if method == PromoteMethod.COMPUTE_CTL:
|
||||
secondary.stop()
|
||||
# In production, compute ultimately receives new compute spec from cplane.
|
||||
secondary.respec(mode="Primary")
|
||||
secondary.start()
|
||||
|
||||
with secondary.connect() as conn, conn.cursor() as new_primary_cur:
|
||||
new_primary_cur.execute(
|
||||
"INSERT INTO t (payload) SELECT generate_series(101, 200) RETURNING payload"
|
||||
)
|
||||
assert new_primary_cur.fetchall() == [(it,) for it in range(101, 201)]
|
||||
lsn_triple = get_lsn_triple(new_primary_cur)
|
||||
log.info(f"Secondary: LSN after restart and workload is {lsn_triple}")
|
||||
expected_lsn = Lsn(lsn_triple[2])
|
||||
stop_and_check_lsn(secondary, expected_lsn)
|
||||
|
||||
primary = env.endpoints.create_start(branch_name="main", endpoint_id="primary2")
|
||||
|
||||
with primary.connect() as new_primary, new_primary.cursor() as new_primary_cur:
|
||||
@@ -154,10 +168,11 @@ def test_replica_promote(neon_simple_env: NeonEnv, method: PromoteMethod):
|
||||
log.info(f"New primary: Boot LSN is {lsn_triple}")
|
||||
|
||||
new_primary_cur.execute("select count(*) from t")
|
||||
assert new_primary_cur.fetchone() == (200,)
|
||||
compute_ctl_count = 100 * (method == PromoteMethod.COMPUTE_CTL)
|
||||
assert new_primary_cur.fetchone() == (200 + compute_ctl_count,)
|
||||
new_primary_cur.execute("INSERT INTO t (payload) SELECT generate_series(201, 300)")
|
||||
new_primary_cur.execute("select count(*) from t")
|
||||
assert new_primary_cur.fetchone() == (300,)
|
||||
assert new_primary_cur.fetchone() == (300 + compute_ctl_count,)
|
||||
stop_and_check_lsn(primary, expected_primary_lsn)
|
||||
|
||||
|
||||
@@ -175,18 +190,91 @@ def test_replica_promote_handler_disconnects(neon_simple_env: NeonEnv):
        cur.execute("create schema neon;create extension neon with schema neon")
        cur.execute("create table t(pk bigint GENERATED ALWAYS AS IDENTITY, payload integer)")
        cur.execute("INSERT INTO t(payload) SELECT generate_series(1, 100)")
        cur.execute("show neon.safekeepers")
        safekeepers = cur.fetchall()[0][0]

    primary.http_client().offload_lfc()
    primary_spec = primary.get_compute_spec()
    primary_endpoint_id = primary.endpoint_id
    primary.stop(mode="immediate-terminate")
    assert (lsn := primary.terminate_flush_lsn)

    client = secondary.http_client()
    client.prewarm_lfc(primary_endpoint_id)
    safekeepers_lsn = {"safekeepers": safekeepers, "wal_flush_lsn": lsn}
    assert client.promote(safekeepers_lsn, disconnect=True)["status"] == "completed"
    promote_spec = {"spec": primary_spec, "wal_flush_lsn": str(lsn)}
    assert client.promote(promote_spec, disconnect=True)["status"] == "completed"

    with secondary.connect() as conn, conn.cursor() as cur:
        cur.execute("select count(*) from t")
        assert cur.fetchone() == (100,)
        cur.execute("INSERT INTO t (payload) SELECT generate_series(101, 200) RETURNING payload")
        cur.execute("select count(*) from t")
        assert cur.fetchone() == (200,)

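The promotion handshake exercised above and in the following tests always follows the same sequence: persist the primary's LFC state, capture its compute spec and last flushed WAL position, then hand both to the replica's /promote handler. A condensed sketch of that sequence, using only calls that appear in this diff (the promote_replica wrapper name itself is illustrative and not part of the repository):

def promote_replica(primary: Endpoint, secondary: Endpoint) -> None:
    # Sketch of the sequence the tests above perform inline.
    primary.http_client().offload_lfc()        # persist LFC state for later prewarming
    spec = primary.get_compute_spec()          # spec the promoted compute should run with
    primary_id = primary.endpoint_id
    primary.stop(mode="immediate-terminate")   # stop the primary, recording its flush LSN
    lsn = primary.terminate_flush_lsn
    assert lsn is not None

    client = secondary.http_client()
    client.prewarm_lfc(primary_id)             # warm the replica's LFC from the primary's state
    promote_spec = {"spec": spec, "wal_flush_lsn": str(lsn)}
    assert client.promote(promote_spec)["status"] == "completed"
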
@pytest.mark.skipif(not USE_LFC, reason="LFC is disabled, skipping")
def test_replica_promote_fails(neon_simple_env: NeonEnv):
    """
    Test that if the /promote route fails, we can safely start the primary back up
    """
    env: NeonEnv = neon_simple_env
    primary: Endpoint = env.endpoints.create_start(branch_name="main", endpoint_id="primary")
    secondary: Endpoint = env.endpoints.new_replica_start(origin=primary, endpoint_id="secondary")
    secondary.stop()
    secondary.start(env={"FAILPOINTS": "compute-promotion=return(0)"})

    with primary.connect() as conn, conn.cursor() as cur:
        cur.execute("create schema neon;create extension neon with schema neon")
        cur.execute("create table t(pk bigint GENERATED ALWAYS AS IDENTITY, payload integer)")
        cur.execute("INSERT INTO t(payload) SELECT generate_series(1, 100)")

    primary.http_client().offload_lfc()
    primary_spec = primary.get_compute_spec()
    primary_endpoint_id = primary.endpoint_id
    primary.stop(mode="immediate-terminate")
    assert (lsn := primary.terminate_flush_lsn)

    client = secondary.http_client()
    client.prewarm_lfc(primary_endpoint_id)
    promote_spec = {"spec": primary_spec, "wal_flush_lsn": str(lsn)}
    assert client.promote(promote_spec)["status"] == "failed"
    secondary.stop()

    primary.start()
    with primary.connect() as conn, conn.cursor() as cur:
        cur.execute("select count(*) from t")
        assert cur.fetchone() == (100,)
        cur.execute("INSERT INTO t (payload) SELECT generate_series(101, 200) RETURNING payload")
        cur.execute("select count(*) from t")
        assert cur.fetchone() == (200,)

@pytest.mark.skipif(not USE_LFC, reason="LFC is disabled, skipping")
def test_replica_promote_prewarm_fails(neon_simple_env: NeonEnv):
    """
    Test that if the /lfc/prewarm route fails, we are still able to promote
    """
    env: NeonEnv = neon_simple_env
    primary: Endpoint = env.endpoints.create_start(branch_name="main", endpoint_id="primary")
    secondary: Endpoint = env.endpoints.new_replica_start(origin=primary, endpoint_id="secondary")
    secondary.stop()
    secondary.start(env={"FAILPOINTS": "compute-prewarm=return(0)"})

    with primary.connect() as conn, conn.cursor() as cur:
        cur.execute("create schema neon;create extension neon with schema neon")
        cur.execute("create table t(pk bigint GENERATED ALWAYS AS IDENTITY, payload integer)")
        cur.execute("INSERT INTO t(payload) SELECT generate_series(1, 100)")

    primary.http_client().offload_lfc()
    primary_spec = primary.get_compute_spec()
    primary_endpoint_id = primary.endpoint_id
    primary.stop(mode="immediate-terminate")
    assert (lsn := primary.terminate_flush_lsn)

    client = secondary.http_client()
    with pytest.raises(AssertionError):
        client.prewarm_lfc(primary_endpoint_id)
    assert client.prewarm_lfc_status()["status"] == "failed"
    promote_spec = {"spec": primary_spec, "wal_flush_lsn": str(lsn)}
    assert client.promote(promote_spec)["status"] == "completed"

    with secondary.connect() as conn, conn.cursor() as cur:
        cur.execute("select count(*) from t")

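The two failure-injection tests above differ only in which compute failpoint is armed via the FAILPOINTS environment variable and in the expected outcome of /promote. A parametrized variant is sketched below; it is not part of the diff, reuses only the failpoint names and calls shown above, and elides the shared offload/stop/prewarm/promote steps:

@pytest.mark.skipif(not USE_LFC, reason="LFC is disabled, skipping")
@pytest.mark.parametrize(
    "failpoints,expected_status",
    [
        ("compute-promotion=return(0)", "failed"),    # /promote fails, primary restarts cleanly
        ("compute-prewarm=return(0)", "completed"),   # prewarm fails, promotion still succeeds
    ],
)
def test_replica_promote_with_failpoint(neon_simple_env: NeonEnv, failpoints, expected_status):
    # Sketch only: arm the failpoint on the secondary before it starts, exactly as
    # the two tests above do individually.
    env = neon_simple_env
    primary = env.endpoints.create_start(branch_name="main", endpoint_id="primary")
    secondary = env.endpoints.new_replica_start(origin=primary, endpoint_id="secondary")
    secondary.stop()
    secondary.start(env={"FAILPOINTS": failpoints})
    # ... then run the offload/stop/prewarm/promote sequence shown above and assert
    # that client.promote(promote_spec)["status"] == expected_status.
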
@@ -1508,20 +1508,55 @@ def test_sharding_split_failures(
    env.storage_controller.consistency_check()


@pytest.mark.skip(reason="The backpressure change has not been merged yet.")
# HADRON
def test_create_tenant_after_split(neon_env_builder: NeonEnvBuilder):
    """
    Tests that creating a tenant and a timeline fails after a tenant split.
    """
    env = neon_env_builder.init_start(initial_tenant_shard_count=4)

    env.storage_controller.allowed_errors.extend(
        [
            ".*already exists with a different shard count.*",
        ]
    )

    ep = env.endpoints.create_start("main", tenant_id=env.initial_tenant)
    ep.safe_psql("CREATE TABLE usertable ( YCSB_KEY INT, FIELD0 TEXT);")
    ep.safe_psql("INSERT INTO usertable VALUES (1, 'test1');")
    ep.safe_psql("INSERT INTO usertable VALUES (2, 'test2');")
    ep.safe_psql("INSERT INTO usertable VALUES (3, 'test3');")

    # Split the tenant

    env.storage_controller.tenant_shard_split(env.initial_tenant, shard_count=8)

    with pytest.raises(RuntimeError):
        env.create_tenant(env.initial_tenant, env.initial_timeline, shard_count=4)

    # run more queries
    ep.safe_psql("SELECT * FROM usertable;")
    ep.safe_psql("UPDATE usertable set FIELD0 = 'test4';")

    ep.stop_and_destroy()

# HADRON
def test_back_pressure_during_split(neon_env_builder: NeonEnvBuilder):
    """
    Test backpressure can ignore new shards during tenant split so that if we abort the split,
    PG can continue without being blocked.
    Test that backpressure works correctly during a shard split; in particular, after a split
    is aborted, PG must not get stuck forever.
    """
    DBNAME = "regression"

    init_shard_count = 4
    init_shard_count = 1
    neon_env_builder.num_pageservers = init_shard_count
    stripe_size = 32

    env = neon_env_builder.init_start(
        initial_tenant_shard_count=init_shard_count, initial_tenant_shard_stripe_size=stripe_size
        initial_tenant_shard_count=init_shard_count,
        initial_tenant_shard_stripe_size=stripe_size,
        initial_tenant_conf={
            "checkpoint_distance": 1024 * 1024 * 1024,
        },
    )

    env.storage_controller.allowed_errors.extend(
@@ -1537,19 +1572,31 @@ def test_back_pressure_during_split(neon_env_builder: NeonEnvBuilder):
        "main",
        config_lines=[
            "max_replication_write_lag = 1MB",
            "databricks.max_wal_mb_per_second = 1",
            "neon.max_cluster_size = 10GB",
            "databricks.max_wal_mb_per_second=100",
        ],
    )
    endpoint.respec(skip_pg_catalog_updates=False)  # Needed for databricks_system to get created.
    endpoint.respec(skip_pg_catalog_updates=False)
    endpoint.start()

    endpoint.safe_psql(f"CREATE DATABASE {DBNAME}")

    endpoint.safe_psql("CREATE TABLE usertable ( YCSB_KEY INT, FIELD0 TEXT);")
    # generate 10MB of data
    endpoint.safe_psql(
        "CREATE TABLE usertable AS SELECT s AS KEY, repeat('a', 1000) as VALUE from generate_series(1, 10000) s;"
    )
    write_done = Event()

    def write_data(write_done):
    def get_write_lag():
        res = endpoint.safe_psql(
            """
            SELECT
                pg_wal_lsn_diff(pg_current_wal_flush_lsn(), received_lsn) as received_lsn_lag
            FROM neon.backpressure_lsns();
            """,
            log_query=False,
        )
        return res[0][0]

    def write_data(write_done: Event):
        while not write_done.is_set():
            endpoint.safe_psql(
                "INSERT INTO usertable SELECT random(), repeat('a', 1000);", log_query=False
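The get_write_lag probe above is built on pg_wal_lsn_diff(), which returns the distance between two WAL positions in bytes; received_lsn_lag is therefore, roughly, the amount of WAL that PG has flushed but the pageserver has not yet received, and 0 means it is fully caught up. A standalone illustration of the units involved (not part of the diff, runnable against any endpoint with the same safe_psql helper):

# Illustration only: '0/2000060' is 0x60 = 96 bytes ahead of '0/2000000'.
res = endpoint.safe_psql("SELECT pg_wal_lsn_diff('0/2000060'::pg_lsn, '0/2000000'::pg_lsn)")
assert res[0][0] == 96
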
@@ -1560,35 +1607,39 @@ def test_back_pressure_during_split(neon_env_builder: NeonEnvBuilder):
    writer_thread.start()

    env.storage_controller.configure_failpoints(("shard-split-pre-complete", "return(1)"))
    # sleep 10 seconds before re-activating the old shard when aborting the split.
    # this is to add some backpressure to PG
    env.pageservers[0].http_client().configure_failpoints(
        ("attach-before-activate-sleep", "return(10000)"),
    )
    # split the tenant
    with pytest.raises(StorageControllerApiException):
        env.storage_controller.tenant_shard_split(env.initial_tenant, shard_count=16)
        env.storage_controller.tenant_shard_split(env.initial_tenant, shard_count=4)

    def check_tenant_status():
        status = (
            env.pageservers[0].http_client().tenant_status(TenantShardId(env.initial_tenant, 0, 1))
        )
        assert status["state"]["slug"] == "Active"

    wait_until(check_tenant_status)

    write_done.set()
    writer_thread.join()

    log.info(f"current write lag: {get_write_lag()}")

    # write more data to pageservers after the split is aborted
    for _i in range(5000):
        endpoint.safe_psql(
            "INSERT INTO usertable SELECT random(), repeat('a', 1000);", log_query=False
        )
    with endpoint.cursor() as cur:
        for _i in range(1000):
            cur.execute("INSERT INTO usertable SELECT random(), repeat('a', 1000);")

    # wait until write lag becomes 0
    def check_write_lag_is_zero():
        res = endpoint.safe_psql(
            """
            SELECT
                pg_wal_lsn_diff(pg_current_wal_flush_lsn(), received_lsn) as received_lsn_lag
            FROM neon.backpressure_lsns();
            """,
            dbname="databricks_system",
            log_query=False,
        )
        log.info(f"received_lsn_lag = {res[0][0]}")
        assert res[0][0] == 0
        res = get_write_lag()
        assert res == 0

    wait_until(check_write_lag_is_zero)
    endpoint.stop_and_destroy()


# BEGIN_HADRON
@@ -1674,7 +1725,6 @@ def test_shard_resolve_during_split_abort(neon_env_builder: NeonEnvBuilder):


# HADRON
@pytest.mark.skip(reason="The backpressure change has not been merged yet.")
def test_back_pressure_per_shard(neon_env_builder: NeonEnvBuilder):
    """
    Tests that backpressure knobs are enforced on a per-shard basis instead of at the tenant level.
@@ -1701,22 +1751,19 @@ def test_back_pressure_per_shard(neon_env_builder: NeonEnvBuilder):
            "max_replication_apply_lag = 0",
            "max_replication_flush_lag = 15MB",
            "neon.max_cluster_size = 10GB",
            "neon.lakebase_mode = true",
        ],
    )
    endpoint.respec(skip_pg_catalog_updates=False)  # Needed for databricks_system to get created.
    endpoint.respec(skip_pg_catalog_updates=False)
    endpoint.start()

    # generate 20MB of data
    endpoint.safe_psql(
        "CREATE TABLE usertable AS SELECT s AS KEY, repeat('a', 1000) as VALUE from generate_series(1, 20000) s;"
    )
    res = endpoint.safe_psql(
        "SELECT neon.backpressure_throttling_time() as throttling_time", dbname="databricks_system"
    )[0]
    res = endpoint.safe_psql("SELECT neon.backpressure_throttling_time() as throttling_time")[0]
    assert res[0] == 0, f"throttling_time should be 0, but got {res[0]}"

    endpoint.stop()


# HADRON
def test_shard_split_page_server_timeout(neon_env_builder: NeonEnvBuilder):
@@ -1880,14 +1927,14 @@ def test_sharding_backpressure(neon_env_builder: NeonEnvBuilder):
    shards_info()

    for _write_iter in range(30):
        # approximately 1MB of data
        workload.write_rows(8000, upload=False)
        # approximately 10MB of data
        workload.write_rows(80000, upload=False)
        update_write_lsn()
        infos = shards_info()
        min_lsn = min(Lsn(info["last_record_lsn"]) for info in infos)
        max_lsn = max(Lsn(info["last_record_lsn"]) for info in infos)
        diff = max_lsn - min_lsn
        assert diff < 2 * 1024 * 1024, f"LSN diff={diff}, expected diff < 2MB due to backpressure"
        assert diff < 8 * 1024 * 1024, f"LSN diff={diff}, expected diff < 8MB due to backpressure"

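The bound asserted above relies on Lsn subtraction returning a plain byte count, so the "< 8MB" check caps how far apart the shards' last_record_lsn positions may drift. A small illustration of that arithmetic (not part of the diff, using the same Lsn fixture type and assuming its subtraction yields bytes, as the assertion above implies):

# Illustration only: 0x1800000 - 0x1000000 = 0x800000 bytes = 8 MiB.
low = Lsn("0/1000000")
high = Lsn("0/1800000")
assert high - low == 8 * 1024 * 1024
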
def test_sharding_unlogged_relation(neon_env_builder: NeonEnvBuilder):