Mirror of https://github.com/neondatabase/neon.git (synced 2026-01-07 21:42:56 +00:00)

Commit: Merge remote-tracking branch 'origin' into vlad/hadron-jwt
@@ -21,13 +21,14 @@ platforms = [
    # "x86_64-apple-darwin",
    # "x86_64-pc-windows-msvc",
]

[final-excludes]
workspace-members = [
    # vm_monitor benefits from the same Cargo.lock as the rest of our artifacts, but
    # it is built primarly in separate repo neondatabase/autoscaling and thus is excluded
    # from depending on workspace-hack because most of the dependencies are not used.
    "vm_monitor",
    # subzero-core is a stub crate that should be excluded from workspace-hack
    "subzero-core",
    # All of these exist in libs and are not usually built independently.
    # Putting workspace hack there adds a bottleneck for cargo builds.
    "compute_api",
.github/actions/prepare-for-subzero/action.yml (vendored, new file, 28 lines)

@@ -0,0 +1,28 @@
name: 'Prepare current job for subzero'
description: >
  Set git token to access `neondatabase/subzero` from cargo build,
  and set `CARGO_NET_GIT_FETCH_WITH_CLI=true` env variable to use git CLI

inputs:
  token:
    description: 'GitHub token with access to neondatabase/subzero'
    required: true

runs:
  using: "composite"

  steps:
    - name: Set git token for neondatabase/subzero
      uses: pyTooling/Actions/with-post-step@2307b526df64d55e95884e072e49aac2a00a9afa # v5.1.0
      env:
        SUBZERO_ACCESS_TOKEN: ${{ inputs.token }}
      with:
        main: |
          git config --global url."https://x-access-token:${SUBZERO_ACCESS_TOKEN}@github.com/neondatabase/subzero".insteadOf "https://github.com/neondatabase/subzero"
          cargo add -p proxy subzero-core --git https://github.com/neondatabase/subzero --rev 396264617e78e8be428682f87469bb25429af88a
        post: |
          git config --global --unset url."https://x-access-token:${SUBZERO_ACCESS_TOKEN}@github.com/neondatabase/subzero".insteadOf "https://github.com/neondatabase/subzero"

    - name: Set `CARGO_NET_GIT_FETCH_WITH_CLI=true` env variable
      shell: bash -euxo pipefail {0}
      run: echo "CARGO_NET_GIT_FETCH_WITH_CLI=true" >> ${GITHUB_ENV}
@@ -86,6 +86,10 @@ jobs:
        with:
          submodules: true

      - uses: ./.github/actions/prepare-for-subzero
        with:
          token: ${{ secrets.CI_ACCESS_TOKEN }}

      - name: Set pg 14 revision for caching
        id: pg_v14_rev
        run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) >> $GITHUB_OUTPUT
@@ -116,7 +120,7 @@ jobs:
          ARCH: ${{ inputs.arch }}
          SANITIZERS: ${{ inputs.sanitizers }}
        run: |
          CARGO_FLAGS="--locked --features testing"
          CARGO_FLAGS="--locked --features testing,rest_broker"
          if [[ $BUILD_TYPE == "debug" && $ARCH == 'x64' ]]; then
            cov_prefix="scripts/coverage --profraw-prefix=$GITHUB_JOB --dir=/tmp/coverage run"
            CARGO_PROFILE=""
4
.github/workflows/_check-codestyle-rust.yml
vendored
4
.github/workflows/_check-codestyle-rust.yml
vendored
@@ -46,6 +46,10 @@ jobs:
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
|
||||
- uses: ./.github/actions/prepare-for-subzero
|
||||
with:
|
||||
token: ${{ secrets.CI_ACCESS_TOKEN }}
|
||||
|
||||
- name: Cache cargo deps
|
||||
uses: tespkg/actions-cache@b7bf5fcc2f98a52ac6080eb0fd282c2f752074b1 # v1.8.0
|
||||
|
||||
.github/workflows/build-macos.yml (vendored, 4 changed lines)

@@ -54,6 +54,10 @@ jobs:
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          submodules: true

      - uses: ./.github/actions/prepare-for-subzero
        with:
          token: ${{ secrets.CI_ACCESS_TOKEN }}

      - name: Install build dependencies
        run: |
.github/workflows/build_and_test.yml (vendored, 2 changed lines)

@@ -632,6 +632,8 @@ jobs:
        BUILD_TAG=${{ needs.meta.outputs.release-tag || needs.meta.outputs.build-tag }}
        TAG=${{ needs.build-build-tools-image.outputs.image-tag }}-bookworm
        DEBIAN_VERSION=bookworm
      secrets: |
        SUBZERO_ACCESS_TOKEN=${{ secrets.CI_ACCESS_TOKEN }}
      provenance: false
      push: true
      pull: true
.github/workflows/neon_extra_builds.yml (vendored, 1 changed line)

@@ -72,6 +72,7 @@ jobs:
  check-macos-build:
    needs: [ check-permissions, files-changed ]
    uses: ./.github/workflows/build-macos.yml
    secrets: inherit
    with:
      pg_versions: ${{ needs.files-changed.outputs.postgres_changes }}
      rebuild_rust_code: ${{ fromJSON(needs.files-changed.outputs.rebuild_rust_code) }}
.gitignore (vendored, 5 changed lines)

@@ -26,9 +26,14 @@ docker-compose/docker-compose-parallel.yml
*.o
*.so
*.Po
*.pid

# pgindent typedef lists
*.list

# Node
**/node_modules/

# various files for local testing
/proxy/.subzero
local_proxy.json
Cargo.lock (generated, 389 changed lines): file diff suppressed because it is too large
Cargo.toml (22 changed lines)

@@ -49,6 +49,7 @@ members = [
    "libs/proxy/tokio-postgres2",
    "endpoint_storage",
    "pgxn/neon/communicator",
    "proxy/subzero_core",
]

[workspace.package]
@@ -143,10 +144,10 @@ num_cpus = "1.15"
num-traits = "0.2.19"
oid-registry = "0.7.1"
once_cell = "1.13"
opentelemetry = "0.27"
opentelemetry_sdk = "0.27"
opentelemetry-otlp = { version = "0.27", default-features = false, features = ["http-proto", "trace", "http", "reqwest-client"] }
opentelemetry-semantic-conventions = "0.27"
opentelemetry = "0.30"
opentelemetry_sdk = "0.30"
opentelemetry-otlp = { version = "0.30", default-features = false, features = ["http-proto", "trace", "http", "reqwest-client"] }
opentelemetry-semantic-conventions = "0.30"
parking_lot = "0.12"
parquet = { version = "53", default-features = false, features = ["zstd"] }
parquet_derive = "53"
@@ -158,11 +159,13 @@ procfs = "0.16"
prometheus = {version = "0.13", default-features=false, features = ["process"]} # removes protobuf dependency
prost = "0.13.5"
prost-types = "0.13.5"
rand = "0.8"
rand = "0.9"
# Remove after p256 is updated to 0.14.
rand_core = "=0.6"
redis = { version = "0.29.2", features = ["tokio-rustls-comp", "keep-alive"] }
regex = "1.10.2"
reqwest = { version = "0.12", default-features = false, features = ["rustls-tls"] }
reqwest-tracing = { version = "0.5", features = ["opentelemetry_0_27"] }
reqwest-tracing = { version = "0.5", features = ["opentelemetry_0_30"] }
reqwest-middleware = "0.4"
reqwest-retry = "0.7"
routerify = "3"
@@ -213,15 +216,12 @@ tonic = { version = "0.13.1", default-features = false, features = ["channel", "
tonic-reflection = { version = "0.13.1", features = ["server"] }
tower = { version = "0.5.2", default-features = false }
tower-http = { version = "0.6.2", features = ["auth", "request-id", "trace"] }

# This revision uses opentelemetry 0.27. There's no tag for it.
tower-otel = { git = "https://github.com/mattiapenati/tower-otel", rev = "56a7321053bcb72443888257b622ba0d43a11fcd" }

tower-otel = { version = "0.6", features = ["axum"] }
tower-service = "0.3.3"
tracing = "0.1"
tracing-error = "0.2"
tracing-log = "0.2"
tracing-opentelemetry = "0.28"
tracing-opentelemetry = "0.31"
tracing-serde = "0.2.0"
tracing-subscriber = { version = "0.3", default-features = false, features = ["smallvec", "fmt", "tracing-log", "std", "env-filter", "json"] }
try-lock = "0.2.5"
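For reference, the rand 0.8 to 0.9 bump above renames several core APIs, and the call-site changes later in this diff all follow the same mapping. A minimal sketch (not part of the commit) of the new names:

    // rand 0.9 renames used throughout this commit:
    //   rand::thread_rng()                -> rand::rng()
    //   Rng::gen::<T>()                   -> Rng::random::<T>()
    //   Rng::gen_range(..) / gen_bool(..) -> Rng::random_range(..) / random_bool(..)
    //   rand::distributions::Standard     -> rand::distr::StandardUniform
    use rand::Rng;

    fn rand_09_example() -> (u32, u64, bool) {
        let mut rng = rand::rng();
        (rng.random(), rng.random_range(0..100u64), rng.random_bool(0.5))
    }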
Dockerfile (26 changed lines)

@@ -63,7 +63,14 @@ WORKDIR /home/nonroot

COPY --chown=nonroot . .

RUN cargo chef prepare --recipe-path recipe.json
RUN --mount=type=secret,uid=1000,id=SUBZERO_ACCESS_TOKEN \
    set -e \
    && if [ -s /run/secrets/SUBZERO_ACCESS_TOKEN ]; then \
        export CARGO_NET_GIT_FETCH_WITH_CLI=true && \
        git config --global url."https://$(cat /run/secrets/SUBZERO_ACCESS_TOKEN)@github.com/neondatabase/subzero".insteadOf "https://github.com/neondatabase/subzero" && \
        cargo add -p proxy subzero-core --git https://github.com/neondatabase/subzero --rev 396264617e78e8be428682f87469bb25429af88a; \
    fi \
    && cargo chef prepare --recipe-path recipe.json

# Main build image
FROM $REPOSITORY/$IMAGE:$TAG AS build
@@ -71,20 +78,33 @@ WORKDIR /home/nonroot
ARG GIT_VERSION=local
ARG BUILD_TAG
ARG ADDITIONAL_RUSTFLAGS=""
ENV CARGO_FEATURES="default"

# 3. Build cargo dependencies. Note that this step doesn't depend on anything else than
# `recipe.json`, so the layer can be reused as long as none of the dependencies change.
COPY --from=plan /home/nonroot/recipe.json recipe.json
RUN set -e \
RUN --mount=type=secret,uid=1000,id=SUBZERO_ACCESS_TOKEN \
    set -e \
    && if [ -s /run/secrets/SUBZERO_ACCESS_TOKEN ]; then \
        export CARGO_NET_GIT_FETCH_WITH_CLI=true && \
        git config --global url."https://$(cat /run/secrets/SUBZERO_ACCESS_TOKEN)@github.com/neondatabase/subzero".insteadOf "https://github.com/neondatabase/subzero"; \
    fi \
    && RUSTFLAGS="-Clinker=clang -Clink-arg=-fuse-ld=mold -Clink-arg=-Wl,--no-rosegment -Cforce-frame-pointers=yes ${ADDITIONAL_RUSTFLAGS}" cargo chef cook --locked --release --recipe-path recipe.json

# Perform the main build. We reuse the Postgres build artifacts from the intermediate 'pg-build'
# layer, and the cargo dependencies built in the previous step.
COPY --chown=nonroot --from=pg-build /home/nonroot/pg_install/ pg_install
COPY --chown=nonroot . .
COPY --chown=nonroot --from=plan /home/nonroot/proxy/Cargo.toml proxy/Cargo.toml
COPY --chown=nonroot --from=plan /home/nonroot/Cargo.lock Cargo.lock

RUN set -e \
RUN --mount=type=secret,uid=1000,id=SUBZERO_ACCESS_TOKEN \
    set -e \
    && if [ -s /run/secrets/SUBZERO_ACCESS_TOKEN ]; then \
        export CARGO_FEATURES="rest_broker"; \
    fi \
    && RUSTFLAGS="-Clinker=clang -Clink-arg=-fuse-ld=mold -Clink-arg=-Wl,--no-rosegment -Cforce-frame-pointers=yes ${ADDITIONAL_RUSTFLAGS}" cargo build \
        --features $CARGO_FEATURES \
        --bin pg_sni_router \
        --bin pageserver \
        --bin pagectl \
@@ -133,7 +133,7 @@ RUN case $DEBIAN_VERSION in \
    # Install newer version (3.25) from backports.
    # libstdc++-10-dev is required for plv8
    bullseye) \
        echo "deb http://deb.debian.org/debian bullseye-backports main" > /etc/apt/sources.list.d/bullseye-backports.list; \
        echo "deb http://archive.debian.org/debian bullseye-backports main" > /etc/apt/sources.list.d/bullseye-backports.list; \
        VERSION_INSTALLS="cmake/bullseye-backports cmake-data/bullseye-backports libstdc++-10-dev"; \
        ;; \
    # Version-specific installs for Bookworm (PG17):
@@ -27,7 +27,10 @@ fail.workspace = true
flate2.workspace = true
futures.workspace = true
http.workspace = true
http-body-util.workspace = true
hostname-validator = "1.1"
hyper.workspace = true
hyper-util.workspace = true
indexmap.workspace = true
itertools.workspace = true
jsonwebtoken.workspace = true
@@ -44,6 +47,7 @@ postgres.workspace = true
regex.workspace = true
reqwest = { workspace = true, features = ["json"] }
ring = "0.17"
scopeguard.workspace = true
serde.workspace = true
serde_with.workspace = true
serde_json.workspace = true
@@ -138,6 +138,12 @@ struct Cli {
    /// Run in development mode, skipping VM-specific operations like process termination
    #[arg(long, action = clap::ArgAction::SetTrue)]
    pub dev: bool,

    #[arg(long)]
    pub pg_init_timeout: Option<u64>,

    #[arg(long, default_value_t = false, action = clap::ArgAction::Set)]
    pub lakebase_mode: bool,
}

impl Cli {
@@ -188,7 +194,7 @@ fn main() -> Result<()> {
        .build()?;
    let _rt_guard = runtime.enter();

    runtime.block_on(init(cli.dev))?;
    let tracing_provider = init(cli.dev)?;

    // enable core dumping for all child processes
    setrlimit(Resource::CORE, rlimit::INFINITY, rlimit::INFINITY)?;
@@ -219,6 +225,8 @@ fn main() -> Result<()> {
            installed_extensions_collection_interval: Arc::new(AtomicU64::new(
                cli.installed_extensions_collection_interval,
            )),
            pg_init_timeout: cli.pg_init_timeout.map(Duration::from_secs),
            lakebase_mode: cli.lakebase_mode,
        },
        config,
    )?;
@@ -227,11 +235,11 @@ fn main() -> Result<()> {

    scenario.teardown();

    deinit_and_exit(exit_code);
    deinit_and_exit(tracing_provider, exit_code);
}

async fn init(dev_mode: bool) -> Result<()> {
    init_tracing_and_logging(DEFAULT_LOG_LEVEL).await?;
fn init(dev_mode: bool) -> Result<Option<tracing_utils::Provider>> {
    let provider = init_tracing_and_logging(DEFAULT_LOG_LEVEL)?;

    let mut signals = Signals::new([SIGINT, SIGTERM, SIGQUIT])?;
    thread::spawn(move || {
@@ -242,7 +250,7 @@ async fn init(dev_mode: bool) -> Result<()> {

    info!("compute build_tag: {}", &BUILD_TAG.to_string());

    Ok(())
    Ok(provider)
}

fn get_config(cli: &Cli) -> Result<ComputeConfig> {
@@ -267,25 +275,27 @@ fn get_config(cli: &Cli) -> Result<ComputeConfig> {
    }
}

fn deinit_and_exit(exit_code: Option<i32>) -> ! {
    // Shutdown trace pipeline gracefully, so that it has a chance to send any
    // pending traces before we exit. Shutting down OTEL tracing provider may
    // hang for quite some time, see, for example:
    // - https://github.com/open-telemetry/opentelemetry-rust/issues/868
    // - and our problems with staging https://github.com/neondatabase/cloud/issues/3707#issuecomment-1493983636
    //
    // Yet, we want computes to shut down fast enough, as we may need a new one
    // for the same timeline ASAP. So wait no longer than 2s for the shutdown to
    // complete, then just error out and exit the main thread.
    info!("shutting down tracing");
    let (sender, receiver) = mpsc::channel();
    let _ = thread::spawn(move || {
        tracing_utils::shutdown_tracing();
        sender.send(()).ok()
    });
    let shutdown_res = receiver.recv_timeout(Duration::from_millis(2000));
    if shutdown_res.is_err() {
        error!("timed out while shutting down tracing, exiting anyway");
fn deinit_and_exit(tracing_provider: Option<tracing_utils::Provider>, exit_code: Option<i32>) -> ! {
    if let Some(p) = tracing_provider {
        // Shutdown trace pipeline gracefully, so that it has a chance to send any
        // pending traces before we exit. Shutting down OTEL tracing provider may
        // hang for quite some time, see, for example:
        // - https://github.com/open-telemetry/opentelemetry-rust/issues/868
        // - and our problems with staging https://github.com/neondatabase/cloud/issues/3707#issuecomment-1493983636
        //
        // Yet, we want computes to shut down fast enough, as we may need a new one
        // for the same timeline ASAP. So wait no longer than 2s for the shutdown to
        // complete, then just error out and exit the main thread.
        info!("shutting down tracing");
        let (sender, receiver) = mpsc::channel();
        let _ = thread::spawn(move || {
            _ = p.shutdown();
            sender.send(()).ok()
        });
        let shutdown_res = receiver.recv_timeout(Duration::from_millis(2000));
        if shutdown_res.is_err() {
            error!("timed out while shutting down tracing, exiting anyway");
        }
    }

    info!("shutting down");
compute_tools/src/communicator_socket_client.rs (new file, 98 lines)

@@ -0,0 +1,98 @@
//! Client for making request to a running Postgres server's communicator control socket.
//!
//! The storage communicator process that runs inside Postgres exposes an HTTP endpoint in
//! a Unix Domain Socket in the Postgres data directory. This provides access to it.

use std::path::Path;

use anyhow::Context;
use hyper::client::conn::http1::SendRequest;
use hyper_util::rt::TokioIo;

/// Name of the socket within the Postgres data directory. This better match that in
/// `pgxn/neon/communicator/src/lib.rs`.
const NEON_COMMUNICATOR_SOCKET_NAME: &str = "neon-communicator.socket";

/// Open a connection to the communicator's control socket, prepare to send requests to it
/// with hyper.
pub async fn connect_communicator_socket<B>(pgdata: &Path) -> anyhow::Result<SendRequest<B>>
where
    B: hyper::body::Body + 'static + Send,
    B::Data: Send,
    B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
{
    let socket_path = pgdata.join(NEON_COMMUNICATOR_SOCKET_NAME);
    let socket_path_len = socket_path.display().to_string().len();

    // There is a limit of around 100 bytes (108 on Linux?) on the length of the path to a
    // Unix Domain socket. The limit is on the connect(2) function used to open the
    // socket, not on the absolute path itself. Postgres changes the current directory to
    // the data directory and uses a relative path to bind to the socket, and the relative
    // path "./neon-communicator.socket" is always short, but when compute_ctl needs to
    // open the socket, we need to use a full path, which can be arbitrarily long.
    //
    // There are a few ways we could work around this:
    //
    // 1. Change the current directory to the Postgres data directory and use a relative
    //    path in the connect(2) call. That's problematic because the current directory
    //    applies to the whole process. We could change the current directory early in
    //    compute_ctl startup, and that might be a good idea anyway for other reasons too:
    //    it would be more robust if the data directory is moved around or unlinked for
    //    some reason, and you would be less likely to accidentally litter other parts of
    //    the filesystem with e.g. temporary files. However, that's a pretty invasive
    //    change.
    //
    // 2. On Linux, you could open() the data directory, and refer to the the socket
    //    inside it as "/proc/self/fd/<fd>/neon-communicator.socket". But that's
    //    Linux-only.
    //
    // 3. Create a symbolic link to the socket with a shorter path, and use that.
    //
    // We use the symbolic link approach here. Hopefully the paths we use in production
    // are shorter, so that we can open the socket directly, so that this hack is needed
    // only in development.
    let connect_result = if socket_path_len < 100 {
        // We can open the path directly with no hacks.
        tokio::net::UnixStream::connect(socket_path).await
    } else {
        // The path to the socket is too long. Create a symlink to it with a shorter path.
        let short_path = std::env::temp_dir().join(format!(
            "compute_ctl.short-socket.{}.{}",
            std::process::id(),
            tokio::task::id()
        ));
        std::os::unix::fs::symlink(&socket_path, &short_path)?;

        // Delete the symlink as soon as we have connected to it. There's a small chance
        // of leaking if the process dies before we remove it, so try to keep that window
        // as small as possible.
        scopeguard::defer! {
            if let Err(err) = std::fs::remove_file(&short_path) {
                tracing::warn!("could not remove symlink \"{}\" created for socket: {}",
                               short_path.display(), err);
            }
        }

        tracing::info!(
            "created symlink \"{}\" for socket \"{}\", opening it now",
            short_path.display(),
            socket_path.display()
        );

        tokio::net::UnixStream::connect(&short_path).await
    };

    let stream = connect_result.context("connecting to communicator control socket")?;

    let io = TokioIo::new(stream);
    let (request_sender, connection) = hyper::client::conn::http1::handshake(io).await?;

    // spawn a task to poll the connection and drive the HTTP state
    tokio::spawn(async move {
        if let Err(err) = connection.await {
            eprintln!("Error in connection: {err}");
        }
    });

    Ok(request_sender)
}
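A short usage sketch for the helper above (assumed caller, not from this commit); the real forwarding handler that uses it appears further down in this diff:

    // Sketch: send one GET request over the communicator control socket.
    // The URI and the use of axum's Body mirror the handler below and are assumptions.
    async fn fetch_from_communicator(
        pgdata: &std::path::Path,
    ) -> anyhow::Result<hyper::Response<hyper::body::Incoming>> {
        use anyhow::Context;
        let mut sender = connect_communicator_socket(pgdata).await?;
        let request = hyper::Request::builder()
            .method("GET")
            .uri("/autoscaling_metrics")
            .header("Host", "localhost") // hyper requires a Host header
            .body(axum::body::Body::from(""))?;
        sender
            .send_request(request)
            .await
            .context("sending request over communicator control socket")
    }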
@@ -113,6 +113,11 @@ pub struct ComputeNodeParams {

    /// Interval for installed extensions collection
    pub installed_extensions_collection_interval: Arc<AtomicU64>,

    /// Timeout of PG compute startup in the Init state.
    pub pg_init_timeout: Option<Duration>,

    pub lakebase_mode: bool,
}

type TaskHandle = Mutex<Option<JoinHandle<()>>>;
@@ -154,6 +159,7 @@ pub struct RemoteExtensionMetrics {
#[derive(Clone, Debug)]
pub struct ComputeState {
    pub start_time: DateTime<Utc>,
    pub pg_start_time: Option<DateTime<Utc>>,
    pub status: ComputeStatus,
    /// Timestamp of the last Postgres activity. It could be `None` if
    /// compute wasn't used since start.
@@ -191,6 +197,7 @@ impl ComputeState {
    pub fn new() -> Self {
        Self {
            start_time: Utc::now(),
            pg_start_time: None,
            status: ComputeStatus::Empty,
            last_active: None,
            error: None,
@@ -648,6 +655,9 @@ impl ComputeNode {
            };
            _this_entered = start_compute_span.enter();

            // Hadron: Record postgres start time (used to enforce pg_init_timeout).
            state_guard.pg_start_time.replace(Utc::now());

            state_guard.set_status(ComputeStatus::Init, &self.state_changed);
            compute_state = state_guard.clone()
        }
@@ -1441,7 +1451,7 @@ impl ComputeNode {
        })?;

        // Update pg_hba.conf received with basebackup.
        update_pg_hba(pgdata_path)?;
        update_pg_hba(pgdata_path, None)?;

        // Place pg_dynshmem under /dev/shm. This allows us to use
        // 'dynamic_shared_memory_type = mmap' so that the files are placed in
@@ -1746,6 +1756,7 @@ impl ComputeNode {
        }

        // Run migrations separately to not hold up cold starts
        let lakebase_mode = self.params.lakebase_mode;
        let params = self.params.clone();
        tokio::spawn(async move {
            let mut conf = conf.as_ref().clone();
@@ -1758,7 +1769,7 @@ impl ComputeNode {
                    eprintln!("connection error: {e}");
                }
            });
            if let Err(e) = handle_migrations(params, &mut client).await {
            if let Err(e) = handle_migrations(params, &mut client, lakebase_mode).await {
                error!("Failed to run migrations: {}", e);
            }
        }
compute_tools/src/hadron_metrics.rs (new file, 60 lines)

@@ -0,0 +1,60 @@
use metrics::{
    IntCounter, IntGaugeVec, core::Collector, proto::MetricFamily, register_int_counter,
    register_int_gauge_vec,
};
use once_cell::sync::Lazy;

// Counter keeping track of the number of PageStream request errors reported by Postgres.
// An error is registered every time Postgres calls compute_ctl's /refresh_configuration API.
// Postgres will invoke this API if it detected trouble with PageStream requests (get_page@lsn,
// get_base_backup, etc.) it sends to any pageserver. An increase in this counter value typically
// indicates Postgres downtime, as PageStream requests are critical for Postgres to function.
pub static POSTGRES_PAGESTREAM_REQUEST_ERRORS: Lazy<IntCounter> = Lazy::new(|| {
    register_int_counter!(
        "pg_cctl_pagestream_request_errors_total",
        "Number of PageStream request errors reported by the postgres process"
    )
    .expect("failed to define a metric")
});

// Counter keeping track of the number of compute configuration errors due to Postgres statement
// timeouts. An error is registered every time `ComputeNode::reconfigure()` fails due to Postgres
// error code 57014 (query cancelled). This statement timeout typically occurs when postgres is
// stuck in a problematic retry loop when the PS is reject its connection requests (usually due
// to PG pointing at the wrong PS). We should investigate the root cause when this counter value
// increases by checking PG and PS logs.
pub static COMPUTE_CONFIGURE_STATEMENT_TIMEOUT_ERRORS: Lazy<IntCounter> = Lazy::new(|| {
    register_int_counter!(
        "pg_cctl_configure_statement_timeout_errors_total",
        "Number of compute configuration errors due to Postgres statement timeouts."
    )
    .expect("failed to define a metric")
});

pub static COMPUTE_ATTACHED: Lazy<IntGaugeVec> = Lazy::new(|| {
    register_int_gauge_vec!(
        "pg_cctl_attached",
        "Compute node attached status (1 if attached)",
        &[
            "pg_compute_id",
            "pg_instance_id",
            "tenant_id",
            "timeline_id"
        ]
    )
    .expect("failed to define a metric")
});

pub fn collect() -> Vec<MetricFamily> {
    let mut metrics = Vec::new();
    metrics.extend(POSTGRES_PAGESTREAM_REQUEST_ERRORS.collect());
    metrics.extend(COMPUTE_CONFIGURE_STATEMENT_TIMEOUT_ERRORS.collect());
    metrics.extend(COMPUTE_ATTACHED.collect());
    metrics
}

pub fn initialize_metrics() {
    Lazy::force(&POSTGRES_PAGESTREAM_REQUEST_ERRORS);
    Lazy::force(&COMPUTE_CONFIGURE_STATEMENT_TIMEOUT_ERRORS);
    Lazy::force(&COMPUTE_ATTACHED);
}
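A minimal sketch (not part of the commit) of how these collectors are exercised: bump a counter at the error site, set the attached gauge, and render whatever collect() returns with the text encoder; the label values below are illustrative:

    use metrics::{Encoder, TextEncoder};

    // Sketch only: record one PageStream error and encode the Hadron metric families.
    fn render_hadron_metrics() -> anyhow::Result<String> {
        POSTGRES_PAGESTREAM_REQUEST_ERRORS.inc();
        COMPUTE_ATTACHED
            .with_label_values(&["compute-0", "instance-0", "tenant-id", "timeline-id"])
            .set(1);

        let families = collect();
        let mut buf = Vec::new();
        TextEncoder::new().encode(&families, &mut buf)?;
        Ok(String::from_utf8(buf)?)
    }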
@@ -1,10 +1,18 @@
use std::path::Path;
use std::sync::Arc;

use anyhow::Context;
use axum::body::Body;
use axum::extract::State;
use axum::response::Response;
use http::StatusCode;
use http::header::CONTENT_TYPE;
use http_body_util::BodyExt;
use hyper::{Request, StatusCode};
use metrics::proto::MetricFamily;
use metrics::{Encoder, TextEncoder};

use crate::communicator_socket_client::connect_communicator_socket;
use crate::compute::ComputeNode;
use crate::http::JsonResponse;
use crate::metrics::collect;

@@ -31,3 +39,42 @@ pub(in crate::http) async fn get_metrics() -> Response {
        .body(Body::from(buffer))
        .unwrap()
}

/// Fetch and forward metrics from the Postgres neon extension's metrics
/// exporter that are used by autoscaling-agent.
///
/// The neon extension exposes these metrics over a Unix domain socket
/// in the data directory. That's not accessible directly from the outside
/// world, so we have this endpoint in compute_ctl to expose it
pub(in crate::http) async fn get_autoscaling_metrics(
    State(compute): State<Arc<ComputeNode>>,
) -> Result<Response, Response> {
    let pgdata = Path::new(&compute.params.pgdata);

    // Connect to the communicator process's metrics socket
    let mut metrics_client = connect_communicator_socket(pgdata)
        .await
        .map_err(|e| JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, format!("{e:#}")))?;

    // Make a request for /autoscaling_metrics
    let request = Request::builder()
        .method("GET")
        .uri("/autoscaling_metrics")
        .header("Host", "localhost") // hyper requires Host, even though the server won't care
        .body(Body::from(""))
        .unwrap();
    let resp = metrics_client
        .send_request(request)
        .await
        .context("fetching metrics from Postgres metrics service")
        .map_err(|e| JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, format!("{e:#}")))?;

    // Build a response that just forwards the response we got.
    let mut response = Response::builder();
    response = response.status(resp.status());
    if let Some(content_type) = resp.headers().get(CONTENT_TYPE) {
        response = response.header(CONTENT_TYPE, content_type);
    }
    let body = tonic::service::AxumBody::from_stream(resp.into_body().into_data_stream());
    Ok(response.body(body).unwrap())
}
@@ -81,8 +81,12 @@ impl From<&Server> for Router<Arc<ComputeNode>> {
            Server::External {
                config, compute_id, ..
            } => {
                let unauthenticated_router =
                    Router::<Arc<ComputeNode>>::new().route("/metrics", get(metrics::get_metrics));
                let unauthenticated_router = Router::<Arc<ComputeNode>>::new()
                    .route("/metrics", get(metrics::get_metrics))
                    .route(
                        "/autoscaling_metrics",
                        get(metrics::get_autoscaling_metrics),
                    );

                let authenticated_router = Router::<Arc<ComputeNode>>::new()
                    .route("/lfc/prewarm", get(lfc::prewarm_state).post(lfc::prewarm))
@@ -4,6 +4,7 @@
#![deny(clippy::undocumented_unsafe_blocks)]

pub mod checker;
pub mod communicator_socket_client;
pub mod config;
pub mod configurator;
pub mod http;
@@ -15,6 +16,7 @@ pub mod compute_prewarm;
pub mod compute_promote;
pub mod disk_quota;
pub mod extension_server;
pub mod hadron_metrics;
pub mod installed_extensions;
pub mod local_proxy;
pub mod lsn_lease;
@@ -13,7 +13,9 @@ use tracing_subscriber::prelude::*;
/// set `OTEL_EXPORTER_OTLP_ENDPOINT=http://jaeger:4318`. See
/// `tracing-utils` package description.
///
pub async fn init_tracing_and_logging(default_log_level: &str) -> anyhow::Result<()> {
pub fn init_tracing_and_logging(
    default_log_level: &str,
) -> anyhow::Result<Option<tracing_utils::Provider>> {
    // Initialize Logging
    let env_filter = tracing_subscriber::EnvFilter::try_from_default_env()
        .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new(default_log_level));
@@ -24,8 +26,9 @@ pub async fn init_tracing_and_logging(default_log_level: &str) -> anyhow::Result
        .with_writer(std::io::stderr);

    // Initialize OpenTelemetry
    let otlp_layer =
        tracing_utils::init_tracing("compute_ctl", tracing_utils::ExportConfig::default()).await;
    let provider =
        tracing_utils::init_tracing("compute_ctl", tracing_utils::ExportConfig::default());
    let otlp_layer = provider.as_ref().map(tracing_utils::layer);

    // Put it all together
    tracing_subscriber::registry()
@@ -37,7 +40,7 @@ pub async fn init_tracing_and_logging(default_log_level: &str) -> anyhow::Result

    utils::logging::replace_panic_hook_with_tracing_panic_hook().forget();

    Ok(())
    Ok(provider)
}

/// Replace all newline characters with a special character to make it
@@ -9,15 +9,20 @@ use crate::metrics::DB_MIGRATION_FAILED;
pub(crate) struct MigrationRunner<'m> {
    client: &'m mut Client,
    migrations: &'m [&'m str],
    lakebase_mode: bool,
}

impl<'m> MigrationRunner<'m> {
    /// Create a new migration runner
    pub fn new(client: &'m mut Client, migrations: &'m [&'m str]) -> Self {
    pub fn new(client: &'m mut Client, migrations: &'m [&'m str], lakebase_mode: bool) -> Self {
        // The neon_migration.migration_id::id column is a bigint, which is equivalent to an i64
        assert!(migrations.len() + 1 < i64::MAX as usize);

        Self { client, migrations }
        Self {
            client,
            migrations,
            lakebase_mode,
        }
    }

    /// Get the current value neon_migration.migration_id
@@ -130,8 +135,13 @@ impl<'m> MigrationRunner<'m> {
            // ID is also the next index
            let migration_id = (current_migration + 1) as i64;
            let migration = self.migrations[current_migration];
            let migration = if self.lakebase_mode {
                migration.replace("neon_superuser", "databricks_superuser")
            } else {
                migration.to_string()
            };

            match Self::run_migration(self.client, migration_id, migration).await {
            match Self::run_migration(self.client, migration_id, &migration).await {
                Ok(_) => {
                    info!("Finished migration id={}", migration_id);
                }
@@ -11,6 +11,7 @@ use tracing::{Level, error, info, instrument, span};
use crate::compute::ComputeNode;
use crate::metrics::{PG_CURR_DOWNTIME_MS, PG_TOTAL_DOWNTIME_MS};

const PG_DEFAULT_INIT_TIMEOUIT: Duration = Duration::from_secs(60);
const MONITOR_CHECK_INTERVAL: Duration = Duration::from_millis(500);

/// Struct to store runtime state of the compute monitor thread.
@@ -352,13 +353,47 @@ impl ComputeMonitor {
// Hang on condition variable waiting until the compute status is `Running`.
fn wait_for_postgres_start(compute: &ComputeNode) {
    let mut state = compute.state.lock().unwrap();
    let pg_init_timeout = compute
        .params
        .pg_init_timeout
        .unwrap_or(PG_DEFAULT_INIT_TIMEOUIT);

    while state.status != ComputeStatus::Running {
        info!("compute is not running, waiting before monitoring activity");
        state = compute.state_changed.wait(state).unwrap();
        if !compute.params.lakebase_mode {
            state = compute.state_changed.wait(state).unwrap();

        if state.status == ComputeStatus::Running {
            break;
            if state.status == ComputeStatus::Running {
                break;
            }
            continue;
        }

        if state.pg_start_time.is_some()
            && Utc::now()
                .signed_duration_since(state.pg_start_time.unwrap())
                .to_std()
                .unwrap_or_default()
                > pg_init_timeout
        {
            // If Postgres isn't up and running with working PS/SK connections within POSTGRES_STARTUP_TIMEOUT, it is
            // possible that we started Postgres with a wrong spec (so it is talking to the wrong PS/SK nodes). To prevent
            // deadends we simply exit (panic) the compute node so it can restart with the latest spec.
            //
            // NB: We skip this check if we have not attempted to start PG yet (indicated by state.pg_start_up == None).
            // This is to make sure the more appropriate errors are surfaced if we encounter issues before we even attempt
            // to start PG (e.g., if we can't pull the spec, can't sync safekeepers, or can't get the basebackup).
            error!(
                "compute did not enter Running state in {} seconds, exiting",
                pg_init_timeout.as_secs()
            );
            std::process::exit(1);
        }
        state = compute
            .state_changed
            .wait_timeout(state, Duration::from_secs(5))
            .unwrap()
            .0;
    }
}
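The new timeout branch above boils down to one comparison between a chrono timestamp delta and the configured pg_init_timeout. A condensed sketch of the same arithmetic (the helper name is illustrative, not in the diff):

    use chrono::{DateTime, Utc};
    use std::time::Duration;

    // Returns true once Postgres has been starting for longer than the allowed init timeout.
    fn pg_init_timed_out(pg_start_time: Option<DateTime<Utc>>, timeout: Duration) -> bool {
        match pg_start_time {
            // No start attempt recorded yet: never trip the timeout.
            None => false,
            Some(started) => Utc::now()
                .signed_duration_since(started)
                .to_std()            // a negative delta becomes an Err...
                .unwrap_or_default() // ...and counts as zero elapsed time
                > timeout,
        }
    }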
@@ -11,7 +11,9 @@ use std::time::{Duration, Instant};

use anyhow::{Result, bail};
use compute_api::responses::TlsConfig;
use compute_api::spec::{Database, GenericOption, GenericOptions, PgIdent, Role};
use compute_api::spec::{
    Database, DatabricksSettings, GenericOption, GenericOptions, PgIdent, Role,
};
use futures::StreamExt;
use indexmap::IndexMap;
use ini::Ini;
@@ -184,6 +186,42 @@ impl DatabaseExt for Database {
    }
}

pub trait DatabricksSettingsExt {
    fn as_pg_settings(&self) -> String;
}

impl DatabricksSettingsExt for DatabricksSettings {
    fn as_pg_settings(&self) -> String {
        // Postgres GUCs rendered from DatabricksSettings
        vec![
            // ssl_ca_file
            Some(format!(
                "ssl_ca_file = '{}'",
                self.pg_compute_tls_settings.ca_file
            )),
            // [Optional] databricks.workspace_url
            Some(format!(
                "databricks.workspace_url = '{}'",
                &self.databricks_workspace_host
            )),
            // todo(vikas.jain): these are not required anymore as they are moved to static
            // conf but keeping these to avoid image mismatch between hcc and pg.
            // Once hcc and pg are in sync, we can remove these.
            //
            // databricks.enable_databricks_identity_login
            Some("databricks.enable_databricks_identity_login = true".to_string()),
            // databricks.enable_sql_restrictions
            Some("databricks.enable_sql_restrictions = true".to_string()),
        ]
        .into_iter()
        // Removes `None`s
        .flatten()
        .collect::<Vec<String>>()
        .join("\n")
            + "\n"
    }
}

/// Generic trait used to provide quoting / encoding for strings used in the
/// Postgres SQL queries and DATABASE_URL.
pub trait Escaping {
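A hedged sketch of what as_pg_settings() renders, written as a small test with illustrative values (the paths and hostname are assumptions, not from the diff):

    #[cfg(test)]
    mod databricks_settings_sketch {
        use super::*;

        #[test]
        fn renders_expected_gucs() {
            let settings = DatabricksSettings {
                pg_compute_tls_settings: PgComputeTlsSettings {
                    cert_file: "/certs/server.pem".to_string(),
                    key_file: "/certs/server.key".to_string(),
                    ca_file: "/certs/ca.pem".to_string(),
                },
                databricks_pg_hba: "/conf/databricks_pg_hba.conf".to_string(),
                databricks_pg_ident: "/conf/databricks_pg_ident.conf".to_string(),
                databricks_workspace_host: "example.cloud.databricks.com".to_string(),
            };
            let rendered = settings.as_pg_settings();
            assert!(rendered.contains("ssl_ca_file = '/certs/ca.pem'"));
            assert!(rendered.contains("databricks.workspace_url = 'example.cloud.databricks.com'"));
            assert!(rendered.ends_with('\n'));
        }
    }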
@@ -1,4 +1,6 @@
use std::fs::File;
use std::fs::{self, Permissions};
use std::os::unix::fs::PermissionsExt;
use std::path::Path;

use anyhow::{Result, anyhow, bail};
@@ -133,10 +135,25 @@ pub fn get_config_from_control_plane(base_uri: &str, compute_id: &str) -> Result
}

/// Check `pg_hba.conf` and update if needed to allow external connections.
pub fn update_pg_hba(pgdata_path: &Path) -> Result<()> {
pub fn update_pg_hba(pgdata_path: &Path, databricks_pg_hba: Option<&String>) -> Result<()> {
    // XXX: consider making it a part of config.json
    let pghba_path = pgdata_path.join("pg_hba.conf");

    // Update pg_hba to contains databricks specfic settings before adding neon settings
    // PG uses the first record that matches to perform authentication, so we need to have
    // our rules before the default ones from neon.
    // See https://www.postgresql.org/docs/16/auth-pg-hba-conf.html
    if let Some(databricks_pg_hba) = databricks_pg_hba {
        if config::line_in_file(
            &pghba_path,
            &format!("include_if_exists {}\n", *databricks_pg_hba),
        )? {
            info!("updated pg_hba.conf to include databricks_pg_hba.conf");
        } else {
            info!("pg_hba.conf already included databricks_pg_hba.conf");
        }
    }

    if config::line_in_file(&pghba_path, PG_HBA_ALL_MD5)? {
        info!("updated pg_hba.conf to allow external connections");
    } else {
@@ -146,6 +163,59 @@ pub fn update_pg_hba(pgdata_path: &Path) -> Result<()> {
    Ok(())
}

/// Check `pg_ident.conf` and update if needed to allow databricks config.
pub fn update_pg_ident(pgdata_path: &Path, databricks_pg_ident: Option<&String>) -> Result<()> {
    info!("checking pg_ident.conf");
    let pghba_path = pgdata_path.join("pg_ident.conf");

    // Update pg_ident to contains databricks specfic settings
    if let Some(databricks_pg_ident) = databricks_pg_ident {
        if config::line_in_file(
            &pghba_path,
            &format!("include_if_exists {}\n", *databricks_pg_ident),
        )? {
            info!("updated pg_ident.conf to include databricks_pg_ident.conf");
        } else {
            info!("pg_ident.conf already included databricks_pg_ident.conf");
        }
    }

    Ok(())
}

/// Copy tls key_file and cert_file from k8s secret mount directory
/// to pgdata and set private key file permissions as expected by Postgres.
/// See this doc for expected permission <https://www.postgresql.org/docs/current/ssl-tcp.html>
/// K8s secrets mount on dblet does not honor permission and ownership
/// specified in the Volume or VolumeMount. So we need to explicitly copy the file and set the permissions.
pub fn copy_tls_certificates(
    key_file: &String,
    cert_file: &String,
    pgdata_path: &Path,
) -> Result<()> {
    let files = [cert_file, key_file];
    for file in files.iter() {
        let source = Path::new(file);
        let dest = pgdata_path.join(source.file_name().unwrap());
        if !dest.exists() {
            std::fs::copy(source, &dest)?;
            info!(
                "Copying tls file: {} to {}",
                &source.display(),
                &dest.display()
            );
        }
        if *file == key_file {
            // Postgres requires private key to be readable only by the owner by having
            // chmod 600 permissions.
            let permissions = Permissions::from_mode(0o600);
            fs::set_permissions(&dest, permissions)?;
            info!("Setting permission on {}.", &dest.display());
        }
    }
    Ok(())
}

/// Create a standby.signal file
pub fn add_standby_signal(pgdata_path: &Path) -> Result<()> {
    // XXX: consider making it a part of config.json
@@ -170,7 +240,11 @@ pub async fn handle_neon_extension_upgrade(client: &mut Client) -> Result<()> {
}

#[instrument(skip_all)]
pub async fn handle_migrations(params: ComputeNodeParams, client: &mut Client) -> Result<()> {
pub async fn handle_migrations(
    params: ComputeNodeParams,
    client: &mut Client,
    lakebase_mode: bool,
) -> Result<()> {
    info!("handle migrations");

    // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -234,7 +308,7 @@ pub async fn handle_migrations(params: ComputeNodeParams, client: &mut Client) -
        ),
    ];

    MigrationRunner::new(client, &migrations)
    MigrationRunner::new(client, &migrations, lakebase_mode)
        .run_migrations()
        .await?;
@@ -411,7 +411,8 @@ impl ComputeNode {
            .map(|limit| match limit {
                0..10 => limit,
                10..30 => 10,
                30.. => limit / 3,
                30..300 => limit / 3,
                300.. => 100,
            })
            // If we didn't find max_connections, default to 10 concurrent connections.
            .unwrap_or(10)
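The adjusted match above maps max_connections to a bounded concurrency limit. Spelled out as a standalone function (the name is illustrative), the tiers are:

    // Derive a concurrency limit from max_connections (sketch of the hunk above).
    fn concurrency_limit_for(max_connections: Option<u32>) -> u32 {
        max_connections
            .map(|limit| match limit {
                0..10 => limit,       // tiny: use everything available
                10..30 => 10,         // small: cap at 10
                30..300 => limit / 3, // medium: roughly a third
                300.. => 100,         // large: hard cap at 100
            })
            // If we didn't find max_connections, default to 10 concurrent connections.
            .unwrap_or(10)
    }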
@@ -407,6 +407,12 @@ struct StorageControllerStartCmdArgs {
        help = "Base port for the storage controller instance idenfified by instance-id (defaults to pageserver cplane api)"
    )]
    base_port: Option<u16>,

    #[clap(
        long,
        help = "Whether the storage controller should handle pageserver-reported local disk loss events."
    )]
    handle_ps_local_disk_loss: Option<bool>,
}

#[derive(clap::Args)]
@@ -1813,6 +1819,7 @@ async fn handle_storage_controller(
        instance_id: args.instance_id,
        base_port: args.base_port,
        start_timeout: args.start_timeout,
        handle_ps_local_disk_loss: args.handle_ps_local_disk_loss,
    };

    if let Err(e) = svc.start(start_args).await {
@@ -56,6 +56,7 @@ pub struct NeonStorageControllerStartArgs {
    pub instance_id: u8,
    pub base_port: Option<u16>,
    pub start_timeout: humantime::Duration,
    pub handle_ps_local_disk_loss: Option<bool>,
}

impl NeonStorageControllerStartArgs {
@@ -64,6 +65,7 @@ impl NeonStorageControllerStartArgs {
            instance_id: 1,
            base_port: None,
            start_timeout,
            handle_ps_local_disk_loss: None,
        }
    }
}
@@ -732,6 +734,10 @@ impl StorageController {

        println!("Starting storage controller at {scheme}://{host}:{listen_port}");

        if start_args.handle_ps_local_disk_loss.unwrap_or_default() {
            args.push("--handle-ps-local-disk-loss".to_string());
        }

        background_process::start_process(
            COMMAND,
            &instance_dir,
@@ -35,6 +35,7 @@ reason = "The paste crate is a build-only dependency with no runtime components.
|
||||
# More documentation for the licenses section can be found here:
|
||||
# https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html
|
||||
[licenses]
|
||||
version = 2
|
||||
allow = [
|
||||
"0BSD",
|
||||
"Apache-2.0",
|
||||
|
||||
@@ -233,7 +233,7 @@ mod tests {
            .unwrap()
            .as_millis();
        use rand::Rng;
        let random = rand::thread_rng().r#gen::<u32>();
        let random = rand::rng().random::<u32>();

        let s3_config = remote_storage::S3Config {
            bucket_name: var(REAL_S3_BUCKET).unwrap(),
@@ -416,6 +416,32 @@ pub struct GenericOption {
    pub vartype: String,
}

/// Postgres compute TLS settings.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub struct PgComputeTlsSettings {
    // Absolute path to the certificate file for server-side TLS.
    pub cert_file: String,
    // Absolute path to the private key file for server-side TLS.
    pub key_file: String,
    // Absolute path to the certificate authority file for verifying client certificates.
    pub ca_file: String,
}

/// Databricks specific options for compute instance.
/// This is used to store any other settings that needs to be propagate to Compute
/// but should not be persisted to ComputeSpec in the database.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub struct DatabricksSettings {
    pub pg_compute_tls_settings: PgComputeTlsSettings,
    // Absolute file path to databricks_pg_hba.conf file.
    pub databricks_pg_hba: String,
    // Absolute file path to databricks_pg_ident.conf file.
    pub databricks_pg_ident: String,
    // Hostname portion of the Databricks workspace URL of the endpoint, or empty string if not known.
    // A valid hostname is required for the compute instance to support PAT logins.
    pub databricks_workspace_host: String,
}

/// Optional collection of `GenericOption`'s. Type alias allows us to
/// declare a `trait` on it.
pub type GenericOptions = Option<Vec<GenericOption>>;
@@ -90,7 +90,7 @@ impl<'a> IdempotencyKey<'a> {
        IdempotencyKey {
            now: Utc::now(),
            node_id,
            nonce: rand::thread_rng().gen_range(0..=9999),
            nonce: rand::rng().random_range(0..=9999),
        }
    }
@@ -41,7 +41,7 @@ impl NodeOs {

    /// Generate a random number in range [0, max).
    pub fn random(&self, max: u64) -> u64 {
        self.internal.rng.lock().gen_range(0..max)
        self.internal.rng.lock().random_range(0..max)
    }

    /// Append a new event to the world event log.
@@ -32,10 +32,10 @@ impl Delay {
    /// Generate a random delay in range [min, max]. Return None if the
    /// message should be dropped.
    pub fn delay(&self, rng: &mut StdRng) -> Option<u64> {
        if rng.gen_bool(self.fail_prob) {
        if rng.random_bool(self.fail_prob) {
            return None;
        }
        Some(rng.gen_range(self.min..=self.max))
        Some(rng.random_range(self.min..=self.max))
    }
}
@@ -69,7 +69,7 @@ impl World {
    /// Create a new random number generator.
    pub fn new_rng(&self) -> StdRng {
        let mut rng = self.rng.lock();
        StdRng::from_rng(rng.deref_mut()).unwrap()
        StdRng::from_rng(rng.deref_mut())
    }

    /// Create a new node.
@@ -17,5 +17,5 @@ procfs.workspace = true
measured-process.workspace = true

[dev-dependencies]
rand = "0.8"
rand_distr = "0.4.3"
rand.workspace = true
rand_distr = "0.5"
@@ -260,7 +260,7 @@ mod tests {

    #[test]
    fn test_cardinality_small() {
        let (actual, estimate) = test_cardinality(100, Zipf::new(100, 1.2f64).unwrap());
        let (actual, estimate) = test_cardinality(100, Zipf::new(100.0, 1.2f64).unwrap());

        assert_eq!(actual, [46, 30, 32]);
        assert!(51.3 < estimate[0] && estimate[0] < 51.4);
@@ -270,7 +270,7 @@ mod tests {

    #[test]
    fn test_cardinality_medium() {
        let (actual, estimate) = test_cardinality(10000, Zipf::new(10000, 1.2f64).unwrap());
        let (actual, estimate) = test_cardinality(10000, Zipf::new(10000.0, 1.2f64).unwrap());

        assert_eq!(actual, [2529, 1618, 1629]);
        assert!(2309.1 < estimate[0] && estimate[0] < 2309.2);
@@ -280,7 +280,8 @@ mod tests {

    #[test]
    fn test_cardinality_large() {
        let (actual, estimate) = test_cardinality(1_000_000, Zipf::new(1_000_000, 1.2f64).unwrap());
        let (actual, estimate) =
            test_cardinality(1_000_000, Zipf::new(1_000_000.0, 1.2f64).unwrap());

        assert_eq!(actual, [129077, 79579, 79630]);
        assert!(126067.2 < estimate[0] && estimate[0] < 126067.3);
@@ -290,7 +291,7 @@ mod tests {

    #[test]
    fn test_cardinality_small2() {
        let (actual, estimate) = test_cardinality(100, Zipf::new(200, 0.8f64).unwrap());
        let (actual, estimate) = test_cardinality(100, Zipf::new(200.0, 0.8f64).unwrap());

        assert_eq!(actual, [92, 58, 60]);
        assert!(116.1 < estimate[0] && estimate[0] < 116.2);
@@ -300,7 +301,7 @@ mod tests {

    #[test]
    fn test_cardinality_medium2() {
        let (actual, estimate) = test_cardinality(10000, Zipf::new(20000, 0.8f64).unwrap());
        let (actual, estimate) = test_cardinality(10000, Zipf::new(20000.0, 0.8f64).unwrap());

        assert_eq!(actual, [8201, 5131, 5051]);
        assert!(6846.4 < estimate[0] && estimate[0] < 6846.5);
@@ -310,7 +311,8 @@ mod tests {

    #[test]
    fn test_cardinality_large2() {
        let (actual, estimate) = test_cardinality(1_000_000, Zipf::new(2_000_000, 0.8f64).unwrap());
        let (actual, estimate) =
            test_cardinality(1_000_000, Zipf::new(2_000_000.0, 0.8f64).unwrap());

        assert_eq!(actual, [777847, 482069, 482246]);
        assert!(699437.4 < estimate[0] && estimate[0] < 699437.5);
@@ -16,5 +16,5 @@ rustc-hash.workspace = true
tempfile = "3.14.0"

[dev-dependencies]
rand = "0.9"
rand.workspace = true
rand_distr = "0.5.1"
@@ -394,7 +394,7 @@ impl From<&OtelExporterConfig> for tracing_utils::ExportConfig {
        tracing_utils::ExportConfig {
            endpoint: Some(val.endpoint.clone()),
            protocol: val.protocol.into(),
            timeout: val.timeout,
            timeout: Some(val.timeout),
        }
    }
}
@@ -596,6 +596,7 @@ pub struct TimelineImportRequest {
    pub timeline_id: TimelineId,
    pub start_lsn: Lsn,
    pub sk_set: Vec<NodeId>,
    pub force_upsert: bool,
}

#[derive(serde::Serialize, serde::Deserialize, Clone)]
@@ -981,12 +981,12 @@ mod tests {
        let mut rng = rand::rngs::StdRng::seed_from_u64(42);

        let key = Key {
            field1: rng.r#gen(),
            field2: rng.r#gen(),
            field3: rng.r#gen(),
            field4: rng.r#gen(),
            field5: rng.r#gen(),
            field6: rng.r#gen(),
            field1: rng.random(),
            field2: rng.random(),
            field3: rng.random(),
            field4: rng.random(),
            field5: rng.random(),
            field6: rng.random(),
        };

        assert_eq!(key, Key::from_str(&format!("{key}")).unwrap());
@@ -443,9 +443,9 @@ pub struct ImportPgdataIdempotencyKey(pub String);
impl ImportPgdataIdempotencyKey {
    pub fn random() -> Self {
        use rand::Rng;
        use rand::distributions::Alphanumeric;
        use rand::distr::Alphanumeric;
        Self(
            rand::thread_rng()
            rand::rng()
                .sample_iter(&Alphanumeric)
                .take(20)
                .map(char::from)
@@ -21,6 +21,14 @@ pub struct ReAttachRequest {
    /// if the node already has a node_id set.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub register: Option<NodeRegisterRequest>,

    /// Hadron: Optional flag to indicate whether the node is starting with an empty local disk.
    /// Will be set to true if the node couldn't find any local tenant data on startup, could be
    /// due to the node starting for the first time or due to a local SSD failure/disk wipe event.
    /// The flag may be used by the storage controller to update its observed state of the world
    /// to make sure that it sends explicit location_config calls to the node following the
    /// re-attach request.
    pub empty_local_disk: Option<bool>,
}

#[derive(Serialize, Deserialize, Debug)]
@@ -203,12 +203,12 @@ impl fmt::Display for CancelKeyData {
    }
}

use rand::distributions::{Distribution, Standard};
impl Distribution<CancelKeyData> for Standard {
use rand::distr::{Distribution, StandardUniform};
impl Distribution<CancelKeyData> for StandardUniform {
    fn sample<R: rand::Rng + ?Sized>(&self, rng: &mut R) -> CancelKeyData {
        CancelKeyData {
            backend_pid: rng.r#gen(),
            cancel_key: rng.r#gen(),
            backend_pid: rng.random(),
            cancel_key: rng.random(),
        }
    }
}
@@ -155,10 +155,10 @@ pub struct ScramSha256 {

fn nonce() -> String {
    // rand 0.5's ThreadRng is cryptographically secure
    let mut rng = rand::thread_rng();
    let mut rng = rand::rng();
    (0..NONCE_LENGTH)
        .map(|_| {
            let mut v = rng.gen_range(0x21u8..0x7e);
            let mut v = rng.random_range(0x21u8..0x7e);
            if v == 0x2c {
                v = 0x7e
            }
@@ -28,7 +28,7 @@ const SCRAM_DEFAULT_SALT_LEN: usize = 16;
/// special characters that would require escaping in an SQL command.
pub async fn scram_sha_256(password: &[u8]) -> String {
    let mut salt: [u8; SCRAM_DEFAULT_SALT_LEN] = [0; SCRAM_DEFAULT_SALT_LEN];
    let mut rng = rand::thread_rng();
    let mut rng = rand::rng();
    rng.fill_bytes(&mut salt);
    scram_sha_256_salt(password, salt).await
}
@@ -11,9 +11,8 @@ use tokio::io::{AsyncRead, AsyncWrite};
use tokio::net::TcpStream;

use crate::connect::connect;
use crate::connect_raw::{RawConnection, connect_raw};
use crate::connect_raw::{self, StartupStream};
use crate::connect_tls::connect_tls;
use crate::maybe_tls_stream::MaybeTlsStream;
use crate::tls::{MakeTlsConnect, TlsConnect, TlsStream};
use crate::{Client, Connection, Error};

@@ -244,24 +243,26 @@ impl Config {
        &self,
        stream: S,
        tls: T,
    ) -> Result<RawConnection<S, T::Stream>, Error>
    ) -> Result<StartupStream<S, T::Stream>, Error>
    where
        S: AsyncRead + AsyncWrite + Unpin,
        T: TlsConnect<S>,
    {
        let stream = connect_tls(stream, self.ssl_mode, tls).await?;
        connect_raw(stream, self).await
        let mut stream = StartupStream::new(stream);
        connect_raw::startup(&mut stream, self).await?;
        connect_raw::authenticate(&mut stream, self).await?;

        Ok(stream)
    }

    pub async fn authenticate<S, T>(
        &self,
        stream: MaybeTlsStream<S, T>,
    ) -> Result<RawConnection<S, T>, Error>
    pub async fn authenticate<S, T>(&self, stream: &mut StartupStream<S, T>) -> Result<(), Error>
    where
        S: AsyncRead + AsyncWrite + Unpin,
        T: TlsStream + Unpin,
    {
        connect_raw(stream, self).await
        connect_raw::startup(stream, self).await?;
        connect_raw::authenticate(stream, self).await
    }
}
@@ -1,15 +1,17 @@
use std::net::IpAddr;

use futures_util::TryStreamExt;
use postgres_protocol2::message::backend::Message;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio::net::TcpStream;
use tokio::sync::mpsc;

use crate::client::SocketConfig;
use crate::config::Host;
use crate::connect_raw::connect_raw;
use crate::connect_raw::StartupStream;
use crate::connect_socket::connect_socket;
use crate::connect_tls::connect_tls;
use crate::tls::{MakeTlsConnect, TlsConnect};
use crate::{Client, Config, Connection, Error, RawConnection};
use crate::{Client, Config, Connection, Error};

pub async fn connect<T>(
    tls: &T,
@@ -43,14 +45,8 @@ where
    T: TlsConnect<TcpStream>,
{
    let socket = connect_socket(host_addr, host, port, config.connect_timeout).await?;
    let stream = connect_tls(socket, config.ssl_mode, tls).await?;
    let RawConnection {
        stream,
        parameters: _,
        delayed_notice: _,
        process_id,
        secret_key,
    } = connect_raw(stream, config).await?;
    let mut stream = config.tls_and_authenticate(socket, tls).await?;
    let (process_id, secret_key) = wait_until_ready(&mut stream).await?;

    let socket_config = SocketConfig {
        host_addr,
@@ -70,7 +66,32 @@ where
        secret_key,
    );

    let stream = stream.into_framed();
    let connection = Connection::new(stream, conn_tx, conn_rx);

    Ok((client, connection))
}

async fn wait_until_ready<S, T>(stream: &mut StartupStream<S, T>) -> Result<(i32, i32), Error>
where
    S: AsyncRead + AsyncWrite + Unpin,
    T: AsyncRead + AsyncWrite + Unpin,
{
    let mut process_id = 0;
    let mut secret_key = 0;

    loop {
        match stream.try_next().await.map_err(Error::io)? {
            Some(Message::BackendKeyData(body)) => {
                process_id = body.process_id();
                secret_key = body.secret_key();
            }
            // These values are currently not used by `Client`/`Connection`. Ignore them.
            Some(Message::ParameterStatus(_)) | Some(Message::NoticeResponse(_)) => {}
            Some(Message::ReadyForQuery(_)) => return Ok((process_id, secret_key)),
            Some(Message::ErrorResponse(body)) => return Err(Error::db(body)),
            Some(_) => return Err(Error::unexpected_message()),
            None => return Err(Error::closed()),
        }
    }
}
@@ -1,28 +1,26 @@
|
||||
use std::collections::HashMap;
|
||||
use std::io;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use std::task::{Context, Poll, ready};
|
||||
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use fallible_iterator::FallibleIterator;
|
||||
use futures_util::{Sink, SinkExt, Stream, TryStreamExt, ready};
|
||||
use futures_util::{Sink, SinkExt, Stream, TryStreamExt};
|
||||
use postgres_protocol2::authentication::sasl;
|
||||
use postgres_protocol2::authentication::sasl::ScramSha256;
|
||||
use postgres_protocol2::message::backend::{AuthenticationSaslBody, Message, NoticeResponseBody};
|
||||
use postgres_protocol2::message::backend::{AuthenticationSaslBody, Message};
|
||||
use postgres_protocol2::message::frontend;
|
||||
use tokio::io::{AsyncRead, AsyncWrite};
|
||||
use tokio_util::codec::Framed;
|
||||
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
|
||||
use tokio_util::codec::{Framed, FramedParts, FramedWrite};
|
||||
|
||||
use crate::Error;
|
||||
use crate::codec::{BackendMessage, BackendMessages, PostgresCodec};
|
||||
use crate::codec::PostgresCodec;
|
||||
use crate::config::{self, AuthKeys, Config};
|
||||
use crate::maybe_tls_stream::MaybeTlsStream;
|
||||
use crate::tls::TlsStream;
|
||||
|
||||
pub struct StartupStream<S, T> {
|
||||
inner: Framed<MaybeTlsStream<S, T>, PostgresCodec>,
|
||||
buf: BackendMessages,
|
||||
delayed_notice: Vec<NoticeResponseBody>,
|
||||
inner: FramedWrite<MaybeTlsStream<S, T>, PostgresCodec>,
|
||||
read_buf: BytesMut,
|
||||
}
|
||||
|
||||
impl<S, T> Sink<Bytes> for StartupStream<S, T>
|
||||
@@ -56,63 +54,93 @@ where
|
||||
{
|
||||
type Item = io::Result<Message>;
|
||||
|
||||
fn poll_next(
|
||||
mut self: Pin<&mut Self>,
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
// read 1 byte tag, 4 bytes length.
|
||||
let header = ready!(self.as_mut().poll_fill_buf_exact(cx, 5)?);
|
||||
|
||||
let len = u32::from_be_bytes(header[1..5].try_into().unwrap());
|
||||
if len < 4 {
|
||||
return Poll::Ready(Some(Err(std::io::Error::other(
|
||||
"postgres message too small",
|
||||
))));
|
||||
}
|
||||
if len >= 65536 {
|
||||
return Poll::Ready(Some(Err(std::io::Error::other(
|
||||
"postgres message too large",
|
||||
))));
|
||||
}
|
||||
|
||||
// the tag is an additional byte.
|
||||
let _message = ready!(self.as_mut().poll_fill_buf_exact(cx, len as usize + 1)?);
|
||||
|
||||
// Message::parse will remove all the bytes from the buffer.
|
||||
Poll::Ready(Message::parse(&mut self.read_buf).transpose())
|
||||
}
|
||||
}
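To make the framing above easier to follow, here is a small standalone sketch (not part of the patch) of the same 1-byte-tag/4-byte-length header check applied to a plain byte slice; the 64 KiB cap mirrors the limit used in this startup path.

```rust
// Standalone illustration of the 5-byte header framing used by StartupStream:
// a 1-byte message tag followed by a 4-byte big-endian length. The length
// counts itself but not the tag, so the full message occupies `len + 1` bytes.
fn validate_header(header: &[u8; 5]) -> Result<(u8, usize), &'static str> {
    let tag = header[0];
    let len = u32::from_be_bytes(header[1..5].try_into().unwrap());
    if len < 4 {
        return Err("postgres message too small");
    }
    // Same cap as the startup path above; authentication-phase messages are
    // far smaller than 64 KiB.
    if len >= 65536 {
        return Err("postgres message too large");
    }
    Ok((tag, len as usize + 1))
}

fn main() {
    // An authentication-family message: tag 'R', length 8.
    assert_eq!(validate_header(b"R\x00\x00\x00\x08"), Ok((b'R', 9)));
}
```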
|
||||
|
||||
impl<S, T> StartupStream<S, T>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
/// Fill the buffer until it's the exact length provided. No additional data will be read from the socket.
|
||||
///
|
||||
/// If the current buffer length is greater, nothing happens.
|
||||
fn poll_fill_buf_exact(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<io::Result<Message>>> {
|
||||
loop {
|
||||
match self.buf.next() {
|
||||
Ok(Some(message)) => return Poll::Ready(Some(Ok(message))),
|
||||
Ok(None) => {}
|
||||
Err(e) => return Poll::Ready(Some(Err(e))),
|
||||
len: usize,
|
||||
) -> Poll<Result<&[u8], std::io::Error>> {
|
||||
let this = self.get_mut();
|
||||
let mut stream = Pin::new(this.inner.get_mut());
|
||||
|
||||
let mut n = this.read_buf.len();
|
||||
while n < len {
|
||||
this.read_buf.resize(len, 0);
|
||||
|
||||
let mut buf = ReadBuf::new(&mut this.read_buf[..]);
|
||||
buf.set_filled(n);
|
||||
|
||||
if stream.as_mut().poll_read(cx, &mut buf)?.is_pending() {
|
||||
this.read_buf.truncate(n);
|
||||
return Poll::Pending;
|
||||
}
|
||||
|
||||
match ready!(Pin::new(&mut self.inner).poll_next(cx)) {
|
||||
Some(Ok(BackendMessage::Normal { messages, .. })) => self.buf = messages,
|
||||
Some(Ok(BackendMessage::Async(message))) => return Poll::Ready(Some(Ok(message))),
|
||||
Some(Err(e)) => return Poll::Ready(Some(Err(e))),
|
||||
None => return Poll::Ready(None),
|
||||
if buf.filled().len() == n {
|
||||
return Poll::Ready(Err(std::io::Error::new(
|
||||
std::io::ErrorKind::UnexpectedEof,
|
||||
"early eof",
|
||||
)));
|
||||
}
|
||||
n = buf.filled().len();
|
||||
|
||||
this.read_buf.truncate(n);
|
||||
}
|
||||
|
||||
Poll::Ready(Ok(&this.read_buf[..len]))
|
||||
}
|
||||
|
||||
pub fn into_framed(mut self) -> Framed<MaybeTlsStream<S, T>, PostgresCodec> {
|
||||
let write_buf = std::mem::take(self.inner.write_buffer_mut());
|
||||
let io = self.inner.into_inner();
|
||||
let mut parts = FramedParts::new(io, PostgresCodec);
|
||||
parts.read_buf = self.read_buf;
|
||||
parts.write_buf = write_buf;
|
||||
Framed::from_parts(parts)
|
||||
}
|
||||
|
||||
pub fn new(io: MaybeTlsStream<S, T>) -> Self {
|
||||
Self {
|
||||
inner: FramedWrite::new(io, PostgresCodec),
|
||||
read_buf: BytesMut::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct RawConnection<S, T> {
|
||||
pub stream: Framed<MaybeTlsStream<S, T>, PostgresCodec>,
|
||||
pub parameters: HashMap<String, String>,
|
||||
pub delayed_notice: Vec<NoticeResponseBody>,
|
||||
pub process_id: i32,
|
||||
pub secret_key: i32,
|
||||
}
|
||||
|
||||
pub async fn connect_raw<S, T>(
|
||||
stream: MaybeTlsStream<S, T>,
|
||||
pub(crate) async fn startup<S, T>(
|
||||
stream: &mut StartupStream<S, T>,
|
||||
config: &Config,
|
||||
) -> Result<RawConnection<S, T>, Error>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: TlsStream + Unpin,
|
||||
{
|
||||
let mut stream = StartupStream {
|
||||
inner: Framed::new(stream, PostgresCodec),
|
||||
buf: BackendMessages::empty(),
|
||||
delayed_notice: Vec::new(),
|
||||
};
|
||||
|
||||
startup(&mut stream, config).await?;
|
||||
authenticate(&mut stream, config).await?;
|
||||
let (process_id, secret_key, parameters) = read_info(&mut stream).await?;
|
||||
|
||||
Ok(RawConnection {
|
||||
stream: stream.inner,
|
||||
parameters,
|
||||
delayed_notice: stream.delayed_notice,
|
||||
process_id,
|
||||
secret_key,
|
||||
})
|
||||
}
|
||||
|
||||
async fn startup<S, T>(stream: &mut StartupStream<S, T>, config: &Config) -> Result<(), Error>
|
||||
) -> Result<(), Error>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
@@ -123,7 +151,10 @@ where
|
||||
stream.send(buf.freeze()).await.map_err(Error::io)
|
||||
}
|
||||
|
||||
async fn authenticate<S, T>(stream: &mut StartupStream<S, T>, config: &Config) -> Result<(), Error>
|
||||
pub(crate) async fn authenticate<S, T>(
|
||||
stream: &mut StartupStream<S, T>,
|
||||
config: &Config,
|
||||
) -> Result<(), Error>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: TlsStream + Unpin,
|
||||
@@ -278,35 +309,3 @@ where
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn read_info<S, T>(
|
||||
stream: &mut StartupStream<S, T>,
|
||||
) -> Result<(i32, i32, HashMap<String, String>), Error>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
let mut process_id = 0;
|
||||
let mut secret_key = 0;
|
||||
let mut parameters = HashMap::new();
|
||||
|
||||
loop {
|
||||
match stream.try_next().await.map_err(Error::io)? {
|
||||
Some(Message::BackendKeyData(body)) => {
|
||||
process_id = body.process_id();
|
||||
secret_key = body.secret_key();
|
||||
}
|
||||
Some(Message::ParameterStatus(body)) => {
|
||||
parameters.insert(
|
||||
body.name().map_err(Error::parse)?.to_string(),
|
||||
body.value().map_err(Error::parse)?.to_string(),
|
||||
);
|
||||
}
|
||||
Some(Message::NoticeResponse(body)) => stream.delayed_notice.push(body),
|
||||
Some(Message::ReadyForQuery(_)) => return Ok((process_id, secret_key, parameters)),
|
||||
Some(Message::ErrorResponse(body)) => return Err(Error::db(body)),
|
||||
Some(_) => return Err(Error::unexpected_message()),
|
||||
None => return Err(Error::closed()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -452,16 +452,16 @@ impl Error {
|
||||
Error(Box::new(ErrorInner { kind, cause }))
|
||||
}
|
||||
|
||||
pub(crate) fn closed() -> Error {
|
||||
pub fn closed() -> Error {
|
||||
Error::new(Kind::Closed, None)
|
||||
}
|
||||
|
||||
pub(crate) fn unexpected_message() -> Error {
|
||||
pub fn unexpected_message() -> Error {
|
||||
Error::new(Kind::UnexpectedMessage, None)
|
||||
}
|
||||
|
||||
#[allow(clippy::needless_pass_by_value)]
|
||||
pub(crate) fn db(error: ErrorResponseBody) -> Error {
|
||||
pub fn db(error: ErrorResponseBody) -> Error {
|
||||
match DbError::parse(&mut error.fields()) {
|
||||
Ok(e) => Error::new(Kind::Db, Some(Box::new(e))),
|
||||
Err(e) => Error::new(Kind::Parse, Some(Box::new(e))),
|
||||
@@ -493,7 +493,7 @@ impl Error {
|
||||
Error::new(Kind::Tls, Some(e))
|
||||
}
|
||||
|
||||
pub(crate) fn io(e: io::Error) -> Error {
|
||||
pub fn io(e: io::Error) -> Error {
|
||||
Error::new(Kind::Io, Some(Box::new(e)))
|
||||
}
|
||||
|
||||
|
||||
@@ -6,7 +6,6 @@ use postgres_protocol2::message::backend::ReadyForQueryBody;
|
||||
pub use crate::cancel_token::{CancelToken, RawCancelToken};
|
||||
pub use crate::client::{Client, SocketConfig};
|
||||
pub use crate::config::Config;
|
||||
pub use crate::connect_raw::RawConnection;
|
||||
pub use crate::connection::Connection;
|
||||
pub use crate::error::Error;
|
||||
pub use crate::generic_client::GenericClient;
|
||||
@@ -50,7 +49,7 @@ mod client;
|
||||
mod codec;
|
||||
pub mod config;
|
||||
mod connect;
|
||||
mod connect_raw;
|
||||
pub mod connect_raw;
|
||||
mod connect_socket;
|
||||
mod connect_tls;
|
||||
mod connection;
|
||||
|
||||
@@ -43,7 +43,7 @@ itertools.workspace = true
|
||||
sync_wrapper = { workspace = true, features = ["futures"] }
|
||||
|
||||
byteorder = "1.4"
|
||||
rand = "0.8.5"
|
||||
rand.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
camino-tempfile.workspace = true
|
||||
|
||||
@@ -81,7 +81,7 @@ impl UnreliableWrapper {
|
||||
///
|
||||
fn attempt(&self, op: RemoteOp) -> anyhow::Result<u64> {
|
||||
let mut attempts = self.attempts.lock().unwrap();
|
||||
let mut rng = rand::thread_rng();
|
||||
let mut rng = rand::rng();
|
||||
|
||||
match attempts.entry(op) {
|
||||
Entry::Occupied(mut e) => {
|
||||
@@ -94,7 +94,7 @@ impl UnreliableWrapper {
|
||||
/* BEGIN_HADRON */
|
||||
// If there are more attempts to fail, fail the request by probability.
|
||||
if (attempts_before_this < self.attempts_to_fail)
|
||||
&& (rng.gen_range(0..=100) < self.attempt_failure_probability)
|
||||
&& (rng.random_range(0..=100) < self.attempt_failure_probability)
|
||||
{
|
||||
let error =
|
||||
anyhow::anyhow!("simulated failure of remote operation {:?}", e.key());
|
||||
|
||||
@@ -208,7 +208,7 @@ async fn create_azure_client(
|
||||
.as_millis();
|
||||
|
||||
// because nanos can be the same for two threads (and so can millis), add randomness
|
||||
let random = rand::thread_rng().r#gen::<u32>();
|
||||
let random = rand::rng().random::<u32>();
|
||||
|
||||
let remote_storage_config = RemoteStorageConfig {
|
||||
storage: RemoteStorageKind::AzureContainer(AzureConfig {
|
||||
|
||||
@@ -385,7 +385,7 @@ async fn create_s3_client(
|
||||
.as_millis();
|
||||
|
||||
// because nanos can be the same for two threads (and so can millis), add randomness
|
||||
let random = rand::thread_rng().r#gen::<u32>();
|
||||
let random = rand::rng().random::<u32>();
|
||||
|
||||
let remote_storage_config = RemoteStorageConfig {
|
||||
storage: RemoteStorageKind::AwsS3(S3Config {
|
||||
|
||||
@@ -301,7 +301,12 @@ pub struct PullTimelineRequest {
|
||||
pub tenant_id: TenantId,
|
||||
pub timeline_id: TimelineId,
|
||||
pub http_hosts: Vec<String>,
|
||||
pub ignore_tombstone: Option<bool>,
|
||||
/// Membership configuration to switch to after the pull.
/// It guarantees that if pull_timeline returns successfully, the timeline will
/// not be deleted by a request with an older generation.
/// The storage controller always sets this field.
/// None is only allowed for manual pull_timeline requests.
pub mconf: Option<Configuration>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
|
||||
@@ -1,11 +1,5 @@
|
||||
//! Helper functions to set up OpenTelemetry tracing.
|
||||
//!
|
||||
//! This comes in two variants, depending on whether you have a Tokio runtime available.
|
||||
//! If you do, call `init_tracing()`. It sets up the trace processor and exporter to use
|
||||
//! the current tokio runtime. If you don't have a runtime available, or you don't want
|
||||
//! to share the runtime with the tracing tasks, call `init_tracing_without_runtime()`
|
||||
//! instead. It sets up a dedicated single-threaded Tokio runtime for the tracing tasks.
|
||||
//!
|
||||
//! Example:
|
||||
//!
|
||||
//! ```rust,no_run
|
||||
@@ -21,7 +15,8 @@
|
||||
//! .with_writer(std::io::stderr);
|
||||
//!
|
||||
//! // Initialize OpenTelemetry. Exports tracing spans as OpenTelemetry traces
|
||||
//! let otlp_layer = tracing_utils::init_tracing("my_application", tracing_utils::ExportConfig::default()).await;
|
||||
//! let provider = tracing_utils::init_tracing("my_application", tracing_utils::ExportConfig::default());
|
||||
//! let otlp_layer = provider.as_ref().map(tracing_utils::layer);
|
||||
//!
|
||||
//! // Put it all together
|
||||
//! tracing_subscriber::registry()
|
||||
@@ -36,16 +31,18 @@
|
||||
pub mod http;
|
||||
pub mod perf_span;
|
||||
|
||||
use opentelemetry::KeyValue;
|
||||
use opentelemetry::trace::TracerProvider;
|
||||
use opentelemetry_otlp::WithExportConfig;
|
||||
pub use opentelemetry_otlp::{ExportConfig, Protocol};
|
||||
use opentelemetry_sdk::trace::SdkTracerProvider;
|
||||
use tracing::level_filters::LevelFilter;
|
||||
use tracing::{Dispatch, Subscriber};
|
||||
use tracing_subscriber::Layer;
|
||||
use tracing_subscriber::layer::SubscriberExt;
|
||||
use tracing_subscriber::registry::LookupSpan;
|
||||
|
||||
pub type Provider = SdkTracerProvider;
|
||||
|
||||
/// Set up OpenTelemetry exporter, using configuration from environment variables.
|
||||
///
|
||||
/// `service_name` is set as the OpenTelemetry 'service.name' resource (see
|
||||
@@ -70,16 +67,7 @@ use tracing_subscriber::registry::LookupSpan;
|
||||
/// If you need some other setting, please test if it works first. And perhaps
|
||||
/// add a comment in the list above to save the effort of testing for the next
|
||||
/// person.
|
||||
///
|
||||
/// This doesn't block, but is marked as 'async' to hint that this must be called in
|
||||
/// asynchronous execution context.
|
||||
pub async fn init_tracing<S>(
|
||||
service_name: &str,
|
||||
export_config: ExportConfig,
|
||||
) -> Option<impl Layer<S>>
|
||||
where
|
||||
S: Subscriber + for<'span> LookupSpan<'span>,
|
||||
{
|
||||
pub fn init_tracing(service_name: &str, export_config: ExportConfig) -> Option<Provider> {
|
||||
if std::env::var("OTEL_SDK_DISABLED") == Ok("true".to_string()) {
|
||||
return None;
|
||||
};
|
||||
@@ -89,52 +77,14 @@ where
|
||||
))
|
||||
}
|
||||
|
||||
/// Like `init_tracing`, but creates a separate tokio Runtime for the tracing
|
||||
/// tasks.
|
||||
pub fn init_tracing_without_runtime<S>(
|
||||
service_name: &str,
|
||||
export_config: ExportConfig,
|
||||
) -> Option<impl Layer<S>>
|
||||
pub fn layer<S>(p: &Provider) -> impl Layer<S>
|
||||
where
|
||||
S: Subscriber + for<'span> LookupSpan<'span>,
|
||||
{
|
||||
if std::env::var("OTEL_SDK_DISABLED") == Ok("true".to_string()) {
|
||||
return None;
|
||||
};
|
||||
|
||||
// The opentelemetry batch processor and the OTLP exporter needs a Tokio
|
||||
// runtime. Create a dedicated runtime for them. One thread should be
|
||||
// enough.
|
||||
//
|
||||
// (Alternatively, instead of batching, we could use the "simple
|
||||
// processor", which doesn't need Tokio, and use "reqwest-blocking"
|
||||
// feature for the OTLP exporter, which also doesn't need Tokio. However,
|
||||
// batching is considered best practice, and also I have the feeling that
|
||||
// the non-Tokio codepaths in the opentelemetry crate are less used and
|
||||
// might be more buggy, so better to stay on the well-beaten path.)
|
||||
//
|
||||
// We leak the runtime so that it keeps running after we exit the
|
||||
// function.
|
||||
let runtime = Box::leak(Box::new(
|
||||
tokio::runtime::Builder::new_multi_thread()
|
||||
.enable_all()
|
||||
.thread_name("otlp runtime thread")
|
||||
.worker_threads(1)
|
||||
.build()
|
||||
.unwrap(),
|
||||
));
|
||||
let _guard = runtime.enter();
|
||||
|
||||
Some(init_tracing_internal(
|
||||
service_name.to_string(),
|
||||
export_config,
|
||||
))
|
||||
tracing_opentelemetry::layer().with_tracer(p.tracer("global"))
|
||||
}
|
||||
|
||||
fn init_tracing_internal<S>(service_name: String, export_config: ExportConfig) -> impl Layer<S>
|
||||
where
|
||||
S: Subscriber + for<'span> LookupSpan<'span>,
|
||||
{
|
||||
fn init_tracing_internal(service_name: String, export_config: ExportConfig) -> Provider {
|
||||
// Sets up exporter from the provided [`ExportConfig`] parameter.
|
||||
// If the endpoint is not specified, it is loaded from the
|
||||
// OTEL_EXPORTER_OTLP_ENDPOINT environment variable.
|
||||
@@ -153,22 +103,14 @@ where
|
||||
opentelemetry_sdk::propagation::TraceContextPropagator::new(),
|
||||
);
|
||||
|
||||
let tracer = opentelemetry_sdk::trace::TracerProvider::builder()
|
||||
.with_batch_exporter(exporter, opentelemetry_sdk::runtime::Tokio)
|
||||
.with_resource(opentelemetry_sdk::Resource::new(vec![KeyValue::new(
|
||||
opentelemetry_semantic_conventions::resource::SERVICE_NAME,
|
||||
service_name,
|
||||
)]))
|
||||
Provider::builder()
|
||||
.with_batch_exporter(exporter)
|
||||
.with_resource(
|
||||
opentelemetry_sdk::Resource::builder()
|
||||
.with_service_name(service_name)
|
||||
.build(),
|
||||
)
|
||||
.build()
|
||||
.tracer("global");
|
||||
|
||||
tracing_opentelemetry::layer().with_tracer(tracer)
|
||||
}
|
||||
|
||||
// Shutdown trace pipeline gracefully, so that it has a chance to send any
|
||||
// pending traces before we exit.
|
||||
pub fn shutdown_tracing() {
|
||||
opentelemetry::global::shutdown_tracer_provider();
|
||||
}
|
||||
|
||||
pub enum OtelEnablement {
|
||||
@@ -176,17 +118,17 @@ pub enum OtelEnablement {
|
||||
Enabled {
|
||||
service_name: String,
|
||||
export_config: ExportConfig,
|
||||
runtime: &'static tokio::runtime::Runtime,
|
||||
},
|
||||
}
|
||||
|
||||
pub struct OtelGuard {
|
||||
provider: Provider,
|
||||
pub dispatch: Dispatch,
|
||||
}
|
||||
|
||||
impl Drop for OtelGuard {
|
||||
fn drop(&mut self) {
|
||||
shutdown_tracing();
|
||||
_ = self.provider.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -199,22 +141,19 @@ impl Drop for OtelGuard {
|
||||
/// The lifetime of the guard should match that of the application. On drop, it tears down the
|
||||
/// OTEL infra.
|
||||
pub fn init_performance_tracing(otel_enablement: OtelEnablement) -> Option<OtelGuard> {
|
||||
let otel_subscriber = match otel_enablement {
|
||||
match otel_enablement {
|
||||
OtelEnablement::Disabled => None,
|
||||
OtelEnablement::Enabled {
|
||||
service_name,
|
||||
export_config,
|
||||
runtime,
|
||||
} => {
|
||||
let otel_layer = runtime
|
||||
.block_on(init_tracing(&service_name, export_config))
|
||||
.with_filter(LevelFilter::INFO);
|
||||
let provider = init_tracing(&service_name, export_config)?;
|
||||
|
||||
let otel_layer = layer(&provider).with_filter(LevelFilter::INFO);
|
||||
let otel_subscriber = tracing_subscriber::registry().with(otel_layer);
|
||||
let otel_dispatch = Dispatch::new(otel_subscriber);
|
||||
let dispatch = Dispatch::new(otel_subscriber);
|
||||
|
||||
Some(otel_dispatch)
|
||||
Some(OtelGuard { dispatch, provider })
|
||||
}
|
||||
};
|
||||
|
||||
otel_subscriber.map(|dispatch| OtelGuard { dispatch })
|
||||
}
|
||||
}
|
||||
|
||||
@@ -104,7 +104,7 @@ impl Id {
|
||||
|
||||
pub fn generate() -> Self {
|
||||
let mut tli_buf = [0u8; 16];
|
||||
rand::thread_rng().fill(&mut tli_buf);
|
||||
rand::rng().fill(&mut tli_buf);
|
||||
Id::from(tli_buf)
|
||||
}
|
||||
|
||||
|
||||
@@ -364,42 +364,37 @@ impl MonotonicCounter<Lsn> for RecordLsn {
|
||||
}
|
||||
}
|
||||
|
||||
/// Implements [`rand::distributions::uniform::UniformSampler`] so we can sample [`Lsn`]s.
|
||||
/// Implements [`rand::distr::uniform::UniformSampler`] so we can sample [`Lsn`]s.
|
||||
///
|
||||
/// This is used by the `pagebench` pageserver benchmarking tool.
|
||||
pub struct LsnSampler(<u64 as rand::distributions::uniform::SampleUniform>::Sampler);
|
||||
pub struct LsnSampler(<u64 as rand::distr::uniform::SampleUniform>::Sampler);
|
||||
|
||||
impl rand::distributions::uniform::SampleUniform for Lsn {
|
||||
impl rand::distr::uniform::SampleUniform for Lsn {
|
||||
type Sampler = LsnSampler;
|
||||
}
|
||||
|
||||
impl rand::distributions::uniform::UniformSampler for LsnSampler {
|
||||
impl rand::distr::uniform::UniformSampler for LsnSampler {
|
||||
type X = Lsn;
|
||||
|
||||
fn new<B1, B2>(low: B1, high: B2) -> Self
|
||||
fn new<B1, B2>(low: B1, high: B2) -> Result<Self, rand::distr::uniform::Error>
|
||||
where
|
||||
B1: rand::distributions::uniform::SampleBorrow<Self::X> + Sized,
|
||||
B2: rand::distributions::uniform::SampleBorrow<Self::X> + Sized,
|
||||
B1: rand::distr::uniform::SampleBorrow<Self::X> + Sized,
|
||||
B2: rand::distr::uniform::SampleBorrow<Self::X> + Sized,
|
||||
{
|
||||
Self(
|
||||
<u64 as rand::distributions::uniform::SampleUniform>::Sampler::new(
|
||||
low.borrow().0,
|
||||
high.borrow().0,
|
||||
),
|
||||
)
|
||||
<u64 as rand::distr::uniform::SampleUniform>::Sampler::new(low.borrow().0, high.borrow().0)
|
||||
.map(Self)
|
||||
}
|
||||
|
||||
fn new_inclusive<B1, B2>(low: B1, high: B2) -> Self
|
||||
fn new_inclusive<B1, B2>(low: B1, high: B2) -> Result<Self, rand::distr::uniform::Error>
|
||||
where
|
||||
B1: rand::distributions::uniform::SampleBorrow<Self::X> + Sized,
|
||||
B2: rand::distributions::uniform::SampleBorrow<Self::X> + Sized,
|
||||
B1: rand::distr::uniform::SampleBorrow<Self::X> + Sized,
|
||||
B2: rand::distr::uniform::SampleBorrow<Self::X> + Sized,
|
||||
{
|
||||
Self(
|
||||
<u64 as rand::distributions::uniform::SampleUniform>::Sampler::new_inclusive(
|
||||
low.borrow().0,
|
||||
high.borrow().0,
|
||||
),
|
||||
<u64 as rand::distr::uniform::SampleUniform>::Sampler::new_inclusive(
|
||||
low.borrow().0,
|
||||
high.borrow().0,
|
||||
)
|
||||
.map(Self)
|
||||
}
|
||||
|
||||
fn sample<R: rand::prelude::Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
|
||||
|
||||
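Since `Lsn` now implements the rand 0.9 `SampleUniform` machinery shown above, callers such as pagebench can sample LSNs directly from a range. A minimal usage sketch (the helper function is illustrative, not from the patch):

```rust
use rand::Rng;
use utils::lsn::Lsn;

// Draw a uniformly distributed Lsn from a half-open range; this compiles only
// because `Lsn: SampleUniform` via the `LsnSampler` implementation above.
fn random_lsn_in(range: std::ops::Range<Lsn>) -> Lsn {
    rand::rng().random_range(range)
}

fn main() {
    let lsn = random_lsn_in(Lsn(0x10)..Lsn(0x1000));
    assert!(lsn >= Lsn(0x10) && lsn < Lsn(0x1000));
}
```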
@@ -11,7 +11,8 @@ use pageserver::tenant::layer_map::LayerMap;
|
||||
use pageserver::tenant::storage_layer::{LayerName, PersistentLayerDesc};
|
||||
use pageserver_api::key::Key;
|
||||
use pageserver_api::shard::TenantShardId;
|
||||
use rand::prelude::{SeedableRng, SliceRandom, StdRng};
|
||||
use rand::prelude::{SeedableRng, StdRng};
|
||||
use rand::seq::IndexedRandom;
|
||||
use utils::id::{TenantId, TimelineId};
|
||||
use utils::lsn::Lsn;
|
||||
|
||||
|
||||
@@ -89,7 +89,7 @@ async fn simulate(cmd: &SimulateCmd, results_path: &Path) -> anyhow::Result<()>
|
||||
let cold_key_range = splitpoint..key_range.end;
|
||||
|
||||
for i in 0..cmd.num_records {
|
||||
let chosen_range = if rand::thread_rng().gen_bool(0.9) {
|
||||
let chosen_range = if rand::rng().random_bool(0.9) {
|
||||
&hot_key_range
|
||||
} else {
|
||||
&cold_key_range
|
||||
|
||||
@@ -300,9 +300,9 @@ impl MockTimeline {
|
||||
key_range: &Range<Key>,
|
||||
) -> anyhow::Result<()> {
|
||||
crate::helpers::union_to_keyspace(&mut self.keyspace, vec![key_range.clone()]);
|
||||
let mut rng = rand::thread_rng();
|
||||
let mut rng = rand::rng();
|
||||
for _ in 0..num_records {
|
||||
self.ingest_record(rng.gen_range(key_range.clone()), len);
|
||||
self.ingest_record(rng.random_range(key_range.clone()), len);
|
||||
self.wal_ingested += len;
|
||||
}
|
||||
Ok(())
|
||||
|
||||
@@ -188,9 +188,9 @@ async fn main_impl(
|
||||
start_work_barrier.wait().await;
|
||||
loop {
|
||||
let (timeline, work) = {
|
||||
let mut rng = rand::thread_rng();
|
||||
let mut rng = rand::rng();
|
||||
let target = all_targets.choose(&mut rng).unwrap();
|
||||
let lsn = target.lsn_range.clone().map(|r| rng.gen_range(r));
|
||||
let lsn = target.lsn_range.clone().map(|r| rng.random_range(r));
|
||||
(target.timeline, Work { lsn })
|
||||
};
|
||||
let sender = work_senders.get(&timeline).unwrap();
|
||||
|
||||
@@ -326,8 +326,7 @@ async fn main_impl(
|
||||
.cloned()
|
||||
.collect();
|
||||
let weights =
|
||||
rand::distributions::weighted::WeightedIndex::new(ranges.iter().map(|v| v.len()))
|
||||
.unwrap();
|
||||
rand::distr::weighted::WeightedIndex::new(ranges.iter().map(|v| v.len())).unwrap();
|
||||
|
||||
Box::pin(async move {
|
||||
let scheme = match Url::parse(&args.page_service_connstring) {
|
||||
@@ -427,7 +426,7 @@ async fn run_worker(
|
||||
cancel: CancellationToken,
|
||||
rps_period: Option<Duration>,
|
||||
ranges: Vec<KeyRange>,
|
||||
weights: rand::distributions::weighted::WeightedIndex<i128>,
|
||||
weights: rand::distr::weighted::WeightedIndex<i128>,
|
||||
) {
|
||||
shared_state.start_work_barrier.wait().await;
|
||||
let client_start = Instant::now();
|
||||
@@ -469,9 +468,9 @@ async fn run_worker(
|
||||
}
|
||||
|
||||
// Pick a random page from a random relation.
|
||||
let mut rng = rand::thread_rng();
|
||||
let mut rng = rand::rng();
|
||||
let r = &ranges[weights.sample(&mut rng)];
|
||||
let key: i128 = rng.gen_range(r.start..r.end);
|
||||
let key: i128 = rng.random_range(r.start..r.end);
|
||||
let (rel_tag, block_no) = key_to_block(key);
|
||||
|
||||
let mut blks = VecDeque::with_capacity(batch_size);
|
||||
@@ -502,7 +501,7 @@ async fn run_worker(
|
||||
// We assume that the entire batch can fit within the relation.
|
||||
assert_eq!(blks.len(), batch_size, "incomplete batch");
|
||||
|
||||
let req_lsn = if rng.gen_bool(args.req_latest_probability) {
|
||||
let req_lsn = if rng.random_bool(args.req_latest_probability) {
|
||||
Lsn::MAX
|
||||
} else {
|
||||
r.timeline_lsn
|
||||
|
||||
@@ -7,7 +7,7 @@ use std::time::{Duration, Instant};
|
||||
use pageserver_api::models::HistoricLayerInfo;
|
||||
use pageserver_api::shard::TenantShardId;
|
||||
use pageserver_client::mgmt_api;
|
||||
use rand::seq::SliceRandom;
|
||||
use rand::seq::IndexedMutRandom;
|
||||
use tokio::sync::{OwnedSemaphorePermit, mpsc};
|
||||
use tokio::task::JoinSet;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
@@ -260,7 +260,7 @@ async fn timeline_actor(
|
||||
|
||||
loop {
|
||||
let layer_tx = {
|
||||
let mut rng = rand::thread_rng();
|
||||
let mut rng = rand::rng();
|
||||
timeline.layers.choose_mut(&mut rng).expect("no layers")
|
||||
};
|
||||
match layer_tx.try_send(permit.take().unwrap()) {
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
//! from data stored in object storage.
|
||||
//!
|
||||
use std::fmt::Write as FmtWrite;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Instant, SystemTime};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
@@ -420,12 +421,16 @@ where
|
||||
}
|
||||
|
||||
let mut min_restart_lsn: Lsn = Lsn::MAX;
|
||||
|
||||
let mut dbdir_cnt = 0;
|
||||
let mut rel_cnt = 0;
|
||||
|
||||
// Create tablespace directories
|
||||
for ((spcnode, dbnode), has_relmap_file) in
|
||||
self.timeline.list_dbdirs(self.lsn, self.ctx).await?
|
||||
{
|
||||
self.add_dbdir(spcnode, dbnode, has_relmap_file).await?;
|
||||
|
||||
dbdir_cnt += 1;
|
||||
// If full backup is requested, include all relation files.
|
||||
// Otherwise only include init forks of unlogged relations.
|
||||
let rels = self
|
||||
@@ -433,6 +438,7 @@ where
|
||||
.list_rels(spcnode, dbnode, Version::at(self.lsn), self.ctx)
|
||||
.await?;
|
||||
for &rel in rels.iter() {
|
||||
rel_cnt += 1;
|
||||
// Send init fork as main fork to provide well formed empty
|
||||
// contents of UNLOGGED relations. Postgres copies it in
|
||||
// `reinit.c` during recovery.
|
||||
@@ -455,6 +461,10 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
self.timeline
|
||||
.db_rel_count
|
||||
.store(Some(Arc::new((dbdir_cnt, rel_cnt))));
|
||||
|
||||
let start_time = Instant::now();
|
||||
let aux_files = self
|
||||
.timeline
|
||||
|
||||
@@ -126,7 +126,6 @@ fn main() -> anyhow::Result<()> {
|
||||
Some(cfg) => tracing_utils::OtelEnablement::Enabled {
|
||||
service_name: "pageserver".to_string(),
|
||||
export_config: (&cfg.export_config).into(),
|
||||
runtime: *COMPUTE_REQUEST_RUNTIME,
|
||||
},
|
||||
None => tracing_utils::OtelEnablement::Disabled,
|
||||
};
|
||||
|
||||
@@ -42,6 +42,7 @@ pub trait StorageControllerUpcallApi {
|
||||
fn re_attach(
|
||||
&self,
|
||||
conf: &PageServerConf,
|
||||
empty_local_disk: bool,
|
||||
) -> impl Future<
|
||||
Output = Result<HashMap<TenantShardId, ReAttachResponseTenant>, RetryForeverError>,
|
||||
> + Send;
|
||||
@@ -155,6 +156,7 @@ impl StorageControllerUpcallApi for StorageControllerUpcallClient {
|
||||
async fn re_attach(
|
||||
&self,
|
||||
conf: &PageServerConf,
|
||||
empty_local_disk: bool,
|
||||
) -> Result<HashMap<TenantShardId, ReAttachResponseTenant>, RetryForeverError> {
|
||||
let url = self
|
||||
.base_url
|
||||
@@ -226,6 +228,7 @@ impl StorageControllerUpcallApi for StorageControllerUpcallClient {
|
||||
let request = ReAttachRequest {
|
||||
node_id: self.node_id,
|
||||
register: register.clone(),
|
||||
empty_local_disk: Some(empty_local_disk),
|
||||
};
|
||||
|
||||
let response: ReAttachResponse = self
|
||||
|
||||
@@ -768,6 +768,7 @@ mod test {
|
||||
async fn re_attach(
|
||||
&self,
|
||||
_conf: &PageServerConf,
|
||||
_empty_local_disk: bool,
|
||||
) -> Result<HashMap<TenantShardId, ReAttachResponseTenant>, RetryForeverError> {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
@@ -155,7 +155,9 @@ impl FeatureResolver {
|
||||
);
|
||||
|
||||
let tenant_properties = PerTenantProperties {
|
||||
remote_size_mb: Some(rand::thread_rng().gen_range(100.0..1000000.00)),
|
||||
remote_size_mb: Some(rand::rng().random_range(100.0..1000000.00)),
|
||||
db_count_max: Some(rand::rng().random_range(1..1000)),
|
||||
rel_count_max: Some(rand::rng().random_range(1..1000)),
|
||||
}
|
||||
.into_posthog_properties();
|
||||
|
||||
@@ -344,6 +346,8 @@ impl FeatureResolver {
|
||||
|
||||
struct PerTenantProperties {
|
||||
pub remote_size_mb: Option<f64>,
|
||||
pub db_count_max: Option<usize>,
|
||||
pub rel_count_max: Option<usize>,
|
||||
}
|
||||
|
||||
impl PerTenantProperties {
|
||||
@@ -355,6 +359,18 @@ impl PerTenantProperties {
|
||||
PostHogFlagFilterPropertyValue::Number(remote_size_mb),
|
||||
);
|
||||
}
|
||||
if let Some(db_count) = self.db_count_max {
|
||||
properties.insert(
|
||||
"tenant_db_count_max".to_string(),
|
||||
PostHogFlagFilterPropertyValue::Number(db_count as f64),
|
||||
);
|
||||
}
|
||||
if let Some(rel_count) = self.rel_count_max {
|
||||
properties.insert(
|
||||
"tenant_rel_count_max".to_string(),
|
||||
PostHogFlagFilterPropertyValue::Number(rel_count as f64),
|
||||
);
|
||||
}
|
||||
properties
|
||||
}
|
||||
}
|
||||
@@ -409,7 +425,11 @@ impl TenantFeatureResolver {
|
||||
|
||||
/// Refresh the cached properties and flags on the critical path.
|
||||
pub fn refresh_properties_and_flags(&self, tenant_shard: &TenantShard) {
|
||||
// If the remote size of any timeline is None, this property is None.
|
||||
let mut remote_size_mb = Some(0.0);
|
||||
// If the db or rel count of any timeline is available, this property is available.
|
||||
let mut db_count_max = None;
|
||||
let mut rel_count_max = None;
|
||||
for timeline in tenant_shard.list_timelines() {
|
||||
let size = timeline.metrics.resident_physical_size_get();
|
||||
if size == 0 {
|
||||
@@ -419,9 +439,25 @@ impl TenantFeatureResolver {
|
||||
if let Some(ref mut remote_size_mb) = remote_size_mb {
|
||||
*remote_size_mb += size as f64 / 1024.0 / 1024.0;
|
||||
}
|
||||
if let Some(data) = timeline.db_rel_count.load_full() {
|
||||
let (db_count, rel_count) = *data.as_ref();
|
||||
if db_count_max.is_none() {
|
||||
db_count_max = Some(db_count);
|
||||
}
|
||||
if rel_count_max.is_none() {
|
||||
rel_count_max = Some(rel_count);
|
||||
}
|
||||
db_count_max = db_count_max.map(|max| max.max(db_count));
|
||||
rel_count_max = rel_count_max.map(|max| max.max(rel_count));
|
||||
}
|
||||
}
|
||||
self.cached_tenant_properties.store(Arc::new(
|
||||
PerTenantProperties { remote_size_mb }.into_posthog_properties(),
|
||||
PerTenantProperties {
|
||||
remote_size_mb,
|
||||
db_count_max,
|
||||
rel_count_max,
|
||||
}
|
||||
.into_posthog_properties(),
|
||||
));
|
||||
|
||||
// BEGIN: Update the feature flag on the critical path.
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
//!
|
||||
use std::collections::{HashMap, HashSet, hash_map};
|
||||
use std::ops::{ControlFlow, Range};
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::walingest::{WalIngestError, WalIngestErrorKind};
|
||||
use crate::{PERF_TRACE_TARGET, ensure_walingest};
|
||||
@@ -1254,11 +1255,16 @@ impl Timeline {
|
||||
let dbdir = DbDirectory::des(&buf)?;
|
||||
|
||||
let mut total_size: u64 = 0;
|
||||
let mut dbdir_cnt = 0;
|
||||
let mut rel_cnt = 0;
|
||||
|
||||
for (spcnode, dbnode) in dbdir.dbdirs.keys() {
|
||||
dbdir_cnt += 1;
|
||||
for rel in self
|
||||
.list_rels(*spcnode, *dbnode, Version::at(lsn), ctx)
|
||||
.await?
|
||||
{
|
||||
rel_cnt += 1;
|
||||
if self.cancel.is_cancelled() {
|
||||
return Err(CalculateLogicalSizeError::Cancelled);
|
||||
}
|
||||
@@ -1269,6 +1275,10 @@ impl Timeline {
|
||||
total_size += relsize as u64;
|
||||
}
|
||||
}
|
||||
|
||||
self.db_rel_count
|
||||
.store(Some(Arc::new((dbdir_cnt, rel_cnt))));
|
||||
|
||||
Ok(total_size * BLCKSZ as u64)
|
||||
}
|
||||
|
||||
|
||||
@@ -6161,11 +6161,11 @@ mod tests {
|
||||
use pageserver_api::keyspace::KeySpaceRandomAccum;
|
||||
use pageserver_api::models::{CompactionAlgorithm, CompactionAlgorithmSettings, LsnLease};
|
||||
use pageserver_compaction::helpers::overlaps_with;
|
||||
use rand::Rng;
|
||||
#[cfg(feature = "testing")]
|
||||
use rand::SeedableRng;
|
||||
#[cfg(feature = "testing")]
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, thread_rng};
|
||||
#[cfg(feature = "testing")]
|
||||
use std::ops::Range;
|
||||
use storage_layer::{IoConcurrency, PersistentLayerKey};
|
||||
@@ -6286,8 +6286,8 @@ mod tests {
|
||||
while lsn < lsn_range.end {
|
||||
let mut key = key_range.start;
|
||||
while key < key_range.end {
|
||||
let gap = random.gen_range(1..=100) <= spec.gap_chance;
|
||||
let will_init = random.gen_range(1..=100) <= spec.will_init_chance;
|
||||
let gap = random.random_range(1..=100) <= spec.gap_chance;
|
||||
let will_init = random.random_range(1..=100) <= spec.will_init_chance;
|
||||
|
||||
if gap {
|
||||
continue;
|
||||
@@ -6330,8 +6330,8 @@ mod tests {
|
||||
while lsn < lsn_range.end {
|
||||
let mut key = key_range.start;
|
||||
while key < key_range.end {
|
||||
let gap = random.gen_range(1..=100) <= spec.gap_chance;
|
||||
let will_init = random.gen_range(1..=100) <= spec.will_init_chance;
|
||||
let gap = random.random_range(1..=100) <= spec.gap_chance;
|
||||
let will_init = random.random_range(1..=100) <= spec.will_init_chance;
|
||||
|
||||
if gap {
|
||||
continue;
|
||||
@@ -7808,7 +7808,7 @@ mod tests {
|
||||
for _ in 0..50 {
|
||||
for _ in 0..NUM_KEYS {
|
||||
lsn = Lsn(lsn.0 + 0x10);
|
||||
let blknum = thread_rng().gen_range(0..NUM_KEYS);
|
||||
let blknum = rand::rng().random_range(0..NUM_KEYS);
|
||||
test_key.field6 = blknum as u32;
|
||||
let mut writer = tline.writer().await;
|
||||
writer
|
||||
@@ -7897,7 +7897,7 @@ mod tests {
|
||||
|
||||
for _ in 0..NUM_KEYS {
|
||||
lsn = Lsn(lsn.0 + 0x10);
|
||||
let blknum = thread_rng().gen_range(0..NUM_KEYS);
|
||||
let blknum = rand::rng().random_range(0..NUM_KEYS);
|
||||
test_key.field6 = blknum as u32;
|
||||
let mut writer = tline.writer().await;
|
||||
writer
|
||||
@@ -7965,7 +7965,7 @@ mod tests {
|
||||
|
||||
for _ in 0..NUM_KEYS {
|
||||
lsn = Lsn(lsn.0 + 0x10);
|
||||
let blknum = thread_rng().gen_range(0..NUM_KEYS);
|
||||
let blknum = rand::rng().random_range(0..NUM_KEYS);
|
||||
test_key.field6 = blknum as u32;
|
||||
let mut writer = tline.writer().await;
|
||||
writer
|
||||
@@ -8229,7 +8229,7 @@ mod tests {
|
||||
|
||||
for _ in 0..NUM_KEYS {
|
||||
lsn = Lsn(lsn.0 + 0x10);
|
||||
let blknum = thread_rng().gen_range(0..NUM_KEYS);
|
||||
let blknum = rand::rng().random_range(0..NUM_KEYS);
|
||||
test_key.field6 = (blknum * STEP) as u32;
|
||||
let mut writer = tline.writer().await;
|
||||
writer
|
||||
@@ -8502,7 +8502,7 @@ mod tests {
|
||||
for iter in 1..=10 {
|
||||
for _ in 0..NUM_KEYS {
|
||||
lsn = Lsn(lsn.0 + 0x10);
|
||||
let blknum = thread_rng().gen_range(0..NUM_KEYS);
|
||||
let blknum = rand::rng().random_range(0..NUM_KEYS);
|
||||
test_key.field6 = (blknum * STEP) as u32;
|
||||
let mut writer = tline.writer().await;
|
||||
writer
|
||||
@@ -11291,10 +11291,10 @@ mod tests {
|
||||
#[cfg(feature = "testing")]
|
||||
#[tokio::test]
|
||||
async fn test_read_path() -> anyhow::Result<()> {
|
||||
use rand::seq::SliceRandom;
|
||||
use rand::seq::IndexedRandom;
|
||||
|
||||
let seed = if cfg!(feature = "fuzz-read-path") {
|
||||
let seed: u64 = thread_rng().r#gen();
|
||||
let seed: u64 = rand::rng().random();
|
||||
seed
|
||||
} else {
|
||||
// Use a hard-coded seed when not in fuzzing mode.
|
||||
@@ -11308,8 +11308,8 @@ mod tests {
|
||||
|
||||
let (queries, will_init_chance, gap_chance) = if cfg!(feature = "fuzz-read-path") {
|
||||
const QUERIES: u64 = 5000;
|
||||
let will_init_chance: u8 = random.gen_range(0..=10);
|
||||
let gap_chance: u8 = random.gen_range(0..=50);
|
||||
let will_init_chance: u8 = random.random_range(0..=10);
|
||||
let gap_chance: u8 = random.random_range(0..=50);
|
||||
|
||||
(QUERIES, will_init_chance, gap_chance)
|
||||
} else {
|
||||
@@ -11410,7 +11410,8 @@ mod tests {
|
||||
|
||||
while used_keys.len() < tenant.conf.max_get_vectored_keys.get() {
|
||||
let selected_lsn = interesting_lsns.choose(&mut random).expect("not empty");
|
||||
let mut selected_key = start_key.add(random.gen_range(0..KEY_DIMENSION_SIZE));
|
||||
let mut selected_key =
|
||||
start_key.add(random.random_range(0..KEY_DIMENSION_SIZE));
|
||||
|
||||
while used_keys.len() < tenant.conf.max_get_vectored_keys.get() {
|
||||
if used_keys.contains(&selected_key)
|
||||
@@ -11425,7 +11426,7 @@ mod tests {
|
||||
.add_key(selected_key);
|
||||
used_keys.insert(selected_key);
|
||||
|
||||
let pick_next = random.gen_range(0..=100) <= PICK_NEXT_CHANCE;
|
||||
let pick_next = random.random_range(0..=100) <= PICK_NEXT_CHANCE;
|
||||
if pick_next {
|
||||
selected_key = selected_key.next();
|
||||
} else {
|
||||
|
||||
@@ -535,8 +535,8 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
pub(crate) fn random_array(len: usize) -> Vec<u8> {
|
||||
let mut rng = rand::thread_rng();
|
||||
(0..len).map(|_| rng.r#gen()).collect::<_>()
|
||||
let mut rng = rand::rng();
|
||||
(0..len).map(|_| rng.random()).collect::<_>()
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -588,9 +588,9 @@ pub(crate) mod tests {
|
||||
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
|
||||
let blobs = (0..1024)
|
||||
.map(|_| {
|
||||
let mut sz: u16 = rng.r#gen();
|
||||
let mut sz: u16 = rng.random();
|
||||
// Make 50% of the arrays small
|
||||
if rng.r#gen() {
|
||||
if rng.random() {
|
||||
sz &= 63;
|
||||
}
|
||||
random_array(sz.into())
|
||||
|
||||
@@ -1090,7 +1090,7 @@ pub(crate) mod tests {
|
||||
const NUM_KEYS: usize = 100000;
|
||||
let mut all_data: BTreeMap<u128, u64> = BTreeMap::new();
|
||||
for idx in 0..NUM_KEYS {
|
||||
let u: f64 = rand::thread_rng().gen_range(0.0..1.0);
|
||||
let u: f64 = rand::rng().random_range(0.0..1.0);
|
||||
let t = -(f64::ln(u));
|
||||
let key_int = (t * 1000000.0) as u128;
|
||||
|
||||
@@ -1116,7 +1116,7 @@ pub(crate) mod tests {
|
||||
|
||||
// Test get() operations on random keys, most of which will not exist
|
||||
for _ in 0..100000 {
|
||||
let key_int = rand::thread_rng().r#gen::<u128>();
|
||||
let key_int = rand::rng().random::<u128>();
|
||||
let search_key = u128::to_be_bytes(key_int);
|
||||
assert!(reader.get(&search_key, &ctx).await? == all_data.get(&key_int).cloned());
|
||||
}
|
||||
|
||||
@@ -508,8 +508,8 @@ mod tests {
|
||||
|
||||
let write_nbytes = cap * 2 + cap / 2;
|
||||
|
||||
let content: Vec<u8> = rand::thread_rng()
|
||||
.sample_iter(rand::distributions::Standard)
|
||||
let content: Vec<u8> = rand::rng()
|
||||
.sample_iter(rand::distr::StandardUniform)
|
||||
.take(write_nbytes)
|
||||
.collect();
|
||||
|
||||
@@ -565,8 +565,8 @@ mod tests {
|
||||
let cap = writer.mutable().capacity();
|
||||
drop(writer);
|
||||
|
||||
let content: Vec<u8> = rand::thread_rng()
|
||||
.sample_iter(rand::distributions::Standard)
|
||||
let content: Vec<u8> = rand::rng()
|
||||
.sample_iter(rand::distr::StandardUniform)
|
||||
.take(cap * 2 + cap / 2)
|
||||
.collect();
|
||||
|
||||
@@ -614,8 +614,8 @@ mod tests {
|
||||
let cap = mutable.capacity();
|
||||
let align = mutable.align();
|
||||
drop(writer);
|
||||
let content: Vec<u8> = rand::thread_rng()
|
||||
.sample_iter(rand::distributions::Standard)
|
||||
let content: Vec<u8> = rand::rng()
|
||||
.sample_iter(rand::distr::StandardUniform)
|
||||
.take(cap * 2 + cap / 2)
|
||||
.collect();
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ use pageserver_api::shard::{
|
||||
};
|
||||
use pageserver_api::upcall_api::ReAttachResponseTenant;
|
||||
use rand::Rng;
|
||||
use rand::distributions::Alphanumeric;
|
||||
use rand::distr::Alphanumeric;
|
||||
use remote_storage::TimeoutOrCancel;
|
||||
use sysinfo::SystemExt;
|
||||
use tokio::fs;
|
||||
@@ -218,7 +218,7 @@ async fn safe_rename_tenant_dir(path: impl AsRef<Utf8Path>) -> std::io::Result<U
|
||||
std::io::ErrorKind::InvalidInput,
|
||||
"Path must be absolute",
|
||||
))?;
|
||||
let rand_suffix = rand::thread_rng()
|
||||
let rand_suffix = rand::rng()
|
||||
.sample_iter(&Alphanumeric)
|
||||
.take(8)
|
||||
.map(char::from)
|
||||
@@ -352,7 +352,8 @@ async fn init_load_generations(
|
||||
let client = StorageControllerUpcallClient::new(conf, cancel);
|
||||
info!("Calling {} API to re-attach tenants", client.base_url());
|
||||
// If we are configured to use the control plane API, then it is the source of truth for what tenants to load.
|
||||
match client.re_attach(conf).await {
|
||||
let empty_local_disk = tenant_confs.is_empty();
|
||||
match client.re_attach(conf, empty_local_disk).await {
|
||||
Ok(tenants) => tenants
|
||||
.into_iter()
|
||||
.flat_map(|(id, rart)| {
|
||||
|
||||
@@ -25,7 +25,7 @@ pub(super) fn period_jitter(d: Duration, pct: u32) -> Duration {
|
||||
if d == Duration::ZERO {
|
||||
d
|
||||
} else {
|
||||
rand::thread_rng().gen_range((d * (100 - pct)) / 100..(d * (100 + pct)) / 100)
|
||||
rand::rng().random_range((d * (100 - pct)) / 100..(d * (100 + pct)) / 100)
|
||||
}
|
||||
}
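As a quick worked example of the jitter bounds computed above (not part of the patch): with a 60-second period and pct = 10, the sampled interval is 54 s..66 s.

```rust
use std::time::Duration;

// Reproduce the bounds that `period_jitter` samples from.
fn jitter_bounds(d: Duration, pct: u32) -> (Duration, Duration) {
    ((d * (100 - pct)) / 100, (d * (100 + pct)) / 100)
}

fn main() {
    // d = 60s, pct = 10  =>  sampled interval is 54s..66s.
    let (lo, hi) = jitter_bounds(Duration::from_secs(60), 10);
    assert_eq!(lo, Duration::from_secs(54));
    assert_eq!(hi, Duration::from_secs(66));
}
```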
|
||||
|
||||
@@ -35,7 +35,7 @@ pub(super) fn period_warmup(period: Duration) -> Duration {
|
||||
if period == Duration::ZERO {
|
||||
period
|
||||
} else {
|
||||
rand::thread_rng().gen_range(Duration::ZERO..period)
|
||||
rand::rng().random_range(Duration::ZERO..period)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1634,7 +1634,8 @@ pub(crate) mod test {
|
||||
use bytes::Bytes;
|
||||
use itertools::MinMaxResult;
|
||||
use postgres_ffi::PgMajorVersion;
|
||||
use rand::prelude::{SeedableRng, SliceRandom, StdRng};
|
||||
use rand::prelude::{SeedableRng, StdRng};
|
||||
use rand::seq::IndexedRandom;
|
||||
use rand::{Rng, RngCore};
|
||||
|
||||
/// Construct an index for a fictional delta layer and and then
|
||||
@@ -1788,14 +1789,14 @@ pub(crate) mod test {
|
||||
|
||||
let mut entries = Vec::new();
|
||||
for _ in 0..constants::KEY_COUNT {
|
||||
let count = rng.gen_range(1..constants::MAX_ENTRIES_PER_KEY);
|
||||
let count = rng.random_range(1..constants::MAX_ENTRIES_PER_KEY);
|
||||
let mut lsns_iter =
|
||||
std::iter::successors(Some(Lsn(constants::LSN_OFFSET.0 + 0x08)), |lsn| {
|
||||
Some(Lsn(lsn.0 + 0x08))
|
||||
});
|
||||
let mut lsns = Vec::new();
|
||||
while lsns.len() < count as usize {
|
||||
let take = rng.gen_bool(0.5);
|
||||
let take = rng.random_bool(0.5);
|
||||
let lsn = lsns_iter.next().unwrap();
|
||||
if take {
|
||||
lsns.push(lsn);
|
||||
@@ -1869,12 +1870,13 @@ pub(crate) mod test {
|
||||
for _ in 0..constants::RANGES_COUNT {
|
||||
let mut range: Option<Range<Key>> = Option::default();
|
||||
while range.is_none() || keyspace.overlaps(range.as_ref().unwrap()) {
|
||||
let range_start = rng.gen_range(start..end);
|
||||
let range_start = rng.random_range(start..end);
|
||||
let range_end_offset = range_start + constants::MIN_RANGE_SIZE;
|
||||
if range_end_offset >= end {
|
||||
range = Some(Key::from_i128(range_start)..Key::from_i128(end));
|
||||
} else {
|
||||
let range_end = rng.gen_range((range_start + constants::MIN_RANGE_SIZE)..end);
|
||||
let range_end =
|
||||
rng.random_range((range_start + constants::MIN_RANGE_SIZE)..end);
|
||||
range = Some(Key::from_i128(range_start)..Key::from_i128(range_end));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -440,8 +440,8 @@ mod tests {
|
||||
impl InMemoryFile {
|
||||
fn new_random(len: usize) -> Self {
|
||||
Self {
|
||||
content: rand::thread_rng()
|
||||
.sample_iter(rand::distributions::Standard)
|
||||
content: rand::rng()
|
||||
.sample_iter(rand::distr::StandardUniform)
|
||||
.take(len)
|
||||
.collect(),
|
||||
}
|
||||
@@ -498,7 +498,7 @@ mod tests {
|
||||
len
|
||||
}
|
||||
};
|
||||
rand::Rng::fill(&mut rand::thread_rng(), &mut dst_slice[nread..]); // to discover bugs
|
||||
rand::Rng::fill(&mut rand::rng(), &mut dst_slice[nread..]); // to discover bugs
|
||||
Ok((dst, nread))
|
||||
}
|
||||
}
|
||||
@@ -763,7 +763,7 @@ mod tests {
|
||||
let len = std::cmp::min(dst.bytes_total(), mocked_bytes.len());
|
||||
let dst_slice: &mut [u8] = dst.as_mut_rust_slice_full_zeroed();
|
||||
dst_slice[..len].copy_from_slice(&mocked_bytes[..len]);
|
||||
rand::Rng::fill(&mut rand::thread_rng(), &mut dst_slice[len..]); // to discover bugs
|
||||
rand::Rng::fill(&mut rand::rng(), &mut dst_slice[len..]); // to discover bugs
|
||||
Ok((dst, len))
|
||||
}
|
||||
Err(e) => Err(std::io::Error::other(e)),
|
||||
|
||||
@@ -515,7 +515,7 @@ pub(crate) async fn sleep_random_range(
|
||||
interval: RangeInclusive<Duration>,
|
||||
cancel: &CancellationToken,
|
||||
) -> Result<Duration, Cancelled> {
|
||||
let delay = rand::thread_rng().gen_range(interval);
|
||||
let delay = rand::rng().random_range(interval);
|
||||
if delay == Duration::ZERO {
|
||||
return Ok(delay);
|
||||
}
|
||||
|
||||
@@ -287,7 +287,7 @@ pub struct Timeline {
|
||||
ancestor_lsn: Lsn,
|
||||
|
||||
// The LSN of gc-compaction that was last applied to this timeline.
|
||||
gc_compaction_state: ArcSwap<Option<GcCompactionState>>,
|
||||
gc_compaction_state: ArcSwapOption<GcCompactionState>,
|
||||
|
||||
pub(crate) metrics: Arc<TimelineMetrics>,
|
||||
|
||||
@@ -448,7 +448,11 @@ pub struct Timeline {
|
||||
/// A channel to send async requests to prepare a basebackup for the basebackup cache.
|
||||
basebackup_cache: Arc<BasebackupCache>,
|
||||
|
||||
#[expect(dead_code)]
|
||||
feature_resolver: Arc<TenantFeatureResolver>,
|
||||
|
||||
/// Basebackup collects the database and relation counts and stores them here. Used for the reldirv2 rollout.
|
||||
pub(crate) db_rel_count: ArcSwapOption<(usize, usize)>,
|
||||
}
|
||||
|
||||
pub(crate) enum PreviousHeatmap {
|
||||
@@ -2826,7 +2830,7 @@ impl Timeline {
|
||||
if r.numerator == 0 {
|
||||
false
|
||||
} else {
|
||||
rand::thread_rng().gen_range(0..r.denominator) < r.numerator
|
||||
rand::rng().random_range(0..r.denominator) < r.numerator
|
||||
}
|
||||
}
|
||||
None => false,
|
||||
@@ -3236,7 +3240,7 @@ impl Timeline {
|
||||
}),
|
||||
disk_consistent_lsn: AtomicLsn::new(disk_consistent_lsn.0),
|
||||
|
||||
gc_compaction_state: ArcSwap::new(Arc::new(gc_compaction_state)),
|
||||
gc_compaction_state: ArcSwapOption::from_pointee(gc_compaction_state),
|
||||
|
||||
last_freeze_at: AtomicLsn::new(disk_consistent_lsn.0),
|
||||
last_freeze_ts: RwLock::new(Instant::now()),
|
||||
@@ -3341,6 +3345,8 @@ impl Timeline {
|
||||
basebackup_cache: resources.basebackup_cache,
|
||||
|
||||
feature_resolver: resources.feature_resolver.clone(),
|
||||
|
||||
db_rel_count: ArcSwapOption::from_pointee(None),
|
||||
};
|
||||
|
||||
result.repartition_threshold =
|
||||
@@ -3412,7 +3418,7 @@ impl Timeline {
|
||||
gc_compaction_state: GcCompactionState,
|
||||
) -> anyhow::Result<()> {
|
||||
self.gc_compaction_state
|
||||
.store(Arc::new(Some(gc_compaction_state.clone())));
|
||||
.store(Some(Arc::new(gc_compaction_state.clone())));
|
||||
self.remote_client
|
||||
.schedule_index_upload_for_gc_compaction_state_update(gc_compaction_state)
|
||||
}
|
||||
@@ -3428,7 +3434,10 @@ impl Timeline {
|
||||
}
|
||||
|
||||
pub(crate) fn get_gc_compaction_state(&self) -> Option<GcCompactionState> {
|
||||
self.gc_compaction_state.load_full().as_ref().clone()
|
||||
self.gc_compaction_state
|
||||
.load()
|
||||
.as_ref()
|
||||
.map(|x| x.as_ref().clone())
|
||||
}
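For readers not familiar with the `arc-swap` types this hunk switches to, a self-contained sketch of the `ArcSwapOption` store/load pattern used above (the `GcState` type is a stand-in, not the real `GcCompactionState`):

```rust
use arc_swap::ArcSwapOption;
use std::sync::Arc;

#[derive(Clone, Debug, PartialEq)]
struct GcState(u64);

fn main() {
    // Starts out empty, like `ArcSwapOption::from_pointee(None)` in the patch.
    let slot: ArcSwapOption<GcState> = ArcSwapOption::from_pointee(None);
    assert!(slot.load().is_none());

    // Writers publish a new Arc atomically.
    slot.store(Some(Arc::new(GcState(42))));

    // Readers clone out of the load guard, mirroring
    // `self.gc_compaction_state.load().as_ref().map(|x| x.as_ref().clone())`.
    let current: Option<GcState> = slot.load().as_ref().map(|x| x.as_ref().clone());
    assert_eq!(current, Some(GcState(42)));
}
```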
|
||||
|
||||
/// Creates and starts the wal receiver.
|
||||
@@ -3908,7 +3917,7 @@ impl Timeline {
|
||||
// 1hour base
|
||||
(60_i64 * 60_i64)
|
||||
// 10min jitter
|
||||
+ rand::thread_rng().gen_range(-10 * 60..10 * 60),
|
||||
+ rand::rng().random_range(-10 * 60..10 * 60),
|
||||
)
|
||||
.expect("10min < 1hour"),
|
||||
);
|
||||
|
||||
@@ -1326,13 +1326,7 @@ impl Timeline {
|
||||
.max()
|
||||
};
|
||||
|
||||
let (partition_mode, partition_lsn) = if cfg!(test)
|
||||
|| cfg!(feature = "testing")
|
||||
|| self
|
||||
.feature_resolver
|
||||
.evaluate_boolean("image-compaction-boundary")
|
||||
.is_ok()
|
||||
{
|
||||
let (partition_mode, partition_lsn) = {
|
||||
let last_repartition_lsn = self.partitioning.read().1;
|
||||
let lsn = match l0_l1_boundary_lsn {
|
||||
Some(boundary) => gc_cutoff
|
||||
@@ -1348,8 +1342,6 @@ impl Timeline {
|
||||
} else {
|
||||
("l0_l1_boundary", lsn)
|
||||
}
|
||||
} else {
|
||||
("latest_record", self.get_last_record_lsn())
|
||||
};
|
||||
|
||||
// 2. Repartition and create image layers if necessary
|
||||
|
||||
@@ -362,7 +362,7 @@ impl<T: Types> Cache<T> {
|
||||
tokio::time::sleep(RETRY_BACKOFF).await;
|
||||
continue;
|
||||
} else {
|
||||
tracing::warn!(
|
||||
tracing::info!(
|
||||
"Failed to resolve tenant shard after {} attempts: {:?}",
|
||||
GET_MAX_RETRIES,
|
||||
e
|
||||
|
||||
@@ -1275,8 +1275,8 @@ mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use owned_buffers_io::io_buf_ext::IoBufExt;
|
||||
use rand::Rng;
|
||||
use rand::seq::SliceRandom;
|
||||
use rand::{Rng, thread_rng};
|
||||
|
||||
use super::*;
|
||||
use crate::context::DownloadBehavior;
|
||||
@@ -1358,7 +1358,7 @@ mod tests {
|
||||
|
||||
// Check that all the other FDs still work too. Use them in random order for
|
||||
// good measure.
|
||||
file_b_dupes.as_mut_slice().shuffle(&mut thread_rng());
|
||||
file_b_dupes.as_mut_slice().shuffle(&mut rand::rng());
|
||||
for vfile in file_b_dupes.iter_mut() {
|
||||
assert_first_512_eq(vfile, b"content_b").await;
|
||||
}
|
||||
@@ -1413,9 +1413,8 @@ mod tests {
|
||||
let ctx = ctx.detached_child(TaskKind::UnitTest, DownloadBehavior::Error);
|
||||
let hdl = rt.spawn(async move {
|
||||
let mut buf = IoBufferMut::with_capacity_zeroed(SIZE);
|
||||
let mut rng = rand::rngs::OsRng;
|
||||
for _ in 1..1000 {
|
||||
let f = &files[rng.gen_range(0..files.len())];
|
||||
let f = &files[rand::rng().random_range(0..files.len())];
|
||||
buf = f
|
||||
.read_exact_at(buf.slice_full(), 0, &ctx)
|
||||
.await
|
||||
|
||||
@@ -5,6 +5,7 @@ MODULE_big = neon
|
||||
OBJS = \
|
||||
$(WIN32RES) \
|
||||
communicator.o \
|
||||
communicator_process.o \
|
||||
extension_server.o \
|
||||
file_cache.o \
|
||||
hll.o \
|
||||
@@ -29,6 +30,11 @@ PG_CPPFLAGS = -I$(libpq_srcdir)
|
||||
SHLIB_LINK_INTERNAL = $(libpq)
|
||||
SHLIB_LINK = -lcurl
|
||||
|
||||
UNAME_S := $(shell uname -s)
|
||||
ifeq ($(UNAME_S), Darwin)
|
||||
SHLIB_LINK += -framework Security -framework CoreFoundation -framework SystemConfiguration
|
||||
endif
|
||||
|
||||
EXTENSION = neon
|
||||
DATA = \
|
||||
neon--1.0.sql \
|
||||
@@ -57,7 +63,8 @@ WALPROP_OBJS = \
|
||||
|
||||
# libcommunicator.a is built by cargo from the Rust sources under communicator/
|
||||
# subdirectory. `cargo build` also generates communicator_bindings.h.
|
||||
neon.o: communicator/communicator_bindings.h
|
||||
communicator_process.o: communicator/communicator_bindings.h
|
||||
file_cache.o: communicator/communicator_bindings.h
|
||||
|
||||
$(NEON_CARGO_ARTIFACT_TARGET_DIR)/libcommunicator.a communicator/communicator_bindings.h &:
|
||||
(cd $(srcdir)/communicator && cargo build $(CARGO_BUILD_FLAGS) $(CARGO_PROFILE))
|
||||
|
||||
@@ -1820,12 +1820,12 @@ nm_to_string(NeonMessage *msg)
|
||||
}
|
||||
case T_NeonGetPageResponse:
|
||||
{
|
||||
#if 0
|
||||
NeonGetPageResponse *msg_resp = (NeonGetPageResponse *) msg;
|
||||
#endif
|
||||
|
||||
appendStringInfoString(&s, "{\"type\": \"NeonGetPageResponse\"");
|
||||
appendStringInfo(&s, ", \"page\": \"XXX\"}");
|
||||
appendStringInfo(&s, ", \"rinfo\": %u/%u/%u", RelFileInfoFmt(msg_resp->req.rinfo));
|
||||
appendStringInfo(&s, ", \"forknum\": %d", msg_resp->req.forknum);
|
||||
appendStringInfo(&s, ", \"blkno\": %u", msg_resp->req.blkno);
|
||||
appendStringInfoChar(&s, '}');
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -11,9 +11,19 @@ crate-type = ["staticlib"]
# 'testing' feature is currently unused in the communicator, but we accept it for convenience of
# calling build scripts, so that you can pass the same feature to all packages.
testing = []
# 'rest_broker' feature is currently unused in the communicator, but we accept it for convenience of
# calling build scripts, so that you can pass the same feature to all packages.
rest_broker = []

[dependencies]
neon-shmem.workspace = true
axum.workspace = true
http.workspace = true
tokio = { workspace = true, features = ["macros", "net", "io-util", "rt", "rt-multi-thread"] }
tracing.workspace = true
tracing-subscriber.workspace = true

measured.workspace = true
utils.workspace = true
workspace_hack = { version = "0.1", path = "../../../workspace_hack" }

[build-dependencies]
@@ -1,7 +1,22 @@
This package will evolve into a "compute-pageserver communicator"
process and machinery. For now, it's just a dummy that doesn't do
anything interesting, but it allows us to test the compilation and
linking of Rust code into the Postgres extensions.
# Communicator

This package provides the so-called "compute-pageserver communicator",
or just "communicator" for short. The communicator is a separate
background worker process that runs in the PostgreSQL server. It's
part of the neon extension. Currently, it only provides an HTTP
endpoint for metrics, but in the future it will evolve to handle all
communications with the pageservers.

## Source code view

pgxn/neon/communicator_process.c
    Contains the code needed to start up the communicator process, and
    the glue that interacts with PostgreSQL code and the Rust
    code in the communicator process.

pgxn/neon/communicator/src/worker_process/
    Worker process main loop and glue code

At compilation time, pgxn/neon/communicator/ produces a static
library, libcommunicator.a. It is linked to the neon.so extension
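As a rough illustration of what the metrics endpoint looks like from a client's point of view, here is a minimal standalone sketch. It is not part of the diff: it assumes only that the socket is named neon-communicator.socket under the data directory (as defined further down in this commit) and speaks plain HTTP/1.0 over it with tokio; the function name and path handling are made up for the example.

// Sketch: fetch the communicator's Prometheus metrics over its Unix socket.
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::UnixStream;

async fn fetch_metrics(data_dir: &str) -> std::io::Result<String> {
    // Assumption: the control socket lives at <data_dir>/neon-communicator.socket.
    let path = format!("{data_dir}/neon-communicator.socket");
    let mut stream = UnixStream::connect(path).await?;

    // A bare HTTP/1.0 request; "Connection: close" lets us read until EOF.
    stream
        .write_all(b"GET /metrics HTTP/1.0\r\nHost: localhost\r\nConnection: close\r\n\r\n")
        .await?;

    let mut response = String::new();
    stream.read_to_string(&mut response).await?;
    Ok(response)
}

In practice the curl one-liner shown in control_socket.rs below does the same thing interactively.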
@@ -1,6 +1,5 @@
/// dummy function, just to test linking Rust functions into the C
/// extension
#[unsafe(no_mangle)]
pub extern "C" fn communicator_dummy(arg: u32) -> u32 {
    arg + 1
}
mod worker_process;

/// Name of the Unix Domain Socket that serves the metrics, and other APIs in the
/// future. This is within the Postgres data directory.
const NEON_COMMUNICATOR_SOCKET_NAME: &str = "neon-communicator.socket";
51
pgxn/neon/communicator/src/worker_process/callbacks.rs
Normal file
@@ -0,0 +1,51 @@
//! C callbacks to PostgreSQL facilities that the neon extension needs to provide. These
//! are implemented in `neon/pgxn/communicator_process.c`. The function signatures had
//! better match!
//!
//! These are called from the communicator threads! Be careful what you do; most Postgres
//! functions are not safe to call in that context.

#[cfg(not(test))]
unsafe extern "C" {
    pub fn callback_set_my_latch_unsafe();
    pub fn callback_get_lfc_metrics_unsafe() -> LfcMetrics;
}

// Compile unit tests with dummy versions of the functions. Unit tests cannot call back
// into the C code. (As of this writing, no unit tests even exist in the communicator
// package, but the code coverage build still builds these and tries to link with the
// external C code.)
#[cfg(test)]
unsafe fn callback_set_my_latch_unsafe() {
    panic!("not usable in unit tests");
}
#[cfg(test)]
unsafe fn callback_get_lfc_metrics_unsafe() -> LfcMetrics {
    panic!("not usable in unit tests");
}

// safe wrappers

pub(super) fn callback_set_my_latch() {
    unsafe { callback_set_my_latch_unsafe() };
}

pub(super) fn callback_get_lfc_metrics() -> LfcMetrics {
    unsafe { callback_get_lfc_metrics_unsafe() }
}

/// Return type of the callback_get_lfc_metrics() function.
#[repr(C)]
pub struct LfcMetrics {
    pub lfc_cache_size_limit: i64,
    pub lfc_hits: i64,
    pub lfc_misses: i64,
    pub lfc_used: i64,
    pub lfc_writes: i64,

    // Working set size looking back 1..60 minutes.
    //
    // Index 0 is the size of the working set accessed within the last 1 minute,
    // index 59 is the size of the working set accessed within the last 60 minutes.
    pub lfc_approximate_working_set_size_windows: [i64; 60],
}
102
pgxn/neon/communicator/src/worker_process/control_socket.rs
Normal file
@@ -0,0 +1,102 @@
//! Communicator control socket.
//!
//! Currently, the control socket is used to provide information about the communicator
//! process, file cache etc. as prometheus metrics. In the future, it can be used to
//! expose more things.
//!
//! The exporter speaks HTTP and listens on a Unix Domain Socket under the Postgres
//! data directory. For debugging, you can access it with curl:
//!
//! ```sh
//! curl --unix-socket neon-communicator.socket http://localhost/metrics
//! ```
//!
use axum::Router;
use axum::body::Body;
use axum::extract::State;
use axum::response::Response;
use http::StatusCode;
use http::header::CONTENT_TYPE;

use measured::MetricGroup;
use measured::text::BufferedTextEncoder;

use std::io::ErrorKind;

use tokio::net::UnixListener;

use crate::NEON_COMMUNICATOR_SOCKET_NAME;
use crate::worker_process::main_loop::CommunicatorWorkerProcessStruct;

impl CommunicatorWorkerProcessStruct {
    /// Launch the listener
    pub(crate) async fn launch_control_socket_listener(
        &'static self,
    ) -> Result<(), std::io::Error> {
        use axum::routing::get;
        let app = Router::new()
            .route("/metrics", get(get_metrics))
            .route("/autoscaling_metrics", get(get_autoscaling_metrics))
            .route("/debug/panic", get(handle_debug_panic))
            .with_state(self);

        // If the server is restarted, there might be an old socket still
        // lying around. Remove it first.
        match std::fs::remove_file(NEON_COMMUNICATOR_SOCKET_NAME) {
            Ok(()) => {
                tracing::warn!("removed stale control socket");
            }
            Err(e) if e.kind() == ErrorKind::NotFound => {}
            Err(e) => {
                tracing::error!("could not remove stale control socket: {e:#}");
                // Try to proceed anyway. It will likely fail below though.
            }
        };

        // Create the unix domain socket and start listening on it
        let listener = UnixListener::bind(NEON_COMMUNICATOR_SOCKET_NAME)?;

        tokio::spawn(async {
            tracing::info!("control socket listener spawned");
            axum::serve(listener, app)
                .await
                .expect("axum::serve never returns")
        });

        Ok(())
    }
}

/// Expose all Prometheus metrics.
async fn get_metrics(State(state): State<&CommunicatorWorkerProcessStruct>) -> Response {
    tracing::trace!("/metrics requested");
    metrics_to_response(&state).await
}

/// Expose Prometheus metrics, for use by the autoscaling agent.
///
/// This is a subset of all the metrics.
async fn get_autoscaling_metrics(
    State(state): State<&CommunicatorWorkerProcessStruct>,
) -> Response {
    tracing::trace!("/autoscaling_metrics requested");
    metrics_to_response(&state.lfc_metrics).await
}

async fn handle_debug_panic(State(_state): State<&CommunicatorWorkerProcessStruct>) -> Response {
    panic!("test HTTP handler task panic");
}

/// Helper function to convert prometheus metrics to a text response
async fn metrics_to_response(metrics: &(dyn MetricGroup<BufferedTextEncoder> + Sync)) -> Response {
    let mut enc = BufferedTextEncoder::new();
    metrics
        .collect_group_into(&mut enc)
        .unwrap_or_else(|never| match never {});

    Response::builder()
        .status(StatusCode::OK)
        .header(CONTENT_TYPE, "application/text")
        .body(Body::from(enc.finish()))
        .unwrap()
}
83
pgxn/neon/communicator/src/worker_process/lfc_metrics.rs
Normal file
@@ -0,0 +1,83 @@
use measured::{
    FixedCardinalityLabel, Gauge, GaugeVec, LabelGroup, MetricGroup,
    label::{LabelName, LabelValue, StaticLabelSet},
    metric::{MetricEncoding, gauge::GaugeState, group::Encoding},
};

use super::callbacks::callback_get_lfc_metrics;

pub(crate) struct LfcMetricsCollector;

#[derive(MetricGroup)]
#[metric(new())]
struct LfcMetricsGroup {
    /// LFC cache size limit in bytes
    lfc_cache_size_limit: Gauge,
    /// LFC cache hits
    lfc_hits: Gauge,
    /// LFC cache misses
    lfc_misses: Gauge,
    /// LFC chunks used (chunk = 1MB)
    lfc_used: Gauge,
    /// LFC cache writes
    lfc_writes: Gauge,
    /// Approximate working set size in pages of 8192 bytes
    #[metric(init = GaugeVec::dense())]
    lfc_approximate_working_set_size_windows: GaugeVec<StaticLabelSet<MinuteAsSeconds>>,
}

impl<T: Encoding> MetricGroup<T> for LfcMetricsCollector
where
    GaugeState: MetricEncoding<T>,
{
    fn collect_group_into(&self, enc: &mut T) -> Result<(), <T as Encoding>::Err> {
        let g = LfcMetricsGroup::new();

        let lfc_metrics = callback_get_lfc_metrics();

        g.lfc_cache_size_limit.set(lfc_metrics.lfc_cache_size_limit);
        g.lfc_hits.set(lfc_metrics.lfc_hits);
        g.lfc_misses.set(lfc_metrics.lfc_misses);
        g.lfc_used.set(lfc_metrics.lfc_used);
        g.lfc_writes.set(lfc_metrics.lfc_writes);

        for i in 0..60 {
            let val = lfc_metrics.lfc_approximate_working_set_size_windows[i];
            g.lfc_approximate_working_set_size_windows
                .set(MinuteAsSeconds(i), val);
        }

        g.collect_group_into(enc)
    }
}

/// This stores the values in range 0..60, and
/// encodes them as seconds (60, 120, 180, ..., 3600).
#[derive(Clone, Copy)]
struct MinuteAsSeconds(usize);

impl FixedCardinalityLabel for MinuteAsSeconds {
    fn cardinality() -> usize {
        60
    }

    fn encode(&self) -> usize {
        self.0
    }

    fn decode(value: usize) -> Self {
        Self(value)
    }
}

impl LabelValue for MinuteAsSeconds {
    fn visit<V: measured::label::LabelVisitor>(&self, v: V) -> V::Output {
        v.write_int((self.0 + 1) as i64 * 60)
    }
}

impl LabelGroup for MinuteAsSeconds {
    fn visit_values(&self, v: &mut impl measured::label::LabelGroupVisitor) {
        v.write_value(LabelName::from_str("duration_seconds"), self);
    }
}
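To make the label encoding above concrete: window index i covers the last (i + 1) minutes and is exported with a duration_seconds label of (i + 1) * 60. The following is a standalone sketch, for illustration only; the metric name and label mirror the group above, the values are made up, and it does not use the measured crate.

// Sketch of the MinuteAsSeconds mapping: index 0 -> 60 s, index 59 -> 3600 s.
fn main() {
    let fake_values = [42i64; 60]; // placeholder working-set sizes, one per window
    for (i, val) in fake_values.iter().enumerate() {
        let duration_seconds = (i + 1) * 60;
        println!(
            "lfc_approximate_working_set_size_windows{{duration_seconds=\"{duration_seconds}\"}} {val}"
        );
    }
}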
250
pgxn/neon/communicator/src/worker_process/logging.rs
Normal file
@@ -0,0 +1,250 @@
//! Glue code to hook up Rust logging with the `tracing` crate to the PostgreSQL log.
//!
//! In the Rust threads, the log messages are written to an mpsc channel, and the Postgres
//! process latch is raised. That wakes up the loop in the main thread, see
//! `communicator_new_bgworker_main()`. It reads the message from the channel and
//! ereport()s it. This ensures that only one thread, the main thread, calls the
//! PostgreSQL logging routines at any time.

use std::ffi::c_char;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::mpsc::sync_channel;
use std::sync::mpsc::{Receiver, SyncSender};
use std::sync::mpsc::{TryRecvError, TrySendError};

use tracing::info;
use tracing::{Event, Level, Metadata, Subscriber};
use tracing_subscriber::filter::LevelFilter;
use tracing_subscriber::fmt::format::Writer;
use tracing_subscriber::fmt::{FmtContext, FormatEvent, FormatFields, FormattedFields, MakeWriter};
use tracing_subscriber::registry::LookupSpan;

use crate::worker_process::callbacks::callback_set_my_latch;

/// This handle is passed to the C code, and used by [`communicator_worker_poll_logging`]
pub struct LoggingReceiver {
    receiver: Receiver<FormattedEventWithMeta>,
}

/// This is passed to `tracing`
struct LoggingSender {
    sender: SyncSender<FormattedEventWithMeta>,
}

static DROPPED_EVENT_COUNT: AtomicU64 = AtomicU64::new(0);

/// Called once, at worker process startup. The returned LoggingReceiver is passed back
/// in the subsequent calls to `pump_logging`. It is opaque to the C code.
#[unsafe(no_mangle)]
pub extern "C" fn communicator_worker_configure_logging() -> Box<LoggingReceiver> {
    let (sender, receiver) = sync_channel(1000);

    let receiver = LoggingReceiver { receiver };
    let sender = LoggingSender { sender };

    use tracing_subscriber::prelude::*;
    let r = tracing_subscriber::registry();

    let r = r.with(
        tracing_subscriber::fmt::layer()
            .with_ansi(false)
            .event_format(SimpleFormatter)
            .with_writer(sender)
            // TODO: derive this from log_min_messages? Currently the code in
            // communicator_process.c forces log_min_messages='INFO'.
            .with_filter(LevelFilter::from_level(Level::INFO)),
    );
    r.init();

    info!("communicator process logging started");

    Box::new(receiver)
}

/// Read one message from the logging queue. This is essentially a wrapper around Receiver,
/// with a C-friendly signature.
///
/// The message is copied into *errbuf, which is a caller-supplied buffer of size
/// `errbuf_len`. If the message doesn't fit in the buffer, it is truncated. It is always
/// NULL-terminated.
///
/// The error level is returned in *elevel_p. It's one of the PostgreSQL error levels, see
/// elog.h.
///
/// If there was a message, *dropped_event_count_p is also updated with a counter of how
/// many log messages in total have been dropped. By comparing that with the value from the
/// previous call, you can tell how many were dropped since the last call.
///
/// Returns:
///
/// 0 if there were no messages
/// 1 if there was a message. The message and its level are returned in
///   *errbuf and *elevel_p. *dropped_event_count_p is also updated.
/// -1 on error, i.e. the other end of the queue was disconnected
#[unsafe(no_mangle)]
pub extern "C" fn communicator_worker_poll_logging(
    state: &mut LoggingReceiver,
    errbuf: *mut c_char,
    errbuf_len: u32,
    elevel_p: &mut i32,
    dropped_event_count_p: &mut u64,
) -> i32 {
    let msg = match state.receiver.try_recv() {
        Err(TryRecvError::Empty) => return 0,
        Err(TryRecvError::Disconnected) => return -1,
        Ok(msg) => msg,
    };

    let src: &[u8] = &msg.message;
    let dst: *mut u8 = errbuf.cast();
    let len = std::cmp::min(src.len(), errbuf_len as usize - 1);
    unsafe {
        std::ptr::copy_nonoverlapping(src.as_ptr(), dst, len);
        *(dst.add(len)) = b'\0'; // NULL terminator
    }

    // Map the tracing Level to PostgreSQL elevel.
    //
    // XXX: These levels are copied from PostgreSQL's elog.h. Introduce another enum to
    // hide these?
    *elevel_p = match msg.level {
        Level::TRACE => 10, // DEBUG5
        Level::DEBUG => 14, // DEBUG1
        Level::INFO => 17,  // INFO
        Level::WARN => 19,  // WARNING
        Level::ERROR => 21, // ERROR
    };

    *dropped_event_count_p = DROPPED_EVENT_COUNT.load(Ordering::Relaxed);

    1
}

//---- The following functions can be called from any thread ----

#[derive(Clone)]
struct FormattedEventWithMeta {
    message: Vec<u8>,
    level: tracing::Level,
}

impl Default for FormattedEventWithMeta {
    fn default() -> Self {
        FormattedEventWithMeta {
            message: Vec::new(),
            level: tracing::Level::DEBUG,
        }
    }
}

struct EventBuilder<'a> {
    event: FormattedEventWithMeta,

    sender: &'a LoggingSender,
}

impl std::io::Write for EventBuilder<'_> {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        self.event.message.write(buf)
    }
    fn flush(&mut self) -> std::io::Result<()> {
        self.sender.send_event(self.event.clone());
        Ok(())
    }
}

impl Drop for EventBuilder<'_> {
    fn drop(&mut self) {
        let sender = self.sender;
        let event = std::mem::take(&mut self.event);

        sender.send_event(event);
    }
}

impl<'a> MakeWriter<'a> for LoggingSender {
    type Writer = EventBuilder<'a>;

    fn make_writer(&'a self) -> Self::Writer {
        panic!("not expected to be called when make_writer_for is implemented");
    }

    fn make_writer_for(&'a self, meta: &Metadata<'_>) -> Self::Writer {
        EventBuilder {
            event: FormattedEventWithMeta {
                message: Vec::new(),
                level: *meta.level(),
            },
            sender: self,
        }
    }
}

impl LoggingSender {
    fn send_event(&self, e: FormattedEventWithMeta) {
        match self.sender.try_send(e) {
            Ok(()) => {
                // notify the main thread
                callback_set_my_latch();
            }
            Err(TrySendError::Disconnected(_)) => {}
            Err(TrySendError::Full(_)) => {
                // The queue is full, cannot send any more. To avoid blocking the tokio
                // thread, simply drop the message. Better to lose some logs than get
                // stuck if there's a problem with the logging.
                //
                // Record the fact that a message was dropped by incrementing the
                // counter.
                DROPPED_EVENT_COUNT.fetch_add(1, Ordering::Relaxed);
            }
        }
    }
}

/// Simple formatter implementation for tracing_subscriber, which prints the log spans and
/// message part like the default formatter, but no timestamp or error level. The error
/// level is captured separately by `FormattedEventWithMeta`, and when the error is
/// printed by the main thread, with PostgreSQL ereport(), it gets a timestamp at that
/// point. (The timestamp printed will therefore lag behind the timestamp on the event
/// here, if the main thread doesn't process the log message promptly.)
struct SimpleFormatter;

impl<S, N> FormatEvent<S, N> for SimpleFormatter
where
    S: Subscriber + for<'a> LookupSpan<'a>,
    N: for<'a> FormatFields<'a> + 'static,
{
    fn format_event(
        &self,
        ctx: &FmtContext<'_, S, N>,
        mut writer: Writer<'_>,
        event: &Event<'_>,
    ) -> std::fmt::Result {
        // Format all the spans in the event's span context.
        if let Some(scope) = ctx.event_scope() {
            for span in scope.from_root() {
                write!(writer, "{}", span.name())?;

                // `FormattedFields` is a formatted representation of the span's fields,
                // which is stored in its extensions by the `fmt` layer's `new_span`
                // method. The fields will have been formatted by the same field formatter
                // that's provided to the event formatter in the `FmtContext`.
                let ext = span.extensions();
                let fields = &ext
                    .get::<FormattedFields<N>>()
                    .expect("will never be `None`");

                // Skip formatting the fields if the span had no fields.
                if !fields.is_empty() {
                    write!(writer, "{{{fields}}}")?;
                }
                write!(writer, ": ")?;
            }
        }

        // Write the fields on the event.
        ctx.field_format().format_fields(writer.by_ref(), event)?;

        Ok(())
    }
}
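The non-blocking, drop-on-overflow behaviour described above boils down to a small pattern: try_send into a bounded channel, count the drops, and let the single consumer report the losses in one batch. A standalone sketch of just that pattern, for illustration only (the names and buffer size are made up, and the latch/ereport machinery is omitted):

use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::mpsc::{sync_channel, TrySendError};

static DROPPED: AtomicU64 = AtomicU64::new(0);

fn main() {
    // Bounded queue: producers never block, they drop on overflow instead.
    let (tx, rx) = sync_channel::<String>(4);

    for i in 0..10 {
        match tx.try_send(format!("message {i}")) {
            // The real code would also wake the consumer (set the latch) here.
            Ok(()) => {}
            Err(TrySendError::Full(_)) => {
                DROPPED.fetch_add(1, Ordering::Relaxed);
            }
            Err(TrySendError::Disconnected(_)) => break,
        }
    }

    // Consumer side: drain what made it through, then report losses once.
    while let Ok(msg) = rx.try_recv() {
        println!("{msg}");
    }
    let dropped = DROPPED.load(Ordering::Relaxed);
    if dropped > 0 {
        println!("{dropped} messages were dropped because the buffer was full");
    }
}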
66
pgxn/neon/communicator/src/worker_process/main_loop.rs
Normal file
@@ -0,0 +1,66 @@
use std::str::FromStr as _;

use crate::worker_process::lfc_metrics::LfcMetricsCollector;

use measured::MetricGroup;
use measured::metric::MetricEncoding;
use measured::metric::gauge::GaugeState;
use measured::metric::group::Encoding;
use utils::id::{TenantId, TimelineId};

pub struct CommunicatorWorkerProcessStruct {
    runtime: tokio::runtime::Runtime,

    /*** Metrics ***/
    pub(crate) lfc_metrics: LfcMetricsCollector,
}

/// Launch the communicator process's Rust subsystems
pub(super) fn init(
    tenant_id: Option<&str>,
    timeline_id: Option<&str>,
) -> Result<&'static CommunicatorWorkerProcessStruct, String> {
    // The caller validated these already
    let _tenant_id = tenant_id
        .map(TenantId::from_str)
        .transpose()
        .map_err(|e| format!("invalid tenant ID: {e}"))?;
    let _timeline_id = timeline_id
        .map(TimelineId::from_str)
        .transpose()
        .map_err(|e| format!("invalid timeline ID: {e}"))?;

    let runtime = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .thread_name("communicator thread")
        .build()
        .unwrap();

    let worker_struct = CommunicatorWorkerProcessStruct {
        // Note: it's important to not drop the runtime, or all the tasks are dropped
        // too. Including it in the returned struct is one way to keep it around.
        runtime,

        // metrics
        lfc_metrics: LfcMetricsCollector,
    };
    let worker_struct = Box::leak(Box::new(worker_struct));

    // Start the listener on the control socket
    worker_struct
        .runtime
        .block_on(worker_struct.launch_control_socket_listener())
        .map_err(|e| e.to_string())?;

    Ok(worker_struct)
}

impl<T> MetricGroup<T> for CommunicatorWorkerProcessStruct
where
    T: Encoding,
    GaugeState: MetricEncoding<T>,
{
    fn collect_group_into(&self, enc: &mut T) -> Result<(), T::Err> {
        self.lfc_metrics.collect_group_into(enc)
    }
}
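The comment about not dropping the runtime is easy to miss, so here is a minimal standalone sketch of the same idea, for illustration only (it assumes a tokio dependency with the rt-multi-thread and time features; the function name and the dummy task are made up): a tokio Runtime that goes out of scope shuts down and cancels its spawned tasks, while leaking it keeps the background tasks running for the life of the process.

// Sketch: keep a tokio runtime (and everything spawned on it) alive for the
// whole process by leaking it, mirroring the Box::leak in main_loop.rs above.
fn start_background_runtime() -> &'static tokio::runtime::Runtime {
    let runtime = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .expect("failed to build tokio runtime");

    runtime.spawn(async {
        loop {
            // Stand-in for real background work (metrics listener, etc.).
            tokio::time::sleep(std::time::Duration::from_secs(60)).await;
        }
    });

    // If `runtime` were dropped at the end of this function instead, its worker
    // threads would shut down and the task above would be cancelled.
    Box::leak(Box::new(runtime))
}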
13
pgxn/neon/communicator/src/worker_process/mod.rs
Normal file
@@ -0,0 +1,13 @@
//! This code runs in the communicator worker process. It provides
//! the glue code to:
//!
//! - launch the main loop,
//! - receive IO requests from backends and process them,
//! - write results back to backends.

mod callbacks;
mod control_socket;
mod lfc_metrics;
mod logging;
mod main_loop;
mod worker_interface;
@@ -0,0 +1,60 @@
//! Functions called from the C code in the worker process

use std::ffi::{CStr, CString, c_char};

use crate::worker_process::main_loop;
use crate::worker_process::main_loop::CommunicatorWorkerProcessStruct;

/// Launch the communicator's tokio tasks, which do most of the work.
///
/// The caller has initialized the process as a regular PostgreSQL background worker
/// process.
///
/// Inputs:
/// `tenant_id` and `timeline_id` can be NULL if we've been launched in "non-Neon" mode,
/// where we use local storage instead of connecting to remote neon storage. That's
/// currently only used in some unit tests.
///
/// Result:
/// Returns a pointer to CommunicatorWorkerProcessStruct, which is a handle to the running
/// Rust tasks. The C code can use it to interact with the Rust parts. On failure, returns
/// None/NULL, and an error message is returned in *error_p.
///
/// This is called only once in the process, so the returned struct, and the error message
/// in case of failure, are simply leaked.
#[unsafe(no_mangle)]
pub extern "C" fn communicator_worker_launch(
    tenant_id: *const c_char,
    timeline_id: *const c_char,
    error_p: *mut *const c_char,
) -> Option<&'static CommunicatorWorkerProcessStruct> {
    // Convert the arguments into more convenient Rust types
    let tenant_id = if tenant_id.is_null() {
        None
    } else {
        let cstr = unsafe { CStr::from_ptr(tenant_id) };
        Some(cstr.to_str().expect("assume UTF-8"))
    };
    let timeline_id = if timeline_id.is_null() {
        None
    } else {
        let cstr = unsafe { CStr::from_ptr(timeline_id) };
        Some(cstr.to_str().expect("assume UTF-8"))
    };

    // The `init` function does all the work.
    let result = main_loop::init(tenant_id, timeline_id);

    // On failure, return the error message to the C caller in *error_p.
    match result {
        Ok(worker_struct) => Some(worker_struct),
        Err(errmsg) => {
            let errmsg = CString::new(errmsg).expect("no nuls within error message");
            let errmsg = Box::leak(errmsg.into_boxed_c_str());
            let p: *const c_char = errmsg.as_ptr();

            unsafe { *error_p = p };
            None
        }
    }
}
273
pgxn/neon/communicator_process.c
Normal file
@@ -0,0 +1,273 @@
/*-------------------------------------------------------------------------
 *
 * communicator_process.c
 *	  Functions for starting up the communicator background worker process.
 *
 * Currently, the communicator process only functions as a metrics
 * exporter. It provides an HTTP endpoint for polling a limited set of
 * metrics. TODO: In the future, it will do much more, i.e. handle all
 * the communications with the pageservers.
 *
 * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <unistd.h>

#include "miscadmin.h"
#include "postmaster/bgworker.h"
#include "postmaster/interrupt.h"
#include "postmaster/postmaster.h"
#include "replication/walsender.h"
#include "storage/ipc.h"
#include "storage/latch.h"
#include "storage/pmsignal.h"
#include "storage/procsignal.h"
#include "tcop/tcopprot.h"
#include "utils/timestamp.h"

#include "communicator_process.h"
#include "file_cache.h"
#include "neon.h"
#include "neon_perf_counters.h"

/* the rust bindings, generated by cbindgen */
#include "communicator/communicator_bindings.h"

static void pump_logging(struct LoggingReceiver *logging);
PGDLLEXPORT void communicator_new_bgworker_main(Datum main_arg);

/**** Initialization functions. These run in the postmaster ****/

void
pg_init_communicator_process(void)
{
	BackgroundWorker bgw;

	/* Initialize the background worker process */
	memset(&bgw, 0, sizeof(bgw));
	bgw.bgw_flags = BGWORKER_SHMEM_ACCESS;
	bgw.bgw_start_time = BgWorkerStart_PostmasterStart;
	snprintf(bgw.bgw_library_name, BGW_MAXLEN, "neon");
	snprintf(bgw.bgw_function_name, BGW_MAXLEN, "communicator_new_bgworker_main");
	snprintf(bgw.bgw_name, BGW_MAXLEN, "Storage communicator process");
	snprintf(bgw.bgw_type, BGW_MAXLEN, "Storage communicator process");
	bgw.bgw_restart_time = 5;
	bgw.bgw_notify_pid = 0;
	bgw.bgw_main_arg = (Datum) 0;

	RegisterBackgroundWorker(&bgw);
}

/**** Worker process functions. These run in the communicator worker process ****/

/*
 * Entry point for the communicator bgworker process
 */
void
communicator_new_bgworker_main(Datum main_arg)
{
	struct LoggingReceiver *logging;
	const char *errmsg = NULL;
	const struct CommunicatorWorkerProcessStruct *proc_handle;

	/*
	 * Pretend that this process is a WAL sender. That affects the shutdown
	 * sequence: WAL senders are shut down last, after the final checkpoint
	 * has been written. That's what we want for the communicator process too.
	 */
	am_walsender = true;
	MarkPostmasterChildWalSender();

	/* Establish signal handlers. */
	pqsignal(SIGUSR1, procsignal_sigusr1_handler);
	/*
	 * Postmaster sends us SIGUSR2 when all regular backends and bgworkers
	 * have exited, and it's time for us to exit too
	 */
	pqsignal(SIGUSR2, die);
	pqsignal(SIGHUP, SignalHandlerForConfigReload);
	pqsignal(SIGTERM, die);

	BackgroundWorkerUnblockSignals();

	/*
	 * By default, INFO messages are not printed to the log. We want
	 * `tracing::info!` messages emitted from the communicator to be printed,
	 * however, so increase the log level.
	 *
	 * XXX: This overrides any user-set value from the config file. That's not
	 * great, but on the other hand, there should be little reason for the
	 * user to control the verbosity of the communicator. It's not too verbose
	 * by default.
	 */
	SetConfigOption("log_min_messages", "INFO", PGC_SUSET, PGC_S_OVERRIDE);

	logging = communicator_worker_configure_logging();

	proc_handle = communicator_worker_launch(
		neon_tenant[0] == '\0' ? NULL : neon_tenant,
		neon_timeline[0] == '\0' ? NULL : neon_timeline,
		&errmsg
	);
	if (proc_handle == NULL)
	{
		/*
		 * Something went wrong. Before exiting, forward any log messages that
		 * might've been generated during the failed launch.
		 */
		pump_logging(logging);

		elog(PANIC, "%s", errmsg);
	}

	/*
	 * The Rust tokio runtime has been launched, and it's running in the
	 * background now. This loop in the main thread handles any interactions
	 * we need with the rest of PostgreSQL.
	 *
	 * NB: This process is now multi-threaded! The Rust threads do not call
	 * into any Postgres functions, but it's not entirely clear which Postgres
	 * functions are safe to call from this main thread either. Be very
	 * careful about adding anything non-trivial here.
	 *
	 * Also note that we try to react quickly to any log messages arriving
	 * from the Rust threads. Be careful not to do anything too expensive here
	 * that might cause delays.
	 */
	elog(LOG, "communicator threads started");
	for (;;)
	{
		TimestampTz before;
		long		duration;

		ResetLatch(MyLatch);

		/*
		 * Forward any log messages from the Rust threads into the normal
		 * Postgres logging facility.
		 */
		pump_logging(logging);

		/*
		 * Check interrupts like system shutdown or config reload.
		 *
		 * We mustn't block for too long within this loop, or we risk the log
		 * queue filling up and messages being lost. Also, even if we can keep
		 * up, if there's a long delay between sending a message and printing
		 * it to the log, the timestamps on the messages get skewed, which is
		 * confusing.
		 *
		 * We expect processing interrupts to happen fast enough that it's OK,
		 * but measure it just in case, and print a warning if it takes longer
		 * than 100 ms.
		 */
#define LOG_SKEW_WARNING_MS 100
		before = GetCurrentTimestamp();

		CHECK_FOR_INTERRUPTS();
		if (ConfigReloadPending)
		{
			ConfigReloadPending = false;
			ProcessConfigFile(PGC_SIGHUP);
		}

		duration = TimestampDifferenceMilliseconds(before, GetCurrentTimestamp());
		if (duration > LOG_SKEW_WARNING_MS)
			elog(WARNING, "handling interrupts took %ld ms, communicator log timestamps might be skewed", duration);

		/*
		 * Wait until we are woken up. The Rust threads will set the latch
		 * when there's a log message to forward.
		 */
		(void) WaitLatch(MyLatch,
						 WL_LATCH_SET | WL_EXIT_ON_PM_DEATH,
						 0,
						 PG_WAIT_EXTENSION);
	}
}

static void
pump_logging(struct LoggingReceiver *logging)
{
	char		errbuf[1000];
	int			elevel;
	int32		rc;
	static uint64_t last_dropped_event_count = 0;
	uint64_t	dropped_event_count;
	uint64_t	dropped_now;

	for (;;)
	{
		rc = communicator_worker_poll_logging(logging,
											  errbuf,
											  sizeof(errbuf),
											  &elevel,
											  &dropped_event_count);
		if (rc == 0)
		{
			/* nothing to do */
			break;
		}
		else if (rc == 1)
		{
			/* Because we don't want to exit on error */

			if (message_level_is_interesting(elevel))
			{
				/*
				 * Prevent interrupts while cleaning up.
				 *
				 * (Not sure if this is required, but all the error handlers
				 * in Postgres that are installed as sigsetjmp() targets do
				 * this, so let's follow the example)
				 */
				HOLD_INTERRUPTS();

				errstart(elevel, TEXTDOMAIN);
				errmsg_internal("[COMMUNICATOR] %s", errbuf);
				EmitErrorReport();
				FlushErrorState();

				/* Now we can allow interrupts again */
				RESUME_INTERRUPTS();
			}
		}
		else if (rc == -1)
		{
			elog(ERROR, "logging channel was closed unexpectedly");
		}
	}

	/*
	 * If the queue was full at any time since the last time we reported it,
	 * report how many messages were lost. We do this outside the loop, so
	 * that if the logging system is clogged, we don't exacerbate it by
	 * printing lots of warnings about dropped messages.
	 */
	dropped_now = dropped_event_count - last_dropped_event_count;
	if (dropped_now != 0)
	{
		elog(WARNING, "%lu communicator log messages were dropped because the log buffer was full",
			 (unsigned long) dropped_now);
		last_dropped_event_count = dropped_event_count;
	}
}

/****
 * Callbacks from the Rust code, in the communicator process.
 *
 * NOTE: These must be thread-safe! It's very limited which PostgreSQL
 * functions you can use!!!
 *
 * The signatures of these need to match those in the Rust code.
 */

void
callback_set_my_latch_unsafe(void)
{
	SetLatch(MyLatch);
}
17
pgxn/neon/communicator_process.h
Normal file
@@ -0,0 +1,17 @@
/*-------------------------------------------------------------------------
 *
 * communicator_process.h
 *	  Communicator process
 *
 *
 * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *-------------------------------------------------------------------------
 */
#ifndef COMMUNICATOR_PROCESS_H
#define COMMUNICATOR_PROCESS_H

extern void pg_init_communicator_process(void);

#endif							/* COMMUNICATOR_PROCESS_H */
@@ -52,6 +52,8 @@
#include "pagestore_client.h"
#include "communicator.h"

#include "communicator/communicator_bindings.h"

#define CriticalAssert(cond) do if (!(cond)) elog(PANIC, "LFC: assertion %s failed at %s:%d: ", #cond, __FILE__, __LINE__); while (0)

/*
@@ -2156,6 +2158,38 @@ lfc_approximate_working_set_size_seconds(time_t duration, bool reset)
	return dc;
}

/*
 * Get metrics, for the built-in metrics exporter that's part of the communicator
 * process.
 *
 * NB: This is called from a Rust tokio task inside the communicator process.
 * Acquiring lwlocks, elog(), allocating memory or anything else non-trivial
 * is strictly prohibited here!
 */
struct LfcMetrics
callback_get_lfc_metrics_unsafe(void)
{
	struct LfcMetrics result = {
		.lfc_cache_size_limit = (int64) lfc_size_limit * 1024 * 1024,
		.lfc_hits = lfc_ctl ? lfc_ctl->hits : 0,
		.lfc_misses = lfc_ctl ? lfc_ctl->misses : 0,
		.lfc_used = lfc_ctl ? lfc_ctl->used : 0,
		.lfc_writes = lfc_ctl ? lfc_ctl->writes : 0,
	};

	if (lfc_ctl)
	{
		for (int minutes = 1; minutes <= 60; minutes++)
		{
			result.lfc_approximate_working_set_size_windows[minutes - 1] =
				lfc_approximate_working_set_size_seconds(minutes * 60, false);
		}
	}

	return result;
}


PG_FUNCTION_INFO_V1(get_local_cache_state);

Datum
@@ -178,6 +178,8 @@ static PageServer page_servers[MAX_SHARDS];
static bool pageserver_flush(shardno_t shard_no);
static void pageserver_disconnect(shardno_t shard_no);
static void pageserver_disconnect_shard(shardno_t shard_no);
// HADRON
shardno_t get_num_shards(void);

static bool
PagestoreShmemIsValid(void)
@@ -286,6 +288,22 @@ AssignPageserverConnstring(const char *newval, void *extra)
	}
}

/* BEGIN_HADRON */
/**
 * Return the total number of shards seen in the shard map.
 */
shardno_t get_num_shards(void)
{
	const ShardMap *shard_map;

	Assert(pagestore_shared);
	shard_map = &pagestore_shared->shard_map;

	Assert(shard_map != NULL);
	return shard_map->num_shards;
}
/* END_HADRON */

/*
 * Get the current number of shards, and/or the connection string for a
 * particular shard from the shard map in shared memory.
Some files were not shown because too many files have changed in this diff.