Use enum-typed PG versions (#12317)

This makes it possible for the compiler to validate that a match block
covers all PostgreSQL versions we support.

## Problem
We did not have a complete picture of which places had to deal with PG
versions, nor of which format those versions were in: the full PG
version ID format (major/minor/bugfix, `MMmmbb`) as transferred in
protocol messages, or only the major release version (`MM`). This meant
type confusion was rampant.

With this change, it becomes easier to develop new version-dependent
features, because type and niche confusion are now impossible.
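
As a sketch of the compile-time exhaustiveness this buys (the function
below is a hypothetical example, not code from this change):

```rust
use postgres_versioninfo::PgMajorVersion;

// If a new variant is ever added to PgMajorVersion, this match stops
// compiling until the new version is handled here as well.
fn version_dir_name(version: PgMajorVersion) -> &'static str {
    match version {
        PgMajorVersion::PG14 => "v14",
        PgMajorVersion::PG15 => "v15",
        PgMajorVersion::PG16 => "v16",
        PgMajorVersion::PG17 => "v17",
    }
}
```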

## Summary of changes
Every use of `pg_version` is now typed as either `PgVersionId` (a `u32`
valued in decimal `MMmmbb` format) or `PgMajorVersion` (an enum with a
variant for every major version we support, serialized and stored like
a `u32` holding that major version's number).
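
Conversions between the two representations are explicit, and only the
narrowing direction is fallible. A minimal sketch using the conversions
the new `postgres_versioninfo` crate defines:

```rust
use postgres_versioninfo::{PgMajorVersion, PgVersionId};

fn main() {
    // Widening: a major version maps to the MMmmbb form with the
    // minor/bugfix digits zeroed, so PG15 becomes 150000.
    let id = PgVersionId::from(PgMajorVersion::PG15);
    assert_eq!(id.to_string(), "150000");

    // Narrowing: a full version ID may fall outside the supported
    // range, so the conversion returns a Result instead of truncating.
    let major = PgMajorVersion::try_from(id).unwrap();
    assert_eq!(major, PgMajorVersion::PG15);
    assert_eq!(major.major_version_num(), 15);
}
```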

---------

Co-authored-by: Arpad Müller <arpad-m@users.noreply.github.com>
Author: Matthias van de Meent
Date: 2025-06-24 19:25:31 +02:00 (committed by GitHub)
Parent: 158d84ea30
Commit: 6c6de6382a

62 changed files with 683 additions and 386 deletions

Cargo.lock (generated)

@@ -1318,6 +1318,7 @@ dependencies = [
  "p256 0.13.2",
  "postgres",
  "postgres_initdb",
+ "postgres_versioninfo",
  "regex",
  "remote_storage",
  "reqwest",
@@ -4406,6 +4407,7 @@ dependencies = [
  "once_cell",
  "postgres_backend",
  "postgres_ffi_types",
+ "postgres_versioninfo",
  "rand 0.8.5",
  "remote_storage",
  "reqwest",
@@ -4429,6 +4431,7 @@ dependencies = [
  "futures",
  "http-utils",
  "pageserver_api",
+ "postgres_versioninfo",
  "reqwest",
  "serde",
  "thiserror 1.0.69",
@@ -4897,6 +4900,7 @@ dependencies = [
  "once_cell",
  "postgres",
  "postgres_ffi_types",
+ "postgres_versioninfo",
  "pprof",
  "regex",
  "serde",
@@ -4919,11 +4923,23 @@ version = "0.1.0"
 dependencies = [
  "anyhow",
  "camino",
+ "postgres_versioninfo",
  "thiserror 1.0.69",
  "tokio",
  "workspace_hack",
 ]
 
+[[package]]
+name = "postgres_versioninfo"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "serde",
+ "serde_repr",
+ "thiserror 1.0.69",
+ "workspace_hack",
+]
+
 [[package]]
 name = "posthog_client_lite"
 version = "0.1.0"
@@ -6115,6 +6131,7 @@ dependencies = [
  "postgres-protocol",
  "postgres_backend",
  "postgres_ffi",
+ "postgres_versioninfo",
  "pprof",
  "pq_proto",
  "rand 0.8.5",
@@ -6159,6 +6176,7 @@ dependencies = [
  "const_format",
  "pageserver_api",
  "postgres_ffi",
+ "postgres_versioninfo",
  "pq_proto",
  "serde",
  "serde_json",
@@ -6481,6 +6499,17 @@ dependencies = [
  "thiserror 1.0.69",
 ]
 
+[[package]]
+name = "serde_repr"
+version = "0.1.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.100",
+]
+
 [[package]]
 name = "serde_spanned"
 version = "0.6.6"

@@ -23,6 +23,7 @@ members = [
     "libs/pageserver_api",
     "libs/postgres_ffi",
     "libs/postgres_ffi_types",
+    "libs/postgres_versioninfo",
     "libs/safekeeper_api",
     "libs/desim",
     "libs/neon-shmem",
@@ -174,6 +175,7 @@ serde_json = "1"
 serde_path_to_error = "0.1"
 serde_with = { version = "3", features = [ "base64" ] }
 serde_assert = "0.5.0"
+serde_repr = "0.1.20"
 sha2 = "0.10.2"
 signal-hook = "0.3"
 smallvec = "1.11"
@@ -261,6 +263,7 @@ postgres_backend = { version = "0.1", path = "./libs/postgres_backend/" }
 postgres_connection = { version = "0.1", path = "./libs/postgres_connection/" }
 postgres_ffi = { version = "0.1", path = "./libs/postgres_ffi/" }
 postgres_ffi_types = { version = "0.1", path = "./libs/postgres_ffi_types/" }
+postgres_versioninfo = { version = "0.1", path = "./libs/postgres_versioninfo/" }
 postgres_initdb = { path = "./libs/postgres_initdb" }
 posthog_client_lite = { version = "0.1", path = "./libs/posthog_client_lite" }
 pq_proto = { version = "0.1", path = "./libs/pq_proto/" }

@@ -64,6 +64,7 @@ uuid.workspace = true
 walkdir.workspace = true
 x509-cert.workspace = true
 
+postgres_versioninfo.workspace = true
 postgres_initdb.workspace = true
 compute_api.workspace = true
 utils.workspace = true

@@ -29,7 +29,7 @@ use anyhow::{Context, bail};
 use aws_config::BehaviorVersion;
 use camino::{Utf8Path, Utf8PathBuf};
 use clap::{Parser, Subcommand};
-use compute_tools::extension_server::{PostgresMajorVersion, get_pg_version};
+use compute_tools::extension_server::get_pg_version;
 use nix::unistd::Pid;
 use std::ops::Not;
 use tracing::{Instrument, error, info, info_span, warn};
@@ -179,12 +179,8 @@ impl PostgresProcess {
             .await
             .context("create pgdata directory")?;
 
-        let pg_version = match get_pg_version(self.pgbin.as_ref()) {
-            PostgresMajorVersion::V14 => 14,
-            PostgresMajorVersion::V15 => 15,
-            PostgresMajorVersion::V16 => 16,
-            PostgresMajorVersion::V17 => 17,
-        };
+        let pg_version = get_pg_version(self.pgbin.as_ref());
 
         postgres_initdb::do_run_initdb(postgres_initdb::RunInitdbArgs {
             superuser: initdb_user,
             locale: DEFAULT_LOCALE, // XXX: this shouldn't be hard-coded,

@@ -74,9 +74,11 @@ More specifically, here is an example ext_index.json
 use std::path::Path;
 use std::str;
 
+use crate::metrics::{REMOTE_EXT_REQUESTS_TOTAL, UNKNOWN_HTTP_STATUS};
 use anyhow::{Context, Result, bail};
 use bytes::Bytes;
 use compute_api::spec::RemoteExtSpec;
+use postgres_versioninfo::PgMajorVersion;
 use regex::Regex;
 use remote_storage::*;
 use reqwest::StatusCode;
@@ -86,8 +88,6 @@ use tracing::log::warn;
 use url::Url;
 use zstd::stream::read::Decoder;
 
-use crate::metrics::{REMOTE_EXT_REQUESTS_TOTAL, UNKNOWN_HTTP_STATUS};
-
 fn get_pg_config(argument: &str, pgbin: &str) -> String {
     // gives the result of `pg_config [argument]`
     // where argument is a flag like `--version` or `--sharedir`
@@ -106,7 +106,7 @@ fn get_pg_config(argument: &str, pgbin: &str) -> String {
         .to_string()
 }
 
-pub fn get_pg_version(pgbin: &str) -> PostgresMajorVersion {
+pub fn get_pg_version(pgbin: &str) -> PgMajorVersion {
     // pg_config --version returns a (platform specific) human readable string
     // such as "PostgreSQL 15.4". We parse this to v14/v15/v16 etc.
     let human_version = get_pg_config("--version", pgbin);
@@ -114,25 +114,11 @@ pub fn get_pg_version(pgbin: &str) -> PostgresMajorVersion {
 }
 
 pub fn get_pg_version_string(pgbin: &str) -> String {
-    match get_pg_version(pgbin) {
-        PostgresMajorVersion::V14 => "v14",
-        PostgresMajorVersion::V15 => "v15",
-        PostgresMajorVersion::V16 => "v16",
-        PostgresMajorVersion::V17 => "v17",
-    }
-    .to_owned()
+    get_pg_version(pgbin).v_str()
 }
 
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum PostgresMajorVersion {
-    V14,
-    V15,
-    V16,
-    V17,
-}
-
-fn parse_pg_version(human_version: &str) -> PostgresMajorVersion {
-    use PostgresMajorVersion::*;
+fn parse_pg_version(human_version: &str) -> PgMajorVersion {
+    use PgMajorVersion::*;
     // Normal releases have version strings like "PostgreSQL 15.4". But there
     // are also pre-release versions like "PostgreSQL 17devel" or "PostgreSQL
     // 16beta2" or "PostgreSQL 17rc1". And with the --with-extra-version
@@ -143,10 +129,10 @@ fn parse_pg_version(human_version: &str) -> PostgresMajorVersion {
         .captures(human_version)
     {
         Some(captures) if captures.len() == 2 => match &captures["major"] {
-            "14" => return V14,
-            "15" => return V15,
-            "16" => return V16,
-            "17" => return V17,
+            "14" => return PG14,
+            "15" => return PG15,
+            "16" => return PG16,
+            "17" => return PG17,
             _ => {}
         },
         _ => {}
@@ -343,25 +329,25 @@ mod tests {
     #[test]
     fn test_parse_pg_version() {
-        use super::PostgresMajorVersion::*;
-        assert_eq!(parse_pg_version("PostgreSQL 15.4"), V15);
-        assert_eq!(parse_pg_version("PostgreSQL 15.14"), V15);
+        use postgres_versioninfo::PgMajorVersion::*;
+        assert_eq!(parse_pg_version("PostgreSQL 15.4"), PG15);
+        assert_eq!(parse_pg_version("PostgreSQL 15.14"), PG15);
         assert_eq!(
             parse_pg_version("PostgreSQL 15.4 (Ubuntu 15.4-0ubuntu0.23.04.1)"),
-            V15
+            PG15
         );
-        assert_eq!(parse_pg_version("PostgreSQL 14.15"), V14);
-        assert_eq!(parse_pg_version("PostgreSQL 14.0"), V14);
+        assert_eq!(parse_pg_version("PostgreSQL 14.15"), PG14);
+        assert_eq!(parse_pg_version("PostgreSQL 14.0"), PG14);
         assert_eq!(
            parse_pg_version("PostgreSQL 14.9 (Debian 14.9-1.pgdg120+1"),
-            V14
+            PG14
         );
-        assert_eq!(parse_pg_version("PostgreSQL 16devel"), V16);
-        assert_eq!(parse_pg_version("PostgreSQL 16beta1"), V16);
-        assert_eq!(parse_pg_version("PostgreSQL 16rc2"), V16);
-        assert_eq!(parse_pg_version("PostgreSQL 16extra"), V16);
+        assert_eq!(parse_pg_version("PostgreSQL 16devel"), PG16);
+        assert_eq!(parse_pg_version("PostgreSQL 16beta1"), PG16);
+        assert_eq!(parse_pg_version("PostgreSQL 16rc2"), PG16);
+        assert_eq!(parse_pg_version("PostgreSQL 16extra"), PG16);
     }
 
     #[test]

@@ -48,7 +48,7 @@ use postgres_connection::parse_host_port;
 use safekeeper_api::membership::{SafekeeperGeneration, SafekeeperId};
 use safekeeper_api::{
     DEFAULT_HTTP_LISTEN_PORT as DEFAULT_SAFEKEEPER_HTTP_PORT,
-    DEFAULT_PG_LISTEN_PORT as DEFAULT_SAFEKEEPER_PG_PORT,
+    DEFAULT_PG_LISTEN_PORT as DEFAULT_SAFEKEEPER_PG_PORT, PgMajorVersion, PgVersionId,
 };
 use storage_broker::DEFAULT_LISTEN_ADDR as DEFAULT_BROKER_ADDR;
 use tokio::task::JoinSet;
@@ -64,7 +64,7 @@ const DEFAULT_PAGESERVER_ID: NodeId = NodeId(1);
 const DEFAULT_BRANCH_NAME: &str = "main";
 project_git_version!(GIT_VERSION);
 
-const DEFAULT_PG_VERSION: u32 = 17;
+const DEFAULT_PG_VERSION: PgMajorVersion = PgMajorVersion::PG17;
 
 const DEFAULT_PAGESERVER_CONTROL_PLANE_API: &str = "http://127.0.0.1:1234/upcall/v1/";
@@ -169,7 +169,7 @@ struct TenantCreateCmdArgs {
     #[arg(default_value_t = DEFAULT_PG_VERSION)]
     #[clap(long, help = "Postgres version to use for the initial timeline")]
-    pg_version: u32,
+    pg_version: PgMajorVersion,
 
     #[clap(
         long,
@@ -292,7 +292,7 @@ struct TimelineCreateCmdArgs {
     #[arg(default_value_t = DEFAULT_PG_VERSION)]
     #[clap(long, help = "Postgres version")]
-    pg_version: u32,
+    pg_version: PgMajorVersion,
 }
 
 #[derive(clap::Args)]
@@ -324,7 +324,7 @@ struct TimelineImportCmdArgs {
     #[arg(default_value_t = DEFAULT_PG_VERSION)]
     #[clap(long, help = "Postgres version of the backup being imported")]
-    pg_version: u32,
+    pg_version: PgMajorVersion,
 }
 
 #[derive(clap::Subcommand)]
@@ -603,7 +603,7 @@ struct EndpointCreateCmdArgs {
     #[arg(default_value_t = DEFAULT_PG_VERSION)]
     #[clap(long, help = "Postgres version")]
-    pg_version: u32,
+    pg_version: PgMajorVersion,
 
     /// Use gRPC to communicate with Pageservers, by generating grpc:// connstrings.
     ///
@@ -1295,7 +1295,7 @@ async fn handle_timeline(cmd: &TimelineCmd, env: &mut local_env::LocalEnv) -> Re
         },
         new_members: None,
     };
-    let pg_version = args.pg_version * 10000;
+    let pg_version = PgVersionId::from(args.pg_version);
     let req = safekeeper_api::models::TimelineCreateRequest {
         tenant_id,
         timeline_id,

@@ -67,6 +67,7 @@ use nix::sys::signal::{Signal, kill};
 use pageserver_api::shard::ShardStripeSize;
 use pem::Pem;
 use reqwest::header::CONTENT_TYPE;
+use safekeeper_api::PgMajorVersion;
 use safekeeper_api::membership::SafekeeperGeneration;
 use serde::{Deserialize, Serialize};
 use sha2::{Digest, Sha256};
@@ -89,7 +90,7 @@ pub struct EndpointConf {
     pg_port: u16,
     external_http_port: u16,
     internal_http_port: u16,
-    pg_version: u32,
+    pg_version: PgMajorVersion,
     grpc: bool,
     skip_pg_catalog_updates: bool,
     reconfigure_concurrency: usize,
@@ -192,7 +193,7 @@ impl ComputeControlPlane {
         pg_port: Option<u16>,
         external_http_port: Option<u16>,
         internal_http_port: Option<u16>,
-        pg_version: u32,
+        pg_version: PgMajorVersion,
         mode: ComputeMode,
         grpc: bool,
         skip_pg_catalog_updates: bool,
@@ -312,7 +313,7 @@ pub struct Endpoint {
     pub internal_http_address: SocketAddr,
 
     // postgres major version in the format: 14, 15, etc.
-    pg_version: u32,
+    pg_version: PgMajorVersion,
 
     // These are not part of the endpoint as such, but the environment
     // the endpoint runs in.
@@ -557,7 +558,7 @@ impl Endpoint {
             conf.append("hot_standby", "on");
             // prefetching of blocks referenced in WAL doesn't make sense for us
             // Neon hot standby ignores pages that are not in the shared_buffers
-            if self.pg_version >= 15 {
+            if self.pg_version >= PgMajorVersion::PG15 {
                 conf.append("recovery_prefetch", "off");
             }
         }

@@ -15,6 +15,7 @@ use clap::ValueEnum;
 use pem::Pem;
 use postgres_backend::AuthType;
 use reqwest::{Certificate, Url};
+use safekeeper_api::PgMajorVersion;
 use serde::{Deserialize, Serialize};
 use utils::auth::encode_from_key_file;
 use utils::id::{NodeId, TenantId, TenantTimelineId, TimelineId};
@@ -424,25 +425,21 @@
         self.pg_distrib_dir.clone()
     }
 
-    pub fn pg_distrib_dir(&self, pg_version: u32) -> anyhow::Result<PathBuf> {
+    pub fn pg_distrib_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result<PathBuf> {
         let path = self.pg_distrib_dir.clone();
 
-        #[allow(clippy::manual_range_patterns)]
-        match pg_version {
-            14 | 15 | 16 | 17 => Ok(path.join(format!("v{pg_version}"))),
-            _ => bail!("Unsupported postgres version: {}", pg_version),
-        }
+        Ok(path.join(pg_version.v_str()))
     }
 
-    pub fn pg_dir(&self, pg_version: u32, dir_name: &str) -> anyhow::Result<PathBuf> {
+    pub fn pg_dir(&self, pg_version: PgMajorVersion, dir_name: &str) -> anyhow::Result<PathBuf> {
         Ok(self.pg_distrib_dir(pg_version)?.join(dir_name))
     }
 
-    pub fn pg_bin_dir(&self, pg_version: u32) -> anyhow::Result<PathBuf> {
+    pub fn pg_bin_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result<PathBuf> {
         self.pg_dir(pg_version, "bin")
     }
 
-    pub fn pg_lib_dir(&self, pg_version: u32) -> anyhow::Result<PathBuf> {
+    pub fn pg_lib_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result<PathBuf> {
         self.pg_dir(pg_version, "lib")
     }

@@ -22,6 +22,7 @@ use pageserver_api::shard::TenantShardId;
 use pageserver_client::mgmt_api;
 use postgres_backend::AuthType;
 use postgres_connection::{PgConnectionConfig, parse_host_port};
+use safekeeper_api::PgMajorVersion;
 use utils::auth::{Claims, Scope};
 use utils::id::{NodeId, TenantId, TimelineId};
 use utils::lsn::Lsn;
@@ -607,7 +608,7 @@ impl PageServerNode {
         timeline_id: TimelineId,
         base: (Lsn, PathBuf),
         pg_wal: Option<(Lsn, PathBuf)>,
-        pg_version: u32,
+        pg_version: PgMajorVersion,
     ) -> anyhow::Result<()> {
         // Init base reader
         let (start_lsn, base_tarfile_path) = base;

@@ -6,6 +6,8 @@ use std::str::FromStr;
 use std::sync::OnceLock;
 use std::time::{Duration, Instant};
 
+use crate::background_process;
+use crate::local_env::{LocalEnv, NeonStorageControllerConf};
 use camino::{Utf8Path, Utf8PathBuf};
 use hyper0::Uri;
 use nix::unistd::Pid;
@@ -22,6 +24,7 @@ use pageserver_client::mgmt_api::ResponseErrorMessageExt;
 use pem::Pem;
 use postgres_backend::AuthType;
 use reqwest::{Method, Response};
+use safekeeper_api::PgMajorVersion;
 use serde::de::DeserializeOwned;
 use serde::{Deserialize, Serialize};
 use tokio::process::Command;
@@ -31,9 +34,6 @@ use utils::auth::{Claims, Scope, encode_from_key_file};
 use utils::id::{NodeId, TenantId};
 use whoami::username;
 
-use crate::background_process;
-use crate::local_env::{LocalEnv, NeonStorageControllerConf};
-
 pub struct StorageController {
     env: LocalEnv,
     private_key: Option<Pem>,
@@ -48,7 +48,7 @@ pub struct StorageController {
 const COMMAND: &str = "storage_controller";
 
-const STORAGE_CONTROLLER_POSTGRES_VERSION: u32 = 16;
+const STORAGE_CONTROLLER_POSTGRES_VERSION: PgMajorVersion = PgMajorVersion::PG16;
 
 const DB_NAME: &str = "storage_controller";
@@ -184,9 +184,15 @@
     /// to other versions if that one isn't found. Some automated tests create circumstances
     /// where only one version is available in pg_distrib_dir, such as `test_remote_extensions`.
     async fn get_pg_dir(&self, dir_name: &str) -> anyhow::Result<Utf8PathBuf> {
-        let prefer_versions = [STORAGE_CONTROLLER_POSTGRES_VERSION, 16, 15, 14];
+        const PREFER_VERSIONS: [PgMajorVersion; 5] = [
+            STORAGE_CONTROLLER_POSTGRES_VERSION,
+            PgMajorVersion::PG16,
+            PgMajorVersion::PG15,
+            PgMajorVersion::PG14,
+            PgMajorVersion::PG17,
+        ];
 
-        for v in prefer_versions {
+        for v in PREFER_VERSIONS {
             let path = Utf8PathBuf::from_path_buf(self.env.pg_dir(v, dir_name)?).unwrap();
             if tokio::fs::try_exists(&path).await? {
                 return Ok(path);

@@ -18,6 +18,7 @@ bytes.workspace = true
 byteorder.workspace = true
 utils.workspace = true
 postgres_ffi_types.workspace = true
+postgres_versioninfo.workspace = true
 enum-map.workspace = true
 strum.workspace = true
 strum_macros.workspace = true

@@ -11,6 +11,7 @@ use std::time::{Duration, SystemTime};
 
 #[cfg(feature = "testing")]
 use camino::Utf8PathBuf;
+use postgres_versioninfo::PgMajorVersion;
 use serde::{Deserialize, Deserializer, Serialize, Serializer};
 use serde_with::serde_as;
 pub use utilization::PageserverUtilization;
@@ -398,7 +399,7 @@ pub enum TimelineCreateRequestMode {
         // inherits the ancestor's pg_version. Earlier code wasn't
        // using a flattened enum, so, it was an accepted field, and
         // we continue to accept it by having it here.
-        pg_version: Option<u32>,
+        pg_version: Option<PgMajorVersion>,
         #[serde(default, skip_serializing_if = "std::ops::Not::not")]
         read_only: bool,
     },
@@ -410,7 +411,7 @@
     Bootstrap {
         #[serde(default)]
         existing_initdb_timeline_id: Option<TimelineId>,
-        pg_version: Option<u32>,
+        pg_version: Option<PgMajorVersion>,
     },
 }
@@ -1573,7 +1574,7 @@ pub struct TimelineInfo {
     pub last_received_msg_lsn: Option<Lsn>,
     /// the timestamp (in microseconds) of the last received message
     pub last_received_msg_ts: Option<u128>,
-    pub pg_version: u32,
+    pub pg_version: PgMajorVersion,
 
     pub state: TimelineState,

@@ -19,6 +19,7 @@ serde.workspace = true
 postgres_ffi_types.workspace = true
 utils.workspace = true
 tracing.workspace = true
+postgres_versioninfo.workspace = true
 
 [dev-dependencies]
 env_logger.workspace = true

@@ -4,6 +4,7 @@ use criterion::{Bencher, Criterion, criterion_group, criterion_main};
 use postgres_ffi::v17::wal_generator::LogicalMessageGenerator;
 use postgres_ffi::v17::waldecoder_handler::WalStreamDecoderHandler;
 use postgres_ffi::waldecoder::WalStreamDecoder;
+use postgres_versioninfo::PgMajorVersion;
 use pprof::criterion::{Output, PProfProfiler};
 use utils::lsn::Lsn;
@@ -32,7 +33,7 @@ fn bench_complete_record(c: &mut Criterion) {
         let value_size = LogicalMessageGenerator::make_value_size(size, PREFIX);
         let value = vec![1; value_size];
 
-        let mut decoder = WalStreamDecoder::new(Lsn(0), 170000);
+        let mut decoder = WalStreamDecoder::new(Lsn(0), PgMajorVersion::PG17);
         let msg = LogicalMessageGenerator::new(PREFIX, &value)
             .next()
             .unwrap()

@@ -14,6 +14,8 @@ use bytes::Bytes;
 use utils::bin_ser::SerializeError;
 use utils::lsn::Lsn;
 
+pub use postgres_versioninfo::PgMajorVersion;
+
 macro_rules! postgres_ffi {
     ($version:ident) => {
         #[path = "."]
@@ -91,21 +93,22 @@ macro_rules! dispatch_pgversion {
             $version => $code,
             default = $invalid_pgver_handling,
             pgversions = [
-                14 : v14,
-                15 : v15,
-                16 : v16,
-                17 : v17,
+                $crate::PgMajorVersion::PG14 => v14,
+                $crate::PgMajorVersion::PG15 => v15,
+                $crate::PgMajorVersion::PG16 => v16,
+                $crate::PgMajorVersion::PG17 => v17,
             ]
         )
    };
     ($pgversion:expr => $code:expr,
     default = $default:expr,
-     pgversions = [$($sv:literal : $vsv:ident),+ $(,)?]) => {
-        match ($pgversion) {
+     pgversions = [$($sv:pat => $vsv:ident),+ $(,)?]) => {
+        match ($pgversion.clone().into()) {
             $($sv => {
                 use $crate::$vsv as pgv;
                 $code
             },)+
+            #[allow(unreachable_patterns)]
             _ => {
                 $default
             }
@@ -179,9 +182,9 @@ macro_rules! enum_pgversion {
         $($variant ( $crate::$md::$t )),+
     }
     impl self::$name {
-        pub fn pg_version(&self) -> u32 {
+        pub fn pg_version(&self) -> PgMajorVersion {
             enum_pgversion_dispatch!(self, $name, _ign, {
-                pgv::bindings::PG_MAJORVERSION_NUM
+                pgv::bindings::MY_PGVERSION
             })
         }
     }
@@ -195,15 +198,15 @@
     };
     {name = $name:ident,
      path = $p:ident,
-     typ = $t:ident,
+     $(typ = $t:ident,)?
      pgversions = [$($variant:ident : $md:ident),+ $(,)?]} => {
         pub enum $name {
-            $($variant ($crate::$md::$p::$t)),+
+            $($variant $(($crate::$md::$p::$t))?),+
         }
 
         impl $name {
-            pub fn pg_version(&self) -> u32 {
+            pub fn pg_version(&self) -> PgMajorVersion {
                 enum_pgversion_dispatch!(self, $name, _ign, {
-                    pgv::bindings::PG_MAJORVERSION_NUM
+                    pgv::bindings::MY_PGVERSION
                 })
             }
         }
@@ -249,22 +252,21 @@ pub use v14::xlog_utils::{
     try_from_pg_timestamp,
 };
 
-pub fn bkpimage_is_compressed(bimg_info: u8, version: u32) -> bool {
+pub fn bkpimage_is_compressed(bimg_info: u8, version: PgMajorVersion) -> bool {
     dispatch_pgversion!(version, pgv::bindings::bkpimg_is_compressed(bimg_info))
 }
 
 pub fn generate_wal_segment(
     segno: u64,
     system_id: u64,
-    pg_version: u32,
+    pg_version: PgMajorVersion,
     lsn: Lsn,
 ) -> Result<Bytes, SerializeError> {
     assert_eq!(segno, lsn.segment_number(WAL_SEGMENT_SIZE));
 
     dispatch_pgversion!(
         pg_version,
-        pgv::xlog_utils::generate_wal_segment(segno, system_id, lsn),
-        Err(SerializeError::BadInput)
+        pgv::xlog_utils::generate_wal_segment(segno, system_id, lsn)
     )
 }
@@ -272,7 +274,7 @@ pub fn generate_pg_control(
     pg_control_bytes: &[u8],
     checkpoint_bytes: &[u8],
     lsn: Lsn,
-    pg_version: u32,
+    pg_version: PgMajorVersion,
 ) -> anyhow::Result<(Bytes, u64, bool)> {
     dispatch_pgversion!(
         pg_version,
@@ -352,6 +354,7 @@ pub fn fsm_logical_to_physical(addr: BlockNumber) -> BlockNumber {
 pub mod waldecoder {
     use std::num::NonZeroU32;
 
+    use crate::PgMajorVersion;
     use bytes::{Buf, Bytes, BytesMut};
     use thiserror::Error;
     use utils::lsn::Lsn;
@@ -369,7 +372,7 @@
     pub struct WalStreamDecoder {
         pub lsn: Lsn,
-        pub pg_version: u32,
+        pub pg_version: PgMajorVersion,
         pub inputbuf: BytesMut,
 
         pub state: State,
     }
@@ -382,7 +385,7 @@
     }
 
     impl WalStreamDecoder {
-        pub fn new(lsn: Lsn, pg_version: u32) -> WalStreamDecoder {
+        pub fn new(lsn: Lsn, pg_version: PgMajorVersion) -> WalStreamDecoder {
             WalStreamDecoder {
                 lsn,
                 pg_version,

@@ -1,3 +1,7 @@
+use crate::PgMajorVersion;
+
+pub const MY_PGVERSION: PgMajorVersion = PgMajorVersion::PG14;
+
 pub const XLOG_DBASE_CREATE: u8 = 0x00;
 pub const XLOG_DBASE_DROP: u8 = 0x10;

@@ -1,3 +1,7 @@
+use crate::PgMajorVersion;
+
+pub const MY_PGVERSION: PgMajorVersion = PgMajorVersion::PG15;
+
 pub const XACT_XINFO_HAS_DROPPED_STATS: u32 = 1u32 << 8;
 
 pub const XLOG_DBASE_CREATE_FILE_COPY: u8 = 0x00;

@@ -1,3 +1,7 @@
+use crate::PgMajorVersion;
+
+pub const MY_PGVERSION: PgMajorVersion = PgMajorVersion::PG16;
+
 pub const XACT_XINFO_HAS_DROPPED_STATS: u32 = 1u32 << 8;
 
 pub const XLOG_DBASE_CREATE_FILE_COPY: u8 = 0x00;

@@ -1,3 +1,7 @@
+use crate::PgMajorVersion;
+
+pub const MY_PGVERSION: PgMajorVersion = PgMajorVersion::PG17;
+
 pub const XACT_XINFO_HAS_DROPPED_STATS: u32 = 1u32 << 8;
 
 pub const XLOG_DBASE_CREATE_FILE_COPY: u8 = 0x00;

@@ -9,8 +9,8 @@ use utils::bin_ser::DeserializeError;
 use utils::lsn::Lsn;
 
 use crate::{
-    BLCKSZ, BlockNumber, MultiXactId, MultiXactOffset, MultiXactStatus, Oid, RepOriginId,
-    TimestampTz, TransactionId, XLOG_SIZE_OF_XLOG_RECORD, XLogRecord, pg_constants,
+    BLCKSZ, BlockNumber, MultiXactId, MultiXactOffset, MultiXactStatus, Oid, PgMajorVersion,
+    RepOriginId, TimestampTz, TransactionId, XLOG_SIZE_OF_XLOG_RECORD, XLogRecord, pg_constants,
 };
 
 #[repr(C)]
@@ -199,20 +199,17 @@
     /// Check if this WAL record represents a legacy "copy" database creation, which populates new relations
     /// by reading other existing relations' data blocks. This is more complex to apply than new-style database
     /// creations which simply include all the desired blocks in the WAL, so we need a helper function to detect this case.
-    pub fn is_dbase_create_copy(&self, pg_version: u32) -> bool {
+    pub fn is_dbase_create_copy(&self, pg_version: PgMajorVersion) -> bool {
         if self.xl_rmid == pg_constants::RM_DBASE_ID {
             let info = self.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
             match pg_version {
-                14 => {
+                PgMajorVersion::PG14 => {
                     // Postgres 14 database creations are always the legacy kind
                     info == crate::v14::bindings::XLOG_DBASE_CREATE
                 }
-                15 => info == crate::v15::bindings::XLOG_DBASE_CREATE_FILE_COPY,
-                16 => info == crate::v16::bindings::XLOG_DBASE_CREATE_FILE_COPY,
-                17 => info == crate::v17::bindings::XLOG_DBASE_CREATE_FILE_COPY,
-                _ => {
-                    panic!("Unsupported postgres version {pg_version}")
-                }
+                PgMajorVersion::PG15 => info == crate::v15::bindings::XLOG_DBASE_CREATE_FILE_COPY,
+                PgMajorVersion::PG16 => info == crate::v16::bindings::XLOG_DBASE_CREATE_FILE_COPY,
+                PgMajorVersion::PG17 => info == crate::v17::bindings::XLOG_DBASE_CREATE_FILE_COPY,
             }
         } else {
             false
@@ -248,7 +245,7 @@
 pub fn decode_wal_record(
     record: Bytes,
     decoded: &mut DecodedWALRecord,
-    pg_version: u32,
+    pg_version: PgMajorVersion,
 ) -> anyhow::Result<()> {
     let mut rnode_spcnode: u32 = 0;
     let mut rnode_dbnode: u32 = 0;
@@ -1106,9 +1103,9 @@ pub struct XlClogTruncate {
 }
 
 impl XlClogTruncate {
-    pub fn decode(buf: &mut Bytes, pg_version: u32) -> XlClogTruncate {
+    pub fn decode(buf: &mut Bytes, pg_version: PgMajorVersion) -> XlClogTruncate {
         XlClogTruncate {
-            pageno: if pg_version < 17 {
+            pageno: if pg_version < PgMajorVersion::PG17 {
                 buf.get_u32_le()
             } else {
                 buf.get_u64_le() as u32

@@ -11,9 +11,9 @@ use super::super::waldecoder::WalStreamDecoder;
 use super::bindings::{
     CheckPoint, ControlFileData, DBState_DB_SHUTDOWNED, FullTransactionId, TimeLineID, TimestampTz,
     XLogLongPageHeaderData, XLogPageHeaderData, XLogRecPtr, XLogRecord, XLogSegNo, XLOG_PAGE_MAGIC,
+    MY_PGVERSION
 };
 use super::wal_generator::LogicalMessageGenerator;
-use super::PG_MAJORVERSION;
 use crate::pg_constants;
 use crate::PG_TLI;
 use crate::{uint32, uint64, Oid};
@@ -233,7 +233,7 @@ pub fn find_end_of_wal(
     let mut result = start_lsn;
     let mut curr_lsn = start_lsn;
     let mut buf = [0u8; XLOG_BLCKSZ];
-    let pg_version = PG_MAJORVERSION[1..3].parse::<u32>().unwrap();
+    let pg_version = MY_PGVERSION;
     debug!("find_end_of_wal PG_VERSION: {}", pg_version);
 
     let mut decoder = WalStreamDecoder::new(start_lsn, pg_version);

@@ -4,6 +4,7 @@ use std::str::FromStr;
 
 use anyhow::*;
 use clap::{Arg, ArgMatches, Command, value_parser};
 use postgres::Client;
+use postgres_ffi::PgMajorVersion;
 use wal_craft::*;
 
@@ -48,7 +49,7 @@ fn main() -> Result<()> {
         Some(("with-initdb", arg_matches)) => {
             let cfg = Conf {
                 pg_version: *arg_matches
-                    .get_one::<u32>("pg-version")
+                    .get_one::<PgMajorVersion>("pg-version")
                     .context("'pg-version' is required")?,
                 pg_distrib_dir: arg_matches
                     .get_one::<PathBuf>("pg-distrib-dir")

@@ -9,8 +9,8 @@ use log::*;
 use postgres::Client;
 use postgres::types::PgLsn;
 use postgres_ffi::{
-    WAL_SEGMENT_SIZE, XLOG_BLCKSZ, XLOG_SIZE_OF_XLOG_LONG_PHD, XLOG_SIZE_OF_XLOG_RECORD,
-    XLOG_SIZE_OF_XLOG_SHORT_PHD,
+    PgMajorVersion, WAL_SEGMENT_SIZE, XLOG_BLCKSZ, XLOG_SIZE_OF_XLOG_LONG_PHD,
+    XLOG_SIZE_OF_XLOG_RECORD, XLOG_SIZE_OF_XLOG_SHORT_PHD,
 };
@@ -29,7 +29,7 @@ macro_rules! xlog_utils_test {
 postgres_ffi::for_all_postgres_versions! { xlog_utils_test }
 
 pub struct Conf {
-    pub pg_version: u32,
+    pub pg_version: PgMajorVersion,
     pub pg_distrib_dir: PathBuf,
     pub datadir: PathBuf,
 }
@@ -52,11 +52,7 @@
     pub fn pg_distrib_dir(&self) -> anyhow::Result<PathBuf> {
         let path = self.pg_distrib_dir.clone();
 
-        #[allow(clippy::manual_range_patterns)]
-        match self.pg_version {
-            14 | 15 | 16 | 17 => Ok(path.join(format!("v{}", self.pg_version))),
-            _ => bail!("Unsupported postgres version: {}", self.pg_version),
-        }
+        Ok(path.join(self.pg_version.v_str()))
     }
 
     fn pg_bin_dir(&self) -> anyhow::Result<PathBuf> {

@@ -24,7 +24,7 @@ fn init_logging() {
 fn test_end_of_wal<C: crate::Crafter>(test_name: &str) {
     use crate::*;
 
-    let pg_version = PG_MAJORVERSION[1..3].parse::<u32>().unwrap();
+    let pg_version = MY_PGVERSION;
 
     // Craft some WAL
     let top_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"))

@@ -9,4 +9,5 @@ anyhow.workspace = true
 tokio.workspace = true
 camino.workspace = true
 thiserror.workspace = true
+postgres_versioninfo.workspace = true
 workspace_hack = { version = "0.1", path = "../../workspace_hack" }

@@ -7,12 +7,13 @@
 use std::fmt;
 
 use camino::Utf8Path;
+use postgres_versioninfo::PgMajorVersion;
 
 pub struct RunInitdbArgs<'a> {
     pub superuser: &'a str,
     pub locale: &'a str,
     pub initdb_bin: &'a Utf8Path,
-    pub pg_version: u32,
+    pub pg_version: PgMajorVersion,
     pub library_search_path: &'a Utf8Path,
     pub pgdata: &'a Utf8Path,
 }
@@ -79,12 +80,16 @@ pub async fn do_run_initdb(args: RunInitdbArgs<'_>) -> Result<(), Error> {
         .stderr(std::process::Stdio::piped());
 
     // Before version 14, only the libc provide was available.
-    if pg_version > 14 {
+    if pg_version > PgMajorVersion::PG14 {
        // Version 17 brought with it a builtin locale provider which only provides
         // C and C.UTF-8. While being safer for collation purposes since it is
         // guaranteed to be consistent throughout a major release, it is also more
         // performant.
-        let locale_provider = if pg_version >= 17 { "builtin" } else { "libc" };
+        let locale_provider = if pg_version >= PgMajorVersion::PG17 {
+            "builtin"
+        } else {
+            "libc"
+        };
 
         initdb_command.args(["--locale-provider", locale_provider]);
     }

@@ -0,0 +1,12 @@
+[package]
+name = "postgres_versioninfo"
+version = "0.1.0"
+edition = "2024"
+license.workspace = true
+
+[dependencies]
+anyhow.workspace = true
+thiserror.workspace = true
+serde.workspace = true
+serde_repr.workspace = true
+workspace_hack = { version = "0.1", path = "../../workspace_hack" }

@@ -0,0 +1,175 @@
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
+use serde_repr::{Deserialize_repr, Serialize_repr};
+use std::fmt::{Display, Formatter};
+use std::str::FromStr;
+
+/// An enum with one variant for each major version of PostgreSQL that we support.
+///
+#[derive(Debug, Clone, Copy, Ord, PartialOrd, Eq, PartialEq, Deserialize_repr, Serialize_repr)]
+#[repr(u32)]
+pub enum PgMajorVersion {
+    PG14 = 14,
+    PG15 = 15,
+    PG16 = 16,
+    PG17 = 17,
+    // !!! When you add a new PgMajorVersion, don't forget to update PgMajorVersion::ALL
+}
+
+/// A full PostgreSQL version ID, in MMmmbb numerical format (Major/minor/bugfix)
+#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
+#[repr(transparent)]
+pub struct PgVersionId(u32);
+
+impl PgVersionId {
+    pub const UNKNOWN: PgVersionId = PgVersionId(0);
+
+    pub fn from_full_pg_version(version: u32) -> PgVersionId {
+        match version {
+            0 => PgVersionId(version), // unknown version
+            140000..180000 => PgVersionId(version),
+            _ => panic!("Invalid full PostgreSQL version ID {version}"),
+        }
+    }
+}
+
+impl Display for PgVersionId {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        u32::fmt(&self.0, f)
+    }
+}
+
+impl Serialize for PgVersionId {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        u32::serialize(&self.0, serializer)
+    }
+}
+
+impl<'de> Deserialize<'de> for PgVersionId {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        u32::deserialize(deserializer).map(PgVersionId)
+    }
+
+    fn deserialize_in_place<D>(deserializer: D, place: &mut Self) -> Result<(), D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        u32::deserialize_in_place(deserializer, &mut place.0)
+    }
+}
+
+impl PgMajorVersion {
+    /// Get the numerical representation of the represented Major Version
+    pub const fn major_version_num(&self) -> u32 {
+        match self {
+            PgMajorVersion::PG14 => 14,
+            PgMajorVersion::PG15 => 15,
+            PgMajorVersion::PG16 => 16,
+            PgMajorVersion::PG17 => 17,
+        }
+    }
+
+    /// Get the contents of this version's PG_VERSION file.
+    ///
+    /// The PG_VERSION file is used to determine the PostgreSQL version that currently
+    /// owns the data in a PostgreSQL data directory.
+    pub fn versionfile_string(&self) -> &'static str {
+        match self {
+            PgMajorVersion::PG14 => "14",
+            PgMajorVersion::PG15 => "15",
+            PgMajorVersion::PG16 => "16\x0A",
+            PgMajorVersion::PG17 => "17\x0A",
+        }
+    }
+
+    /// Get the v{version} string of this major PostgreSQL version.
+    ///
+    /// Because this was hand-coded in various places, this was moved into a shared
+    /// implementation.
+    pub fn v_str(&self) -> String {
+        match self {
+            PgMajorVersion::PG14 => "v14",
+            PgMajorVersion::PG15 => "v15",
+            PgMajorVersion::PG16 => "v16",
+            PgMajorVersion::PG17 => "v17",
+        }
+        .to_string()
+    }
+
+    /// All currently supported major versions of PostgreSQL.
+    pub const ALL: &'static [PgMajorVersion] = &[
+        PgMajorVersion::PG14,
+        PgMajorVersion::PG15,
+        PgMajorVersion::PG16,
+        PgMajorVersion::PG17,
+    ];
+}
+
+impl Display for PgMajorVersion {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        f.write_str(match self {
+            PgMajorVersion::PG14 => "PgMajorVersion::PG14",
+            PgMajorVersion::PG15 => "PgMajorVersion::PG15",
+            PgMajorVersion::PG16 => "PgMajorVersion::PG16",
+            PgMajorVersion::PG17 => "PgMajorVersion::PG17",
+        })
+    }
+}
+
+#[derive(Debug, thiserror::Error)]
+#[allow(dead_code)]
+pub struct InvalidPgVersion(u32);
+
+impl Display for InvalidPgVersion {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(f, "InvalidPgVersion({})", self.0)
+    }
+}
+
+impl TryFrom<PgVersionId> for PgMajorVersion {
+    type Error = InvalidPgVersion;
+
+    fn try_from(value: PgVersionId) -> Result<Self, Self::Error> {
+        Ok(match value.0 / 10000 {
+            14 => PgMajorVersion::PG14,
+            15 => PgMajorVersion::PG15,
+            16 => PgMajorVersion::PG16,
+            17 => PgMajorVersion::PG17,
+            _ => return Err(InvalidPgVersion(value.0)),
+        })
+    }
+}
+
+impl From<PgMajorVersion> for PgVersionId {
+    fn from(value: PgMajorVersion) -> Self {
+        PgVersionId((value as u32) * 10000)
+    }
+}
+
+#[derive(Debug, PartialEq, Eq, thiserror::Error)]
+pub struct PgMajorVersionParseError(String);
+
+impl Display for PgMajorVersionParseError {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(f, "PgMajorVersionParseError({})", self.0)
+    }
+}
+
+impl FromStr for PgMajorVersion {
+    type Err = PgMajorVersionParseError;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        Ok(match s {
+            "14" => PgMajorVersion::PG14,
+            "15" => PgMajorVersion::PG15,
+            "16" => PgMajorVersion::PG16,
+            "17" => PgMajorVersion::PG17,
+            _ => return Err(PgMajorVersionParseError(s.to_string())),
+        })
+    }
+}

@@ -10,6 +10,7 @@ const_format.workspace = true
 serde.workspace = true
 serde_json.workspace = true
 postgres_ffi.workspace = true
+postgres_versioninfo.workspace = true
 pq_proto.workspace = true
 tokio.workspace = true
 utils.workspace = true

@@ -8,6 +8,8 @@ pub mod membership;
 /// Public API types
 pub mod models;
 
+pub use postgres_versioninfo::{PgMajorVersion, PgVersionId};
+
 /// Consensus logical timestamp. Note: it is a part of sk control file.
 pub type Term = u64;
 /// With this term timeline is created initially. It
@@ -20,7 +22,7 @@ pub const INITIAL_TERM: Term = 0;
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct ServerInfo {
     /// Postgres server version
-    pub pg_version: u32,
+    pub pg_version: PgVersionId,
     pub system_id: SystemId,
     pub wal_seg_size: u32,
 }

@@ -4,6 +4,7 @@ use std::net::SocketAddr;
 
 use pageserver_api::shard::ShardIdentity;
 use postgres_ffi::TimestampTz;
+use postgres_versioninfo::PgVersionId;
 use serde::{Deserialize, Serialize};
 use tokio::time::Instant;
 use utils::id::{NodeId, TenantId, TenantTimelineId, TimelineId};
@@ -23,8 +24,7 @@ pub struct TimelineCreateRequest {
     pub tenant_id: TenantId,
     pub timeline_id: TimelineId,
     pub mconf: Configuration,
-    /// In the PG_VERSION_NUM macro format, like 140017.
-    pub pg_version: u32,
+    pub pg_version: PgVersionId,
     pub system_id: Option<u64>,
     // By default WAL_SEGMENT_SIZE
     pub wal_seg_size: Option<u32>,

@@ -10,7 +10,7 @@ use futures::StreamExt;
 use futures::stream::FuturesUnordered;
 use pageserver_api::shard::{ShardIdentity, ShardStripeSize};
 use postgres_ffi::waldecoder::WalStreamDecoder;
-use postgres_ffi::{MAX_SEND_SIZE, WAL_SEGMENT_SIZE};
+use postgres_ffi::{MAX_SEND_SIZE, PgMajorVersion, WAL_SEGMENT_SIZE};
 use pprof::criterion::{Output, PProfProfiler};
 use remote_storage::{
     DownloadOpts, GenericRemoteStorage, ListingMode, RemoteStorageConfig, RemoteStorageKind,
@@ -115,7 +115,7 @@ struct BenchmarkData {
 
 #[derive(Deserialize)]
 struct BenchmarkMetadata {
-    pg_version: u32,
+    pg_version: PgMajorVersion,
     start_lsn: Lsn,
 }

View File

@@ -7,8 +7,8 @@ use bytes::{Buf, Bytes};
use pageserver_api::key::rel_block_to_key; use pageserver_api::key::rel_block_to_key;
use pageserver_api::reltag::{RelTag, SlruKind}; use pageserver_api::reltag::{RelTag, SlruKind};
use pageserver_api::shard::ShardIdentity; use pageserver_api::shard::ShardIdentity;
use postgres_ffi::pg_constants;
use postgres_ffi::walrecord::*; use postgres_ffi::walrecord::*;
use postgres_ffi::{PgMajorVersion, pg_constants};
use postgres_ffi_types::forknum::VISIBILITYMAP_FORKNUM; use postgres_ffi_types::forknum::VISIBILITYMAP_FORKNUM;
use utils::lsn::Lsn; use utils::lsn::Lsn;
@@ -24,7 +24,7 @@ impl InterpretedWalRecord {
buf: Bytes, buf: Bytes,
shards: &[ShardIdentity], shards: &[ShardIdentity],
next_record_lsn: Lsn, next_record_lsn: Lsn,
pg_version: u32, pg_version: PgMajorVersion,
) -> anyhow::Result<HashMap<ShardIdentity, InterpretedWalRecord>> { ) -> anyhow::Result<HashMap<ShardIdentity, InterpretedWalRecord>> {
let mut decoded = DecodedWALRecord::default(); let mut decoded = DecodedWALRecord::default();
decode_wal_record(buf, &mut decoded, pg_version)?; decode_wal_record(buf, &mut decoded, pg_version)?;
@@ -78,7 +78,7 @@ impl MetadataRecord {
decoded: &DecodedWALRecord, decoded: &DecodedWALRecord,
shard_records: &mut HashMap<ShardIdentity, InterpretedWalRecord>, shard_records: &mut HashMap<ShardIdentity, InterpretedWalRecord>,
next_record_lsn: Lsn, next_record_lsn: Lsn,
pg_version: u32, pg_version: PgMajorVersion,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
// Note: this doesn't actually copy the bytes since // Note: this doesn't actually copy the bytes since
// the [`Bytes`] type implements it via a level of indirection. // the [`Bytes`] type implements it via a level of indirection.
@@ -193,7 +193,7 @@ impl MetadataRecord {
fn decode_heapam_record( fn decode_heapam_record(
buf: &mut Bytes, buf: &mut Bytes,
decoded: &DecodedWALRecord, decoded: &DecodedWALRecord,
pg_version: u32, pg_version: PgMajorVersion,
) -> anyhow::Result<Option<MetadataRecord>> { ) -> anyhow::Result<Option<MetadataRecord>> {
// Handle VM bit updates that are implicitly part of heap records. // Handle VM bit updates that are implicitly part of heap records.
@@ -205,7 +205,7 @@ impl MetadataRecord {
let mut flags = pg_constants::VISIBILITYMAP_VALID_BITS; let mut flags = pg_constants::VISIBILITYMAP_VALID_BITS;
match pg_version { match pg_version {
14 => { PgMajorVersion::PG14 => {
if decoded.xl_rmid == pg_constants::RM_HEAP_ID { if decoded.xl_rmid == pg_constants::RM_HEAP_ID {
let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK; let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
@@ -272,7 +272,7 @@ impl MetadataRecord {
anyhow::bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid); anyhow::bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid);
} }
} }
15 => { PgMajorVersion::PG15 => {
if decoded.xl_rmid == pg_constants::RM_HEAP_ID { if decoded.xl_rmid == pg_constants::RM_HEAP_ID {
let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK; let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
@@ -339,7 +339,7 @@ impl MetadataRecord {
anyhow::bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid); anyhow::bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid);
} }
} }
16 => { PgMajorVersion::PG16 => {
if decoded.xl_rmid == pg_constants::RM_HEAP_ID { if decoded.xl_rmid == pg_constants::RM_HEAP_ID {
let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK; let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
@@ -406,7 +406,7 @@ impl MetadataRecord {
anyhow::bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid); anyhow::bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid);
} }
} }
17 => { PgMajorVersion::PG17 => {
if decoded.xl_rmid == pg_constants::RM_HEAP_ID { if decoded.xl_rmid == pg_constants::RM_HEAP_ID {
let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK; let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
@@ -473,7 +473,6 @@ impl MetadataRecord {
anyhow::bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid); anyhow::bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid);
} }
} }
_ => {}
} }
if new_heap_blkno.is_some() || old_heap_blkno.is_some() { if new_heap_blkno.is_some() || old_heap_blkno.is_some() {
@@ -500,7 +499,7 @@ impl MetadataRecord {
fn decode_neonmgr_record( fn decode_neonmgr_record(
buf: &mut Bytes, buf: &mut Bytes,
decoded: &DecodedWALRecord, decoded: &DecodedWALRecord,
pg_version: u32, pg_version: PgMajorVersion,
) -> anyhow::Result<Option<MetadataRecord>> { ) -> anyhow::Result<Option<MetadataRecord>> {
// Handle VM bit updates that are implicitly part of heap records. // Handle VM bit updates that are implicitly part of heap records.
@@ -514,7 +513,7 @@ impl MetadataRecord {
assert_eq!(decoded.xl_rmid, pg_constants::RM_NEON_ID); assert_eq!(decoded.xl_rmid, pg_constants::RM_NEON_ID);
match pg_version { match pg_version {
16 | 17 => { PgMajorVersion::PG16 | PgMajorVersion::PG17 => {
let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK; let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
match info { match info {
@@ -574,7 +573,7 @@ impl MetadataRecord {
info => anyhow::bail!("Unknown WAL record type for Neon RMGR: {}", info), info => anyhow::bail!("Unknown WAL record type for Neon RMGR: {}", info),
} }
} }
_ => anyhow::bail!( PgMajorVersion::PG15 | PgMajorVersion::PG14 => anyhow::bail!(
"Neon RMGR has no known compatibility with PostgreSQL version {}", "Neon RMGR has no known compatibility with PostgreSQL version {}",
pg_version pg_version
), ),
@@ -629,14 +628,15 @@ impl MetadataRecord {
fn decode_dbase_record( fn decode_dbase_record(
buf: &mut Bytes, buf: &mut Bytes,
decoded: &DecodedWALRecord, decoded: &DecodedWALRecord,
pg_version: u32, pg_version: PgMajorVersion,
) -> anyhow::Result<Option<MetadataRecord>> { ) -> anyhow::Result<Option<MetadataRecord>> {
// TODO: Refactor this to avoid the duplication between postgres versions. // TODO: Refactor this to avoid the duplication between postgres versions.
let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK; let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
tracing::debug!(%info, %pg_version, "handle RM_DBASE_ID"); tracing::debug!(%info, %pg_version, "handle RM_DBASE_ID");
if pg_version == 14 { match pg_version {
PgMajorVersion::PG14 => {
if info == postgres_ffi::v14::bindings::XLOG_DBASE_CREATE { if info == postgres_ffi::v14::bindings::XLOG_DBASE_CREATE {
let createdb = XlCreateDatabase::decode(buf); let createdb = XlCreateDatabase::decode(buf);
tracing::debug!("XLOG_DBASE_CREATE v14"); tracing::debug!("XLOG_DBASE_CREATE v14");
@@ -659,7 +659,8 @@ impl MetadataRecord {
return Ok(Some(record)); return Ok(Some(record));
} }
} else if pg_version == 15 { }
PgMajorVersion::PG15 => {
if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_WAL_LOG { if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_WAL_LOG {
tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop"); tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop");
} else if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_FILE_COPY { } else if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_FILE_COPY {
@@ -686,7 +687,8 @@ impl MetadataRecord {
return Ok(Some(record)); return Ok(Some(record));
} }
} else if pg_version == 16 { }
PgMajorVersion::PG16 => {
if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_WAL_LOG { if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_WAL_LOG {
tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop"); tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop");
} else if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_FILE_COPY { } else if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_FILE_COPY {
@@ -713,7 +715,8 @@ impl MetadataRecord {
return Ok(Some(record)); return Ok(Some(record));
} }
} else if pg_version == 17 { }
PgMajorVersion::PG17 => {
if info == postgres_ffi::v17::bindings::XLOG_DBASE_CREATE_WAL_LOG { if info == postgres_ffi::v17::bindings::XLOG_DBASE_CREATE_WAL_LOG {
tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop"); tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop");
} else if info == postgres_ffi::v17::bindings::XLOG_DBASE_CREATE_FILE_COPY { } else if info == postgres_ffi::v17::bindings::XLOG_DBASE_CREATE_FILE_COPY {
@@ -741,6 +744,7 @@ impl MetadataRecord {
return Ok(Some(record)); return Ok(Some(record));
} }
} }
}
Ok(None) Ok(None)
} }
@@ -748,12 +752,12 @@ impl MetadataRecord {
fn decode_clog_record( fn decode_clog_record(
buf: &mut Bytes, buf: &mut Bytes,
decoded: &DecodedWALRecord, decoded: &DecodedWALRecord,
pg_version: u32, pg_version: PgMajorVersion,
) -> anyhow::Result<Option<MetadataRecord>> { ) -> anyhow::Result<Option<MetadataRecord>> {
let info = decoded.xl_info & !pg_constants::XLR_INFO_MASK; let info = decoded.xl_info & !pg_constants::XLR_INFO_MASK;
if info == pg_constants::CLOG_ZEROPAGE { if info == pg_constants::CLOG_ZEROPAGE {
let pageno = if pg_version < 17 { let pageno = if pg_version < PgMajorVersion::PG17 {
buf.get_u32_le() buf.get_u32_le()
} else { } else {
buf.get_u64_le() as u32 buf.get_u64_le() as u32
@@ -765,7 +769,7 @@ impl MetadataRecord {
ClogZeroPage { segno, rpageno }, ClogZeroPage { segno, rpageno },
)))) ))))
} else { } else {
assert!(info == pg_constants::CLOG_TRUNCATE); assert_eq!(info, pg_constants::CLOG_TRUNCATE);
let xlrec = XlClogTruncate::decode(buf, pg_version); let xlrec = XlClogTruncate::decode(buf, pg_version);
Ok(Some(MetadataRecord::Clog(ClogRecord::Truncate( Ok(Some(MetadataRecord::Clog(ClogRecord::Truncate(
@@ -838,14 +842,14 @@ impl MetadataRecord {
fn decode_multixact_record( fn decode_multixact_record(
buf: &mut Bytes, buf: &mut Bytes,
decoded: &DecodedWALRecord, decoded: &DecodedWALRecord,
pg_version: u32, pg_version: PgMajorVersion,
) -> anyhow::Result<Option<MetadataRecord>> { ) -> anyhow::Result<Option<MetadataRecord>> {
let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK; let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
if info == pg_constants::XLOG_MULTIXACT_ZERO_OFF_PAGE if info == pg_constants::XLOG_MULTIXACT_ZERO_OFF_PAGE
|| info == pg_constants::XLOG_MULTIXACT_ZERO_MEM_PAGE || info == pg_constants::XLOG_MULTIXACT_ZERO_MEM_PAGE
{ {
let pageno = if pg_version < 17 { let pageno = if pg_version < PgMajorVersion::PG17 {
buf.get_u32_le() buf.get_u32_le()
} else { } else {
buf.get_u64_le() as u32 buf.get_u64_le() as u32
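decode_clog_record and decode_multixact_record read a 32-bit page number before PG17 and a 64-bit one from PG17 onwards, so both branch on pg_version < PgMajorVersion::PG17. That comparison is only sound if the enum's ordering tracks release order; Rust's derived ordering follows declaration order, so a minimal check (assuming the ordering is derived as in the sketch earlier):

// Derived enum ordering follows declaration order, so declaring the
// variants oldest-first keeps range checks like the ones above correct:
assert!(PgMajorVersion::PG14 < PgMajorVersion::PG17);
assert!(PgMajorVersion::PG17 >= PgMajorVersion::PG17);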

View File

@@ -13,7 +13,7 @@ use pageserver_api::keyspace::KeySpace;
use pageserver_api::reltag::RelTag; use pageserver_api::reltag::RelTag;
use pageserver_api::shard::ShardIdentity; use pageserver_api::shard::ShardIdentity;
use postgres_ffi::walrecord::{DecodedBkpBlock, DecodedWALRecord}; use postgres_ffi::walrecord::{DecodedBkpBlock, DecodedWALRecord};
use postgres_ffi::{BLCKSZ, page_is_new, page_set_lsn, pg_constants}; use postgres_ffi::{BLCKSZ, PgMajorVersion, page_is_new, page_set_lsn, pg_constants};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use utils::bin_ser::BeSer; use utils::bin_ser::BeSer;
use utils::lsn::Lsn; use utils::lsn::Lsn;
@@ -139,7 +139,7 @@ impl SerializedValueBatch {
decoded: DecodedWALRecord, decoded: DecodedWALRecord,
shard_records: &mut HashMap<ShardIdentity, InterpretedWalRecord>, shard_records: &mut HashMap<ShardIdentity, InterpretedWalRecord>,
next_record_lsn: Lsn, next_record_lsn: Lsn,
pg_version: u32, pg_version: PgMajorVersion,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
// First determine how big the buffers need to be and allocate it up-front. // First determine how big the buffers need to be and allocate it up-front.
// This duplicates some of the work below, but it's empirically much faster. // This duplicates some of the work below, but it's empirically much faster.
@@ -267,7 +267,7 @@ impl SerializedValueBatch {
fn estimate_buffer_size( fn estimate_buffer_size(
decoded: &DecodedWALRecord, decoded: &DecodedWALRecord,
shard: &ShardIdentity, shard: &ShardIdentity,
pg_version: u32, pg_version: PgMajorVersion,
) -> usize { ) -> usize {
let mut estimate: usize = 0; let mut estimate: usize = 0;
@@ -303,7 +303,11 @@ impl SerializedValueBatch {
estimate estimate
} }
fn block_is_image(decoded: &DecodedWALRecord, blk: &DecodedBkpBlock, pg_version: u32) -> bool { fn block_is_image(
decoded: &DecodedWALRecord,
blk: &DecodedBkpBlock,
pg_version: PgMajorVersion,
) -> bool {
blk.apply_image blk.apply_image
&& blk.has_image && blk.has_image
&& decoded.xl_rmid == pg_constants::RM_XLOG_ID && decoded.xl_rmid == pg_constants::RM_XLOG_ID

File diff suppressed because one or more lines are too long

View File

@@ -18,6 +18,7 @@ workspace_hack = { version = "0.1", path = "../../workspace_hack" }
tokio-postgres.workspace = true tokio-postgres.workspace = true
tokio-stream.workspace = true tokio-stream.workspace = true
tokio.workspace = true tokio.workspace = true
postgres_versioninfo.workspace = true
futures.workspace = true futures.workspace = true
tokio-util.workspace = true tokio-util.workspace = true
anyhow.workspace = true anyhow.workspace = true

View File

@@ -7,6 +7,7 @@ use detach_ancestor::AncestorDetached;
use http_utils::error::HttpErrorBody; use http_utils::error::HttpErrorBody;
use pageserver_api::models::*; use pageserver_api::models::*;
use pageserver_api::shard::TenantShardId; use pageserver_api::shard::TenantShardId;
use postgres_versioninfo::PgMajorVersion;
pub use reqwest::Body as ReqwestBody; pub use reqwest::Body as ReqwestBody;
use reqwest::{IntoUrl, Method, StatusCode, Url}; use reqwest::{IntoUrl, Method, StatusCode, Url};
use utils::id::{TenantId, TimelineId}; use utils::id::{TenantId, TimelineId};
@@ -745,9 +746,11 @@ impl Client {
timeline_id: TimelineId, timeline_id: TimelineId,
base_lsn: Lsn, base_lsn: Lsn,
end_lsn: Lsn, end_lsn: Lsn,
pg_version: u32, pg_version: PgMajorVersion,
basebackup_tarball: ReqwestBody, basebackup_tarball: ReqwestBody,
) -> Result<()> { ) -> Result<()> {
let pg_version = pg_version.major_version_num();
let uri = format!( let uri = format!(
"{}/v1/tenant/{tenant_id}/timeline/{timeline_id}/import_basebackup?base_lsn={base_lsn}&end_lsn={end_lsn}&pg_version={pg_version}", "{}/v1/tenant/{tenant_id}/timeline/{timeline_id}/import_basebackup?base_lsn={base_lsn}&end_lsn={end_lsn}&pg_version={pg_version}",
self.mgmt_api_endpoint, self.mgmt_api_endpoint,
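On the HTTP API the version stays a bare major number: the client converts the enum back before building the query string, so the wire format is unchanged by the new type.

// major_version_num() recovers the plain number the import_basebackup
// endpoint expects in its pg_version query parameter:
assert_eq!(PgMajorVersion::PG17.major_version_num(), 17);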

View File

@@ -20,7 +20,8 @@ use pageserver_api::key::{Key, rel_block_to_key};
use pageserver_api::reltag::{RelTag, SlruKind}; use pageserver_api::reltag::{RelTag, SlruKind};
use postgres_ffi::pg_constants::{PG_HBA, PGDATA_SPECIAL_FILES}; use postgres_ffi::pg_constants::{PG_HBA, PGDATA_SPECIAL_FILES};
use postgres_ffi::{ use postgres_ffi::{
BLCKSZ, PG_TLI, RELSEG_SIZE, WAL_SEGMENT_SIZE, XLogFileName, dispatch_pgversion, pg_constants, BLCKSZ, PG_TLI, PgMajorVersion, RELSEG_SIZE, WAL_SEGMENT_SIZE, XLogFileName,
dispatch_pgversion, pg_constants,
}; };
use postgres_ffi_types::constants::{DEFAULTTABLESPACE_OID, GLOBALTABLESPACE_OID}; use postgres_ffi_types::constants::{DEFAULTTABLESPACE_OID, GLOBALTABLESPACE_OID};
use postgres_ffi_types::forknum::{INIT_FORKNUM, MAIN_FORKNUM}; use postgres_ffi_types::forknum::{INIT_FORKNUM, MAIN_FORKNUM};
@@ -619,10 +620,7 @@ where
}; };
if spcnode == GLOBALTABLESPACE_OID { if spcnode == GLOBALTABLESPACE_OID {
let pg_version_str = match self.timeline.pg_version { let pg_version_str = self.timeline.pg_version.versionfile_string();
14 | 15 => self.timeline.pg_version.to_string(),
ver => format!("{ver}\x0A"),
};
let header = new_tar_header("PG_VERSION", pg_version_str.len() as u64)?; let header = new_tar_header("PG_VERSION", pg_version_str.len() as u64)?;
self.ar self.ar
.append(&header, pg_version_str.as_bytes()) .append(&header, pg_version_str.as_bytes())
@@ -679,10 +677,7 @@ where
if let Some(img) = relmap_img { if let Some(img) = relmap_img {
let dst_path = format!("base/{dbnode}/PG_VERSION"); let dst_path = format!("base/{dbnode}/PG_VERSION");
let pg_version_str = match self.timeline.pg_version { let pg_version_str = self.timeline.pg_version.versionfile_string();
14 | 15 => self.timeline.pg_version.to_string(),
ver => format!("{ver}\x0A"),
};
let header = new_tar_header(&dst_path, pg_version_str.len() as u64)?; let header = new_tar_header(&dst_path, pg_version_str.len() as u64)?;
self.ar self.ar
.append(&header, pg_version_str.as_bytes()) .append(&header, pg_version_str.as_bytes())
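versionfile_string() replaces two identical matches that wrote PG_VERSION files: the removed arms show that v14/v15 store the bare number while v16/v17 append a newline (\x0A). A hedged reconstruction of the body; only the method name and these call sites are confirmed by the diff:

impl PgMajorVersion {
    fn versionfile_string(&self) -> String {
        match self {
            // v14/v15 PG_VERSION files carry the bare number...
            PgMajorVersion::PG14 | PgMajorVersion::PG15 => {
                self.major_version_num().to_string()
            }
            // ...later versions end with a newline, as in the removed match
            PgMajorVersion::PG16 | PgMajorVersion::PG17 => {
                format!("{}\x0A", self.major_version_num())
            }
        }
    }
}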
@@ -713,7 +708,7 @@ where
buf.extend_from_slice(&img[..]); buf.extend_from_slice(&img[..]);
let crc = crc32c::crc32c(&img[..]); let crc = crc32c::crc32c(&img[..]);
buf.put_u32_le(crc); buf.put_u32_le(crc);
let path = if self.timeline.pg_version < 17 { let path = if self.timeline.pg_version < PgMajorVersion::PG17 {
format!("pg_twophase/{xid:>08X}") format!("pg_twophase/{xid:>08X}")
} else { } else {
format!("pg_twophase/{xid:>016X}") format!("pg_twophase/{xid:>016X}")

View File

@@ -11,7 +11,7 @@ use std::num::NonZeroUsize;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use anyhow::{Context, bail, ensure}; use anyhow::{Context, ensure};
use camino::{Utf8Path, Utf8PathBuf}; use camino::{Utf8Path, Utf8PathBuf};
use once_cell::sync::OnceCell; use once_cell::sync::OnceCell;
use pageserver_api::config::{ use pageserver_api::config::{
@@ -22,6 +22,7 @@ use pageserver_api::models::ImageCompressionAlgorithm;
use pageserver_api::shard::TenantShardId; use pageserver_api::shard::TenantShardId;
use pem::Pem; use pem::Pem;
use postgres_backend::AuthType; use postgres_backend::AuthType;
use postgres_ffi::PgMajorVersion;
use remote_storage::{RemotePath, RemoteStorageConfig}; use remote_storage::{RemotePath, RemoteStorageConfig};
use reqwest::Url; use reqwest::Url;
use storage_broker::Uri; use storage_broker::Uri;
@@ -338,20 +339,16 @@ impl PageServerConf {
// //
// Postgres distribution paths // Postgres distribution paths
// //
pub fn pg_distrib_dir(&self, pg_version: u32) -> anyhow::Result<Utf8PathBuf> { pub fn pg_distrib_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result<Utf8PathBuf> {
let path = self.pg_distrib_dir.clone(); let path = self.pg_distrib_dir.clone();
#[allow(clippy::manual_range_patterns)] Ok(path.join(pg_version.v_str()))
match pg_version {
14 | 15 | 16 | 17 => Ok(path.join(format!("v{pg_version}"))),
_ => bail!("Unsupported postgres version: {}", pg_version),
}
} }
pub fn pg_bin_dir(&self, pg_version: u32) -> anyhow::Result<Utf8PathBuf> { pub fn pg_bin_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result<Utf8PathBuf> {
Ok(self.pg_distrib_dir(pg_version)?.join("bin")) Ok(self.pg_distrib_dir(pg_version)?.join("bin"))
} }
pub fn pg_lib_dir(&self, pg_version: u32) -> anyhow::Result<Utf8PathBuf> { pub fn pg_lib_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result<Utf8PathBuf> {
Ok(self.pg_distrib_dir(pg_version)?.join("lib")) Ok(self.pg_distrib_dir(pg_version)?.join("lib"))
} }
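With the enum, pg_distrib_dir no longer needs its clippy-suppressed range match or the bail on unknown numbers: every value of the type is a supported version. v_str() presumably mirrors the removed format!("v{pg_version}"):

// Assumed output of v_str(), matching the removed format string:
assert_eq!(PgMajorVersion::PG16.v_str(), "v16");
// pg_bin_dir(PgMajorVersion::PG16) then resolves to <pg_distrib_dir>/v16/bin,
// with no "unsupported postgres version" error path left to hit.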

View File

@@ -41,6 +41,7 @@ use pageserver_api::models::{
TopTenantShardItem, TopTenantShardsRequest, TopTenantShardsResponse, TopTenantShardItem, TopTenantShardsRequest, TopTenantShardsResponse,
}; };
use pageserver_api::shard::{ShardCount, TenantShardId}; use pageserver_api::shard::{ShardCount, TenantShardId};
use postgres_ffi::PgMajorVersion;
use remote_storage::{DownloadError, GenericRemoteStorage, TimeTravelError}; use remote_storage::{DownloadError, GenericRemoteStorage, TimeTravelError};
use scopeguard::defer; use scopeguard::defer;
use serde_json::json; use serde_json::json;
@@ -3385,7 +3386,7 @@ async fn put_tenant_timeline_import_basebackup(
let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?; let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
let base_lsn: Lsn = must_parse_query_param(&request, "base_lsn")?; let base_lsn: Lsn = must_parse_query_param(&request, "base_lsn")?;
let end_lsn: Lsn = must_parse_query_param(&request, "end_lsn")?; let end_lsn: Lsn = must_parse_query_param(&request, "end_lsn")?;
let pg_version: u32 = must_parse_query_param(&request, "pg_version")?; let pg_version: PgMajorVersion = must_parse_query_param(&request, "pg_version")?;
check_permission(&request, Some(tenant_id))?; check_permission(&request, Some(tenant_id))?;
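must_parse_query_param now yields a PgMajorVersion directly, which implies the type parses from a string and rejects anything outside the supported set. A hedged sketch of the implied FromStr impl; the error type and message are assumptions:

impl std::str::FromStr for PgMajorVersion {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(match s {
            "14" => PgMajorVersion::PG14,
            "15" => PgMajorVersion::PG15,
            "16" => PgMajorVersion::PG16,
            "17" => PgMajorVersion::PG17,
            // a bad query parameter now fails here, not deep in version logic
            other => anyhow::bail!("invalid PostgreSQL major version: {other}"),
        })
    }
}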

View File

@@ -38,6 +38,7 @@ pub mod walredo;
use camino::Utf8Path; use camino::Utf8Path;
use deletion_queue::DeletionQueue; use deletion_queue::DeletionQueue;
use postgres_ffi::PgMajorVersion;
use tenant::mgr::{BackgroundPurges, TenantManager}; use tenant::mgr::{BackgroundPurges, TenantManager};
use tenant::secondary; use tenant::secondary;
use tracing::{info, info_span}; use tracing::{info, info_span};
@@ -51,7 +52,7 @@ use tracing::{info, info_span};
/// backwards-compatible changes to the metadata format. /// backwards-compatible changes to the metadata format.
pub const STORAGE_FORMAT_VERSION: u16 = 3; pub const STORAGE_FORMAT_VERSION: u16 = 3;
pub const DEFAULT_PG_VERSION: u32 = 17; pub const DEFAULT_PG_VERSION: PgMajorVersion = PgMajorVersion::PG17;
// Magic constants used to identify different kinds of files // Magic constants used to identify different kinds of files
pub const IMAGE_FILE_MAGIC: u16 = 0x5A60; pub const IMAGE_FILE_MAGIC: u16 = 0x5A60;

View File

@@ -25,7 +25,7 @@ use pageserver_api::keyspace::{KeySpaceRandomAccum, SparseKeySpace};
use pageserver_api::models::RelSizeMigration; use pageserver_api::models::RelSizeMigration;
use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind}; use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind};
use pageserver_api::shard::ShardIdentity; use pageserver_api::shard::ShardIdentity;
use postgres_ffi::{BLCKSZ, TimestampTz, TransactionId}; use postgres_ffi::{BLCKSZ, PgMajorVersion, TimestampTz, TransactionId};
use postgres_ffi_types::forknum::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM}; use postgres_ffi_types::forknum::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM};
use postgres_ffi_types::{Oid, RepOriginId}; use postgres_ffi_types::{Oid, RepOriginId};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@@ -1081,7 +1081,7 @@ impl Timeline {
// fetch directory entry // fetch directory entry
let buf = self.get(TWOPHASEDIR_KEY, lsn, ctx).await?; let buf = self.get(TWOPHASEDIR_KEY, lsn, ctx).await?;
if self.pg_version >= 17 { if self.pg_version >= PgMajorVersion::PG17 {
Ok(TwoPhaseDirectoryV17::des(&buf)?.xids) Ok(TwoPhaseDirectoryV17::des(&buf)?.xids)
} else { } else {
Ok(TwoPhaseDirectory::des(&buf)? Ok(TwoPhaseDirectory::des(&buf)?
@@ -1613,7 +1613,7 @@ impl DatadirModification<'_> {
.push((DirectoryKind::Db, MetricsUpdate::Set(0))); .push((DirectoryKind::Db, MetricsUpdate::Set(0)));
self.put(DBDIR_KEY, Value::Image(buf.into())); self.put(DBDIR_KEY, Value::Image(buf.into()));
let buf = if self.tline.pg_version >= 17 { let buf = if self.tline.pg_version >= PgMajorVersion::PG17 {
TwoPhaseDirectoryV17::ser(&TwoPhaseDirectoryV17 { TwoPhaseDirectoryV17::ser(&TwoPhaseDirectoryV17 {
xids: HashSet::new(), xids: HashSet::new(),
}) })
@@ -1967,7 +1967,7 @@ impl DatadirModification<'_> {
) -> Result<(), WalIngestError> { ) -> Result<(), WalIngestError> {
// Add it to the directory entry // Add it to the directory entry
let dirbuf = self.get(TWOPHASEDIR_KEY, ctx).await?; let dirbuf = self.get(TWOPHASEDIR_KEY, ctx).await?;
let newdirbuf = if self.tline.pg_version >= 17 { let newdirbuf = if self.tline.pg_version >= PgMajorVersion::PG17 {
let mut dir = TwoPhaseDirectoryV17::des(&dirbuf)?; let mut dir = TwoPhaseDirectoryV17::des(&dirbuf)?;
if !dir.xids.insert(xid) { if !dir.xids.insert(xid) {
Err(WalIngestErrorKind::FileAlreadyExists(xid))?; Err(WalIngestErrorKind::FileAlreadyExists(xid))?;
@@ -2383,7 +2383,7 @@ impl DatadirModification<'_> {
) -> Result<(), WalIngestError> { ) -> Result<(), WalIngestError> {
// Remove it from the directory entry // Remove it from the directory entry
let buf = self.get(TWOPHASEDIR_KEY, ctx).await?; let buf = self.get(TWOPHASEDIR_KEY, ctx).await?;
let newdirbuf = if self.tline.pg_version >= 17 { let newdirbuf = if self.tline.pg_version >= PgMajorVersion::PG17 {
let mut dir = TwoPhaseDirectoryV17::des(&buf)?; let mut dir = TwoPhaseDirectoryV17::des(&buf)?;
if !dir.xids.remove(&xid) { if !dir.xids.remove(&xid) {

View File

@@ -38,6 +38,7 @@ use pageserver_api::models::{
WalRedoManagerStatus, WalRedoManagerStatus,
}; };
use pageserver_api::shard::{ShardIdentity, ShardStripeSize, TenantShardId}; use pageserver_api::shard::{ShardIdentity, ShardStripeSize, TenantShardId};
use postgres_ffi::PgMajorVersion;
use remote_storage::{DownloadError, GenericRemoteStorage, TimeoutOrCancel}; use remote_storage::{DownloadError, GenericRemoteStorage, TimeoutOrCancel};
use remote_timeline_client::index::GcCompactionState; use remote_timeline_client::index::GcCompactionState;
use remote_timeline_client::manifest::{ use remote_timeline_client::manifest::{
@@ -497,7 +498,7 @@ impl WalRedoManager {
lsn: Lsn, lsn: Lsn,
base_img: Option<(Lsn, bytes::Bytes)>, base_img: Option<(Lsn, bytes::Bytes)>,
records: Vec<(Lsn, wal_decoder::models::record::NeonWalRecord)>, records: Vec<(Lsn, wal_decoder::models::record::NeonWalRecord)>,
pg_version: u32, pg_version: PgMajorVersion,
redo_attempt_type: RedoAttemptType, redo_attempt_type: RedoAttemptType,
) -> Result<bytes::Bytes, walredo::Error> { ) -> Result<bytes::Bytes, walredo::Error> {
match self { match self {
@@ -933,7 +934,7 @@ pub(crate) enum CreateTimelineParams {
pub(crate) struct CreateTimelineParamsBootstrap { pub(crate) struct CreateTimelineParamsBootstrap {
pub(crate) new_timeline_id: TimelineId, pub(crate) new_timeline_id: TimelineId,
pub(crate) existing_initdb_timeline_id: Option<TimelineId>, pub(crate) existing_initdb_timeline_id: Option<TimelineId>,
pub(crate) pg_version: u32, pub(crate) pg_version: PgMajorVersion,
} }
/// NB: See comment on [`CreateTimelineIdempotency::Branch`] for why there's no `pg_version` here. /// NB: See comment on [`CreateTimelineIdempotency::Branch`] for why there's no `pg_version` here.
@@ -971,7 +972,7 @@ pub(crate) enum CreateTimelineIdempotency {
/// NB: special treatment, see comment in [`Self`]. /// NB: special treatment, see comment in [`Self`].
FailWithConflict, FailWithConflict,
Bootstrap { Bootstrap {
pg_version: u32, pg_version: PgMajorVersion,
}, },
/// NB: branches always have the same `pg_version` as their ancestor. /// NB: branches always have the same `pg_version` as their ancestor.
/// While [`pageserver_api::models::TimelineCreateRequestMode::Branch::pg_version`] /// While [`pageserver_api::models::TimelineCreateRequestMode::Branch::pg_version`]
@@ -2541,7 +2542,7 @@ impl TenantShard {
self: &Arc<Self>, self: &Arc<Self>,
new_timeline_id: TimelineId, new_timeline_id: TimelineId,
initdb_lsn: Lsn, initdb_lsn: Lsn,
pg_version: u32, pg_version: PgMajorVersion,
ctx: &RequestContext, ctx: &RequestContext,
) -> anyhow::Result<(UninitializedTimeline, RequestContext)> { ) -> anyhow::Result<(UninitializedTimeline, RequestContext)> {
anyhow::ensure!( anyhow::ensure!(
@@ -2593,7 +2594,7 @@ impl TenantShard {
self: &Arc<Self>, self: &Arc<Self>,
new_timeline_id: TimelineId, new_timeline_id: TimelineId,
initdb_lsn: Lsn, initdb_lsn: Lsn,
pg_version: u32, pg_version: PgMajorVersion,
ctx: &RequestContext, ctx: &RequestContext,
) -> anyhow::Result<Arc<Timeline>> { ) -> anyhow::Result<Arc<Timeline>> {
let (uninit_tl, ctx) = self let (uninit_tl, ctx) = self
@@ -2632,7 +2633,7 @@ impl TenantShard {
self: &Arc<Self>, self: &Arc<Self>,
new_timeline_id: TimelineId, new_timeline_id: TimelineId,
initdb_lsn: Lsn, initdb_lsn: Lsn,
pg_version: u32, pg_version: PgMajorVersion,
ctx: &RequestContext, ctx: &RequestContext,
in_memory_layer_desc: Vec<timeline::InMemoryLayerTestDesc>, in_memory_layer_desc: Vec<timeline::InMemoryLayerTestDesc>,
delta_layer_desc: Vec<timeline::DeltaLayerTestDesc>, delta_layer_desc: Vec<timeline::DeltaLayerTestDesc>,
@@ -2898,7 +2899,7 @@ impl TenantShard {
Lsn(0), Lsn(0),
initdb_lsn, initdb_lsn,
initdb_lsn, initdb_lsn,
15, PgMajorVersion::PG15,
); );
this.prepare_new_timeline( this.prepare_new_timeline(
new_timeline_id, new_timeline_id,
@@ -5090,7 +5091,7 @@ impl TenantShard {
pub(crate) async fn bootstrap_timeline_test( pub(crate) async fn bootstrap_timeline_test(
self: &Arc<Self>, self: &Arc<Self>,
timeline_id: TimelineId, timeline_id: TimelineId,
pg_version: u32, pg_version: PgMajorVersion,
load_existing_initdb: Option<TimelineId>, load_existing_initdb: Option<TimelineId>,
ctx: &RequestContext, ctx: &RequestContext,
) -> anyhow::Result<Arc<Timeline>> { ) -> anyhow::Result<Arc<Timeline>> {
@@ -5232,7 +5233,7 @@ impl TenantShard {
async fn bootstrap_timeline( async fn bootstrap_timeline(
self: &Arc<Self>, self: &Arc<Self>,
timeline_id: TimelineId, timeline_id: TimelineId,
pg_version: u32, pg_version: PgMajorVersion,
load_existing_initdb: Option<TimelineId>, load_existing_initdb: Option<TimelineId>,
ctx: &RequestContext, ctx: &RequestContext,
) -> Result<CreateTimelineResult, CreateTimelineError> { ) -> Result<CreateTimelineResult, CreateTimelineError> {
@@ -5770,7 +5771,7 @@ impl TenantShard {
async fn run_initdb( async fn run_initdb(
conf: &'static PageServerConf, conf: &'static PageServerConf,
initdb_target_dir: &Utf8Path, initdb_target_dir: &Utf8Path,
pg_version: u32, pg_version: PgMajorVersion,
cancel: &CancellationToken, cancel: &CancellationToken,
) -> Result<(), InitdbError> { ) -> Result<(), InitdbError> {
let initdb_bin_path = conf let initdb_bin_path = conf
@@ -6051,7 +6052,7 @@ pub(crate) mod harness {
lsn: Lsn, lsn: Lsn,
base_img: Option<(Lsn, Bytes)>, base_img: Option<(Lsn, Bytes)>,
records: Vec<(Lsn, NeonWalRecord)>, records: Vec<(Lsn, NeonWalRecord)>,
_pg_version: u32, _pg_version: PgMajorVersion,
_redo_attempt_type: RedoAttemptType, _redo_attempt_type: RedoAttemptType,
) -> Result<Bytes, walredo::Error> { ) -> Result<Bytes, walredo::Error> {
let records_neon = records.iter().all(|r| apply_neon::can_apply_in_neon(&r.1)); let records_neon = records.iter().all(|r| apply_neon::can_apply_in_neon(&r.1));
@@ -6223,7 +6224,7 @@ mod tests {
async fn randomize_timeline( async fn randomize_timeline(
tenant: &Arc<TenantShard>, tenant: &Arc<TenantShard>,
new_timeline_id: TimelineId, new_timeline_id: TimelineId,
pg_version: u32, pg_version: PgMajorVersion,
spec: TestTimelineSpecification, spec: TestTimelineSpecification,
random: &mut rand::rngs::StdRng, random: &mut rand::rngs::StdRng,
ctx: &RequestContext, ctx: &RequestContext,

View File

@@ -18,6 +18,7 @@
//! [`IndexPart`]: super::remote_timeline_client::index::IndexPart //! [`IndexPart`]: super::remote_timeline_client::index::IndexPart
use anyhow::ensure; use anyhow::ensure;
use postgres_ffi::PgMajorVersion;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use utils::bin_ser::{BeSer, SerializeError}; use utils::bin_ser::{BeSer, SerializeError};
use utils::id::TimelineId; use utils::id::TimelineId;
@@ -136,7 +137,7 @@ struct TimelineMetadataBodyV2 {
latest_gc_cutoff_lsn: Lsn, latest_gc_cutoff_lsn: Lsn,
initdb_lsn: Lsn, initdb_lsn: Lsn,
pg_version: u32, pg_version: PgMajorVersion,
} }
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
@@ -167,7 +168,7 @@ impl TimelineMetadata {
ancestor_lsn: Lsn, ancestor_lsn: Lsn,
latest_gc_cutoff_lsn: Lsn, latest_gc_cutoff_lsn: Lsn,
initdb_lsn: Lsn, initdb_lsn: Lsn,
pg_version: u32, pg_version: PgMajorVersion,
) -> Self { ) -> Self {
Self { Self {
hdr: TimelineMetadataHeader { hdr: TimelineMetadataHeader {
@@ -215,7 +216,7 @@ impl TimelineMetadata {
ancestor_lsn: body.ancestor_lsn, ancestor_lsn: body.ancestor_lsn,
latest_gc_cutoff_lsn: body.latest_gc_cutoff_lsn, latest_gc_cutoff_lsn: body.latest_gc_cutoff_lsn,
initdb_lsn: body.initdb_lsn, initdb_lsn: body.initdb_lsn,
pg_version: 14, // All timelines created before this version had pg_version 14 pg_version: PgMajorVersion::PG14, // All timelines created before this version had pg_version 14
}; };
hdr.format_version = METADATA_FORMAT_VERSION; hdr.format_version = METADATA_FORMAT_VERSION;
@@ -317,7 +318,7 @@ impl TimelineMetadata {
self.body.initdb_lsn self.body.initdb_lsn
} }
pub fn pg_version(&self) -> u32 { pub fn pg_version(&self) -> PgMajorVersion {
self.body.pg_version self.body.pg_version
} }
@@ -331,7 +332,7 @@ impl TimelineMetadata {
Lsn::from_hex("00000000").unwrap(), Lsn::from_hex("00000000").unwrap(),
Lsn::from_hex("00000000").unwrap(), Lsn::from_hex("00000000").unwrap(),
Lsn::from_hex("00000000").unwrap(), Lsn::from_hex("00000000").unwrap(),
0, PgMajorVersion::PG14,
); );
let bytes = instance.to_bytes().unwrap(); let bytes = instance.to_bytes().unwrap();
Self::from_bytes(&bytes).unwrap() Self::from_bytes(&bytes).unwrap()
@@ -545,7 +546,7 @@ mod tests {
Lsn(0), Lsn(0),
Lsn(0), Lsn(0),
Lsn(0), Lsn(0),
14, // All timelines created before this version had pg_version 14 PgMajorVersion::PG14, // All timelines created before this version had pg_version 14
); );
assert_eq!( assert_eq!(
@@ -565,7 +566,7 @@ mod tests {
Lsn(0), Lsn(0),
// Updating this version to 17 will cause the test to fail at the // Updating this version to 17 will cause the test to fail at the
// next assert_eq!(). // next assert_eq!().
16, PgMajorVersion::PG16,
); );
let expected_bytes = vec![ let expected_bytes = vec![
/* TimelineMetadataHeader */ /* TimelineMetadataHeader */

View File

@@ -427,8 +427,8 @@ impl GcBlocking {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use postgres_ffi::PgMajorVersion;
use std::str::FromStr; use std::str::FromStr;
use utils::id::TimelineId; use utils::id::TimelineId;
use super::*; use super::*;
@@ -831,7 +831,7 @@ mod tests {
Lsn::INVALID, Lsn::INVALID,
Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(),
Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(),
14, PgMajorVersion::PG14,
).with_recalculated_checksum().unwrap(), ).with_recalculated_checksum().unwrap(),
deleted_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")), deleted_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")),
archived_at: None, archived_at: None,
@@ -893,7 +893,7 @@ mod tests {
Lsn::INVALID, Lsn::INVALID,
Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(),
Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(),
14, PgMajorVersion::PG14,
).with_recalculated_checksum().unwrap(), ).with_recalculated_checksum().unwrap(),
deleted_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")), deleted_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")),
archived_at: Some(parse_naive_datetime("2023-04-29T09:00:00.123000000")), archived_at: Some(parse_naive_datetime("2023-04-29T09:00:00.123000000")),
@@ -957,7 +957,7 @@ mod tests {
Lsn::INVALID, Lsn::INVALID,
Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(),
Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(),
14, PgMajorVersion::PG14,
).with_recalculated_checksum().unwrap(), ).with_recalculated_checksum().unwrap(),
deleted_at: None, deleted_at: None,
lineage: Default::default(), lineage: Default::default(),
@@ -1033,7 +1033,7 @@ mod tests {
Lsn::INVALID, Lsn::INVALID,
Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(),
Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(),
14, PgMajorVersion::PG14,
).with_recalculated_checksum().unwrap(), ).with_recalculated_checksum().unwrap(),
deleted_at: None, deleted_at: None,
lineage: Default::default(), lineage: Default::default(),
@@ -1114,7 +1114,7 @@ mod tests {
Lsn::INVALID, Lsn::INVALID,
Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(),
Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(),
14, PgMajorVersion::PG14,
).with_recalculated_checksum().unwrap(), ).with_recalculated_checksum().unwrap(),
deleted_at: None, deleted_at: None,
lineage: Default::default(), lineage: Default::default(),
@@ -1199,7 +1199,7 @@ mod tests {
Lsn::INVALID, Lsn::INVALID,
Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(),
Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(),
14, PgMajorVersion::PG14,
).with_recalculated_checksum().unwrap(), ).with_recalculated_checksum().unwrap(),
deleted_at: None, deleted_at: None,
lineage: Default::default(), lineage: Default::default(),
@@ -1287,7 +1287,7 @@ mod tests {
Lsn::INVALID, Lsn::INVALID,
Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(),
Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(),
14, PgMajorVersion::PG14,
).with_recalculated_checksum().unwrap(), ).with_recalculated_checksum().unwrap(),
deleted_at: None, deleted_at: None,
lineage: Default::default(), lineage: Default::default(),

View File

@@ -1622,11 +1622,6 @@ impl DeltaLayerIterator<'_> {
pub(crate) mod test { pub(crate) mod test {
use std::collections::BTreeMap; use std::collections::BTreeMap;
use bytes::Bytes;
use itertools::MinMaxResult;
use rand::prelude::{SeedableRng, SliceRandom, StdRng};
use rand::{Rng, RngCore};
use super::*; use super::*;
use crate::DEFAULT_PG_VERSION; use crate::DEFAULT_PG_VERSION;
use crate::context::DownloadBehavior; use crate::context::DownloadBehavior;
@@ -1636,6 +1631,11 @@ pub(crate) mod test {
use crate::tenant::storage_layer::{Layer, ResidentLayer}; use crate::tenant::storage_layer::{Layer, ResidentLayer};
use crate::tenant::timeline::layer_manager::LayerManagerLockHolder; use crate::tenant::timeline::layer_manager::LayerManagerLockHolder;
use crate::tenant::{TenantShard, Timeline}; use crate::tenant::{TenantShard, Timeline};
use bytes::Bytes;
use itertools::MinMaxResult;
use postgres_ffi::PgMajorVersion;
use rand::prelude::{SeedableRng, SliceRandom, StdRng};
use rand::{Rng, RngCore};
/// Construct an index for a fictional delta layer and then /// traverse in order to plan vectored reads for a query. Finally,
/// traverse in order to plan vectored reads for a query. Finally, /// traverse in order to plan vectored reads for a query. Finally,
@@ -1995,7 +1995,7 @@ pub(crate) mod test {
let (tenant, ctx) = h.load().await; let (tenant, ctx) = h.load().await;
let ctx = &ctx; let ctx = &ctx;
let timeline = tenant let timeline = tenant
.create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, ctx) .create_test_timeline(TimelineId::generate(), Lsn(0x10), PgMajorVersion::PG14, ctx)
.await .await
.unwrap(); .unwrap();
let ctx = &ctx.with_scope_timeline(&timeline); let ctx = &ctx.with_scope_timeline(&timeline);

View File

@@ -1,6 +1,7 @@
use std::time::UNIX_EPOCH; use std::time::UNIX_EPOCH;
use pageserver_api::key::{CONTROLFILE_KEY, Key}; use pageserver_api::key::{CONTROLFILE_KEY, Key};
use postgres_ffi::PgMajorVersion;
use tokio::task::JoinSet; use tokio::task::JoinSet;
use utils::completion::{self, Completion}; use utils::completion::{self, Completion};
use utils::id::TimelineId; use utils::id::TimelineId;
@@ -45,7 +46,7 @@ async fn smoke_test() {
.create_test_timeline_with_layers( .create_test_timeline_with_layers(
TimelineId::generate(), TimelineId::generate(),
Lsn(0x10), Lsn(0x10),
14, PgMajorVersion::PG14,
&ctx, &ctx,
Default::default(), // in-memory layers Default::default(), // in-memory layers
Default::default(), Default::default(),
@@ -256,7 +257,12 @@ async fn evict_and_wait_on_wanted_deleted() {
let (tenant, ctx) = h.load().await; let (tenant, ctx) = h.load().await;
let timeline = tenant let timeline = tenant
.create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx) .create_test_timeline(
TimelineId::generate(),
Lsn(0x10),
PgMajorVersion::PG14,
&ctx,
)
.await .await
.unwrap(); .unwrap();
@@ -341,7 +347,12 @@ fn read_wins_pending_eviction() {
let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1)); let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1));
let timeline = tenant let timeline = tenant
.create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx) .create_test_timeline(
TimelineId::generate(),
Lsn(0x10),
PgMajorVersion::PG14,
&ctx,
)
.await .await
.unwrap(); .unwrap();
let ctx = ctx.with_scope_timeline(&timeline); let ctx = ctx.with_scope_timeline(&timeline);
@@ -474,7 +485,12 @@ fn multiple_pending_evictions_scenario(name: &'static str, in_order: bool) {
let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1)); let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1));
let timeline = tenant let timeline = tenant
.create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx) .create_test_timeline(
TimelineId::generate(),
Lsn(0x10),
PgMajorVersion::PG14,
&ctx,
)
.await .await
.unwrap(); .unwrap();
let ctx = ctx.with_scope_timeline(&timeline); let ctx = ctx.with_scope_timeline(&timeline);
@@ -644,7 +660,12 @@ async fn cancelled_get_or_maybe_download_does_not_cancel_eviction() {
let (tenant, ctx) = h.load().await; let (tenant, ctx) = h.load().await;
let timeline = tenant let timeline = tenant
.create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx) .create_test_timeline(
TimelineId::generate(),
Lsn(0x10),
PgMajorVersion::PG14,
&ctx,
)
.await .await
.unwrap(); .unwrap();
let ctx = ctx.with_scope_timeline(&timeline); let ctx = ctx.with_scope_timeline(&timeline);
@@ -730,7 +751,12 @@ async fn evict_and_wait_does_not_wait_for_download() {
let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1)); let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1));
let timeline = tenant let timeline = tenant
.create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx) .create_test_timeline(
TimelineId::generate(),
Lsn(0x10),
PgMajorVersion::PG14,
&ctx,
)
.await .await
.unwrap(); .unwrap();
let ctx = ctx.with_scope_timeline(&timeline); let ctx = ctx.with_scope_timeline(&timeline);
@@ -836,7 +862,12 @@ async fn eviction_cancellation_on_drop() {
let (tenant, ctx) = h.load().await; let (tenant, ctx) = h.load().await;
let timeline = tenant let timeline = tenant
.create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx) .create_test_timeline(
TimelineId::generate(),
Lsn(0x10),
PgMajorVersion::PG14,
&ctx,
)
.await .await
.unwrap(); .unwrap();

View File

@@ -58,7 +58,7 @@ use pageserver_api::reltag::{BlockNumber, RelTag};
use pageserver_api::shard::{ShardIdentity, ShardIndex, ShardNumber, TenantShardId}; use pageserver_api::shard::{ShardIdentity, ShardIndex, ShardNumber, TenantShardId};
use postgres_connection::PgConnectionConfig; use postgres_connection::PgConnectionConfig;
use postgres_ffi::v14::xlog_utils; use postgres_ffi::v14::xlog_utils;
use postgres_ffi::{WAL_SEGMENT_SIZE, to_pg_timestamp}; use postgres_ffi::{PgMajorVersion, WAL_SEGMENT_SIZE, to_pg_timestamp};
use rand::Rng; use rand::Rng;
use remote_storage::DownloadError; use remote_storage::DownloadError;
use serde_with::serde_as; use serde_with::serde_as;
@@ -225,7 +225,7 @@ pub struct Timeline {
/// to shards, and is constant through the lifetime of this Timeline. /// to shards, and is constant through the lifetime of this Timeline.
shard_identity: ShardIdentity, shard_identity: ShardIdentity,
pub pg_version: u32, pub pg_version: PgMajorVersion,
/// The tuple has two elements. /// The tuple has two elements.
/// 1. `LayerFileManager` keeps track of the various physical representations of the layer files (inmem, local, remote). /// 1. `LayerFileManager` keeps track of the various physical representations of the layer files (inmem, local, remote).
@@ -2913,7 +2913,7 @@ impl Timeline {
shard_identity: ShardIdentity, shard_identity: ShardIdentity,
walredo_mgr: Option<Arc<super::WalRedoManager>>, walredo_mgr: Option<Arc<super::WalRedoManager>>,
resources: TimelineResources, resources: TimelineResources,
pg_version: u32, pg_version: PgMajorVersion,
state: TimelineState, state: TimelineState,
attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>, attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
create_idempotency: crate::tenant::CreateTimelineIdempotency, create_idempotency: crate::tenant::CreateTimelineIdempotency,
@@ -7593,6 +7593,7 @@ mod tests {
use std::sync::Arc; use std::sync::Arc;
use pageserver_api::key::Key; use pageserver_api::key::Key;
use postgres_ffi::PgMajorVersion;
use std::iter::Iterator; use std::iter::Iterator;
use tracing::Instrument; use tracing::Instrument;
use utils::id::TimelineId; use utils::id::TimelineId;
@@ -7667,7 +7668,7 @@ mod tests {
.create_test_timeline_with_layers( .create_test_timeline_with_layers(
TimelineId::generate(), TimelineId::generate(),
Lsn(0x10), Lsn(0x10),
14, PgMajorVersion::PG14,
&ctx, &ctx,
Vec::new(), // in-memory layers Vec::new(), // in-memory layers
delta_layers, delta_layers,
@@ -7803,7 +7804,7 @@ mod tests {
.create_test_timeline_with_layers( .create_test_timeline_with_layers(
TimelineId::generate(), TimelineId::generate(),
Lsn(0x10), Lsn(0x10),
14, PgMajorVersion::PG14,
&ctx, &ctx,
Vec::new(), // in-memory layers Vec::new(), // in-memory layers
delta_layers, delta_layers,
@@ -7863,7 +7864,12 @@ mod tests {
let (tenant, ctx) = harness.load().await; let (tenant, ctx) = harness.load().await;
let timeline = tenant let timeline = tenant
.create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx) .create_test_timeline(
TimelineId::generate(),
Lsn(0x10),
PgMajorVersion::PG14,
&ctx,
)
.await .await
.unwrap(); .unwrap();

View File

@@ -3,7 +3,7 @@ use std::sync::Arc;
use anyhow::Context; use anyhow::Context;
use bytes::Bytes; use bytes::Bytes;
use postgres_ffi::ControlFileData; use postgres_ffi::{ControlFileData, PgMajorVersion};
use remote_storage::{ use remote_storage::{
Download, DownloadError, DownloadKind, DownloadOpts, GenericRemoteStorage, Listing, Download, DownloadError, DownloadKind, DownloadOpts, GenericRemoteStorage, Listing,
ListingObject, RemotePath, RemoteStorageConfig, ListingObject, RemotePath, RemoteStorageConfig,
@@ -264,7 +264,7 @@ impl ControlFile {
pub(crate) fn base_lsn(&self) -> Lsn { pub(crate) fn base_lsn(&self) -> Lsn {
Lsn(self.control_file_data.checkPoint).align() Lsn(self.control_file_data.checkPoint).align()
} }
pub(crate) fn pg_version(&self) -> u32 { pub(crate) fn pg_version(&self) -> PgMajorVersion {
self.try_pg_version() self.try_pg_version()
.expect("prepare() checks that try_pg_version doesn't error") .expect("prepare() checks that try_pg_version doesn't error")
} }
@@ -274,13 +274,14 @@ impl ControlFile {
pub(crate) fn control_file_buf(&self) -> &Bytes { pub(crate) fn control_file_buf(&self) -> &Bytes {
&self.control_file_buf &self.control_file_buf
} }
fn try_pg_version(&self) -> anyhow::Result<u32> {
fn try_pg_version(&self) -> anyhow::Result<PgMajorVersion> {
Ok(match self.control_file_data.catalog_version_no { Ok(match self.control_file_data.catalog_version_no {
// these are from catversion.h // these are from catversion.h
202107181 => 14, 202107181 => PgMajorVersion::PG14,
202209061 => 15, 202209061 => PgMajorVersion::PG15,
202307071 => 16, 202307071 => PgMajorVersion::PG16,
202406281 => 17, 202406281 => PgMajorVersion::PG17,
catversion => { catversion => {
anyhow::bail!("unrecognized catalog version {catversion}") anyhow::bail!("unrecognized catalog version {catversion}")
} }

View File

@@ -32,8 +32,8 @@ use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind};
use pageserver_api::shard::ShardIdentity; use pageserver_api::shard::ShardIdentity;
use postgres_ffi::walrecord::*; use postgres_ffi::walrecord::*;
use postgres_ffi::{ use postgres_ffi::{
TimestampTz, TransactionId, dispatch_pgversion, enum_pgversion, enum_pgversion_dispatch, PgMajorVersion, TimestampTz, TransactionId, dispatch_pgversion, enum_pgversion,
fsm_logical_to_physical, pg_constants, enum_pgversion_dispatch, fsm_logical_to_physical, pg_constants,
}; };
use postgres_ffi_types::forknum::{FSM_FORKNUM, INIT_FORKNUM, MAIN_FORKNUM, VISIBILITYMAP_FORKNUM}; use postgres_ffi_types::forknum::{FSM_FORKNUM, INIT_FORKNUM, MAIN_FORKNUM, VISIBILITYMAP_FORKNUM};
use tracing::*; use tracing::*;
@@ -781,7 +781,7 @@ impl WalIngest {
) -> Result<(), WalIngestError> { ) -> Result<(), WalIngestError> {
let (xact_common, is_commit, is_prepared) = match record { let (xact_common, is_commit, is_prepared) = match record {
XactRecord::Prepare(XactPrepare { xl_xid, data }) => { XactRecord::Prepare(XactPrepare { xl_xid, data }) => {
let xid: u64 = if modification.tline.pg_version >= 17 { let xid: u64 = if modification.tline.pg_version >= PgMajorVersion::PG17 {
self.adjust_to_full_transaction_id(xl_xid)? self.adjust_to_full_transaction_id(xl_xid)?
} else { } else {
xl_xid as u64 xl_xid as u64
@@ -886,7 +886,7 @@ impl WalIngest {
xl_xid, parsed.xid, lsn, xl_xid, parsed.xid, lsn,
); );
let xid: u64 = if modification.tline.pg_version >= 17 { let xid: u64 = if modification.tline.pg_version >= PgMajorVersion::PG17 {
self.adjust_to_full_transaction_id(parsed.xid)? self.adjust_to_full_transaction_id(parsed.xid)?
} else { } else {
parsed.xid as u64 parsed.xid as u64
@@ -1241,7 +1241,7 @@ impl WalIngest {
if xlog_checkpoint.oldestActiveXid == pg_constants::INVALID_TRANSACTION_ID if xlog_checkpoint.oldestActiveXid == pg_constants::INVALID_TRANSACTION_ID
&& info == pg_constants::XLOG_CHECKPOINT_SHUTDOWN && info == pg_constants::XLOG_CHECKPOINT_SHUTDOWN
{ {
let oldest_active_xid = if pg_version >= 17 { let oldest_active_xid = if pg_version >= PgMajorVersion::PG17 {
let mut oldest_active_full_xid = cp.nextXid.value; let mut oldest_active_full_xid = cp.nextXid.value;
for xid in modification.tline.list_twophase_files(lsn, ctx).await? { for xid in modification.tline.list_twophase_files(lsn, ctx).await? {
if xid < oldest_active_full_xid { if xid < oldest_active_full_xid {
@@ -1475,10 +1475,11 @@ impl WalIngest {
const fn rate_limiter( const fn rate_limiter(
&self, &self,
pg_version: u32, pg_version: PgMajorVersion,
) -> Option<&Lazy<Mutex<RateLimit>>> { ) -> Option<&Lazy<Mutex<RateLimit>>> {
const MIN_PG_VERSION: u32 = 14; const MIN_PG_VERSION: u32 = PgMajorVersion::PG14.major_version_num();
const MAX_PG_VERSION: u32 = 17; const MAX_PG_VERSION: u32 = PgMajorVersion::PG17.major_version_num();
let pg_version = pg_version.major_version_num();
if pg_version < MIN_PG_VERSION || pg_version > MAX_PG_VERSION { if pg_version < MIN_PG_VERSION || pg_version > MAX_PG_VERSION {
return None; return None;
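MIN_PG_VERSION and MAX_PG_VERSION are const items, so major_version_num() must be a const fn. A sketch of its likely shape (the by-value receiver is an assumption):

impl PgMajorVersion {
    pub const fn major_version_num(self) -> u32 {
        match self {
            PgMajorVersion::PG14 => 14,
            PgMajorVersion::PG15 => 15,
            PgMajorVersion::PG16 => 16,
            PgMajorVersion::PG17 => 17,
        }
    }
}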
@@ -1603,6 +1604,7 @@ async fn get_relsize(
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use anyhow::Result; use anyhow::Result;
use postgres_ffi::PgMajorVersion;
use postgres_ffi::RELSEG_SIZE; use postgres_ffi::RELSEG_SIZE;
use super::*; use super::*;
@@ -1625,7 +1627,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn test_zeroed_checkpoint_decodes_correctly() -> Result<(), anyhow::Error> { async fn test_zeroed_checkpoint_decodes_correctly() -> Result<(), anyhow::Error> {
for i in 14..=16 { for i in PgMajorVersion::ALL {
dispatch_pgversion!(i, { dispatch_pgversion!(i, {
pgv::CheckPoint::decode(&pgv::ZERO_CHECKPOINT)?; pgv::CheckPoint::decode(&pgv::ZERO_CHECKPOINT)?;
}); });
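The replaced loop stopped at 16, quietly leaving v17 out of the zeroed-checkpoint test; iterating PgMajorVersion::ALL covers the whole supported set. Only the name and its use in a for loop are confirmed; presumably it is a const array kept in step with the variants:

impl PgMajorVersion {
    pub const ALL: [PgMajorVersion; 4] = [
        PgMajorVersion::PG14,
        PgMajorVersion::PG15,
        PgMajorVersion::PG16,
        PgMajorVersion::PG17,
    ];
}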
@@ -2335,7 +2337,7 @@ mod tests {
// 5. Grep sk logs for "restart decoder" to get startpoint // 5. Grep sk logs for "restart decoder" to get startpoint
// 6. Run just the decoder from this test to get the endpoint. // 6. Run just the decoder from this test to get the endpoint.
// It's the last LSN the decoder will output. // It's the last LSN the decoder will output.
let pg_version = 15; // The test data was generated by pg15 let pg_version = PgMajorVersion::PG15; // The test data was generated by pg15
let path = "test_data/sk_wal_segment_from_pgbench"; let path = "test_data/sk_wal_segment_from_pgbench";
let wal_segment_path = format!("{path}/000000010000000000000001.zst"); let wal_segment_path = format!("{path}/000000010000000000000001.zst");
let source_initdb_path = format!("{path}/{INITDB_PATH}"); let source_initdb_path = format!("{path}/{INITDB_PATH}");

View File

@@ -33,6 +33,7 @@ use bytes::{Bytes, BytesMut};
use pageserver_api::key::Key; use pageserver_api::key::Key;
use pageserver_api::models::{WalRedoManagerProcessStatus, WalRedoManagerStatus}; use pageserver_api::models::{WalRedoManagerProcessStatus, WalRedoManagerStatus};
use pageserver_api::shard::TenantShardId; use pageserver_api::shard::TenantShardId;
use postgres_ffi::PgMajorVersion;
use tracing::*; use tracing::*;
use utils::lsn::Lsn; use utils::lsn::Lsn;
use utils::sync::gate::GateError; use utils::sync::gate::GateError;
@@ -165,7 +166,7 @@ impl PostgresRedoManager {
lsn: Lsn, lsn: Lsn,
base_img: Option<(Lsn, Bytes)>, base_img: Option<(Lsn, Bytes)>,
records: Vec<(Lsn, NeonWalRecord)>, records: Vec<(Lsn, NeonWalRecord)>,
pg_version: u32, pg_version: PgMajorVersion,
redo_attempt_type: RedoAttemptType, redo_attempt_type: RedoAttemptType,
) -> Result<Bytes, Error> { ) -> Result<Bytes, Error> {
if records.is_empty() { if records.is_empty() {
@@ -232,7 +233,7 @@ impl PostgresRedoManager {
/// # Cancel-Safety /// # Cancel-Safety
/// ///
/// This method is cancellation-safe. /// This method is cancellation-safe.
pub async fn ping(&self, pg_version: u32) -> Result<(), Error> { pub async fn ping(&self, pg_version: PgMajorVersion) -> Result<(), Error> {
self.do_with_walredo_process(pg_version, |proc| async move { self.do_with_walredo_process(pg_version, |proc| async move {
proc.ping(Duration::from_secs(1)) proc.ping(Duration::from_secs(1))
.await .await
@@ -342,7 +343,7 @@ impl PostgresRedoManager {
O, O,
>( >(
&self, &self,
pg_version: u32, pg_version: PgMajorVersion,
closure: F, closure: F,
) -> Result<O, Error> { ) -> Result<O, Error> {
let proc: Arc<Process> = match self.redo_process.get_or_init_detached().await { let proc: Arc<Process> = match self.redo_process.get_or_init_detached().await {
@@ -442,7 +443,7 @@ impl PostgresRedoManager {
base_img_lsn: Lsn, base_img_lsn: Lsn,
records: &[(Lsn, NeonWalRecord)], records: &[(Lsn, NeonWalRecord)],
wal_redo_timeout: Duration, wal_redo_timeout: Duration,
pg_version: u32, pg_version: PgMajorVersion,
max_retry_attempts: u32, max_retry_attempts: u32,
) -> Result<Bytes, Error> { ) -> Result<Bytes, Error> {
*(self.last_redo_at.lock().unwrap()) = Some(Instant::now()); *(self.last_redo_at.lock().unwrap()) = Some(Instant::now());
@@ -572,6 +573,7 @@ mod tests {
use bytes::Bytes; use bytes::Bytes;
use pageserver_api::key::Key; use pageserver_api::key::Key;
use pageserver_api::shard::TenantShardId; use pageserver_api::shard::TenantShardId;
use postgres_ffi::PgMajorVersion;
use tracing::Instrument; use tracing::Instrument;
use utils::id::TenantId; use utils::id::TenantId;
use utils::lsn::Lsn; use utils::lsn::Lsn;
@@ -586,7 +588,7 @@ mod tests {
let h = RedoHarness::new().unwrap(); let h = RedoHarness::new().unwrap();
h.manager h.manager
.ping(14) .ping(PgMajorVersion::PG14)
.instrument(h.span()) .instrument(h.span())
.await .await
.expect("ping should work"); .expect("ping should work");
@@ -612,7 +614,7 @@ mod tests {
Lsn::from_str("0/16E2408").unwrap(), Lsn::from_str("0/16E2408").unwrap(),
None, None,
short_records(), short_records(),
14, PgMajorVersion::PG14,
RedoAttemptType::ReadPage, RedoAttemptType::ReadPage,
) )
.instrument(h.span()) .instrument(h.span())
@@ -641,7 +643,7 @@ mod tests {
Lsn::from_str("0/16E2408").unwrap(), Lsn::from_str("0/16E2408").unwrap(),
None, None,
short_records(), short_records(),
14, PgMajorVersion::PG14,
RedoAttemptType::ReadPage, RedoAttemptType::ReadPage,
) )
.instrument(h.span()) .instrument(h.span())
@@ -663,7 +665,7 @@ mod tests {
Lsn::INVALID, Lsn::INVALID,
None, None,
short_records(), short_records(),
16, /* 16 currently produces stderr output on startup, which adds a nice extra edge */ PgMajorVersion::PG16, /* 16 currently produces stderr output on startup, which adds a nice extra edge */
RedoAttemptType::ReadPage, RedoAttemptType::ReadPage,
) )
.instrument(h.span()) .instrument(h.span())

View File

@@ -12,7 +12,7 @@ use anyhow::Context;
use bytes::Bytes; use bytes::Bytes;
use pageserver_api::reltag::RelTag; use pageserver_api::reltag::RelTag;
use pageserver_api::shard::TenantShardId; use pageserver_api::shard::TenantShardId;
use postgres_ffi::BLCKSZ; use postgres_ffi::{BLCKSZ, PgMajorVersion};
use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tracing::{Instrument, debug, error, instrument}; use tracing::{Instrument, debug, error, instrument};
use utils::lsn::Lsn; use utils::lsn::Lsn;
@@ -54,11 +54,11 @@ impl WalRedoProcess {
// //
// Start postgres binary in special WAL redo mode. // Start postgres binary in special WAL redo mode.
// //
#[instrument(skip_all,fields(pg_version=pg_version))] #[instrument(skip_all,fields(pg_version=pg_version.major_version_num()))]
pub(crate) fn launch( pub(crate) fn launch(
conf: &'static PageServerConf, conf: &'static PageServerConf,
tenant_shard_id: TenantShardId, tenant_shard_id: TenantShardId,
pg_version: u32, pg_version: PgMajorVersion,
) -> anyhow::Result<Self> { ) -> anyhow::Result<Self> {
crate::span::debug_assert_current_span_has_tenant_id(); crate::span::debug_assert_current_span_has_tenant_id();

View File

@@ -58,6 +58,7 @@ metrics.workspace = true
pem.workspace = true pem.workspace = true
postgres_backend.workspace = true postgres_backend.workspace = true
postgres_ffi.workspace = true postgres_ffi.workspace = true
postgres_versioninfo.workspace = true
pq_proto.workspace = true pq_proto.workspace = true
remote_storage.workspace = true remote_storage.workspace = true
safekeeper_api.workspace = true safekeeper_api.workspace = true

View File

@@ -2,6 +2,7 @@
use std::vec; use std::vec;
use anyhow::{Result, bail}; use anyhow::{Result, bail};
use postgres_versioninfo::PgVersionId;
use pq_proto::SystemId; use pq_proto::SystemId;
use safekeeper_api::membership::{Configuration, INVALID_GENERATION}; use safekeeper_api::membership::{Configuration, INVALID_GENERATION};
use safekeeper_api::{ServerInfo, Term}; use safekeeper_api::{ServerInfo, Term};
@@ -46,7 +47,7 @@ struct SafeKeeperStateV1 {
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct ServerInfoV2 { pub struct ServerInfoV2 {
/// Postgres server version /// Postgres server version
pub pg_version: u32, pub pg_version: PgVersionId,
pub system_id: SystemId, pub system_id: SystemId,
pub tenant_id: TenantId, pub tenant_id: TenantId,
pub timeline_id: TimelineId, pub timeline_id: TimelineId,
@@ -75,7 +76,7 @@ pub struct SafeKeeperStateV2 {
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct ServerInfoV3 { pub struct ServerInfoV3 {
/// Postgres server version /// Postgres server version
pub pg_version: u32, pub pg_version: PgVersionId,
pub system_id: SystemId, pub system_id: SystemId,
#[serde(with = "hex")] #[serde(with = "hex")]
pub tenant_id: TenantId, pub tenant_id: TenantId,
@@ -444,13 +445,13 @@ pub fn upgrade_control_file(buf: &[u8], version: u32) -> Result<TimelinePersiste
} else if version == 6 { } else if version == 6 {
info!("reading safekeeper control file version {}", version); info!("reading safekeeper control file version {}", version);
let mut oldstate = TimelinePersistentState::des(&buf[..buf.len()])?; let mut oldstate = TimelinePersistentState::des(&buf[..buf.len()])?;
if oldstate.server.pg_version != 0 { if oldstate.server.pg_version != PgVersionId::UNKNOWN {
return Ok(oldstate); return Ok(oldstate);
} }
// set pg_version to the default v14 // set pg_version to the default v14
info!("setting pg_version to 140005"); info!("setting pg_version to 140005");
oldstate.server.pg_version = 140005; oldstate.server.pg_version = PgVersionId::from_full_pg_version(140005);
return Ok(oldstate); return Ok(oldstate);
} else if version == 7 { } else if version == 7 {
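The old greeting check further down recovers the major release with pg_version / 10000, so the full version id 140005 carries major 14. A check of how the two conversions used here relate (try_from per the greeting hunk below):

let full = PgVersionId::from_full_pg_version(140_005);
assert_eq!(PgMajorVersion::try_from(full).unwrap(), PgMajorVersion::PG14);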
@@ -547,6 +548,7 @@ pub fn downgrade_v10_to_v9(state: &TimelinePersistentState) -> TimelinePersisten
mod tests { mod tests {
use std::str::FromStr; use std::str::FromStr;
use postgres_versioninfo::PgMajorVersion;
use utils::Hex; use utils::Hex;
use utils::id::NodeId; use utils::id::NodeId;
@@ -563,7 +565,7 @@ mod tests {
epoch: 43, epoch: 43,
}, },
server: ServerInfoV2 { server: ServerInfoV2 {
pg_version: 14, pg_version: PgVersionId::from(PgMajorVersion::PG14),
system_id: 0x1234567887654321, system_id: 0x1234567887654321,
tenant_id, tenant_id,
timeline_id, timeline_id,
@@ -586,8 +588,8 @@ mod tests {
0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
// epoch // epoch
0x2b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
// pg_version // pg_version = 140000
0x0e, 0x00, 0x00, 0x00, 0xE0, 0x22, 0x02, 0x00,
// system_id // system_id
0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x34, 0x12, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x34, 0x12,
// tenant_id // tenant_id
@@ -626,7 +628,7 @@ mod tests {
}]), }]),
}, },
server: ServerInfoV2 { server: ServerInfoV2 {
pg_version: 14, pg_version: PgVersionId::from(PgMajorVersion::PG14),
system_id: 0x1234567887654321, system_id: 0x1234567887654321,
tenant_id, tenant_id,
timeline_id, timeline_id,
@@ -646,7 +648,7 @@ mod tests {
let expected = [ let expected = [
0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x00, 0x00, 0x00, 0x00, 0xE0, 0x22, 0x02, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56,
0x34, 0x12, 0xcf, 0x04, 0x80, 0x92, 0x97, 0x07, 0xee, 0x75, 0x37, 0x23, 0x37, 0xef, 0x34, 0x12, 0xcf, 0x04, 0x80, 0x92, 0x97, 0x07, 0xee, 0x75, 0x37, 0x23, 0x37, 0xef,
0xaa, 0x5e, 0xcf, 0x96, 0x11, 0x2d, 0xed, 0x66, 0x42, 0x2a, 0xa5, 0xe9, 0x53, 0xe5, 0xaa, 0x5e, 0xcf, 0x96, 0x11, 0x2d, 0xed, 0x66, 0x42, 0x2a, 0xa5, 0xe9, 0x53, 0xe5,
0x44, 0x0f, 0xa5, 0x42, 0x7a, 0xc4, 0x78, 0x56, 0x34, 0x12, 0xc4, 0x7a, 0x42, 0xa5, 0x44, 0x0f, 0xa5, 0x42, 0x7a, 0xc4, 0x78, 0x56, 0x34, 0x12, 0xc4, 0x7a, 0x42, 0xa5,
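The expected test vectors change exactly where pg_version sits: the serialized value goes from the bare major to the full version id, matching PgVersionId::from(PgMajorVersion::PG14) = 140000.

// Decoding the little-endian byte change in the vectors above:
assert_eq!(u32::from_le_bytes([0x0e, 0x00, 0x00, 0x00]), 14); // old: bare major
assert_eq!(u32::from_le_bytes([0xE0, 0x22, 0x02, 0x00]), 140_000); // new: full MMmmbb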
@@ -675,7 +677,7 @@ mod tests {
}]), }]),
}, },
server: ServerInfoV3 { server: ServerInfoV3 {
pg_version: 14, pg_version: PgVersionId::from(PgMajorVersion::PG14),
system_id: 0x1234567887654321, system_id: 0x1234567887654321,
tenant_id, tenant_id,
timeline_id, timeline_id,
@@ -695,7 +697,7 @@ mod tests {
let expected = [ let expected = [
0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x00, 0x00, 0x00, 0x00, 0xE0, 0x22, 0x02, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56,
0x34, 0x12, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x66, 0x30, 0x34, 0x34, 0x12, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x66, 0x30, 0x34,
0x38, 0x30, 0x39, 0x32, 0x39, 0x37, 0x30, 0x37, 0x65, 0x65, 0x37, 0x35, 0x33, 0x37, 0x38, 0x30, 0x39, 0x32, 0x39, 0x37, 0x30, 0x37, 0x65, 0x65, 0x37, 0x35, 0x33, 0x37,
0x32, 0x33, 0x33, 0x37, 0x65, 0x66, 0x61, 0x61, 0x35, 0x65, 0x63, 0x66, 0x39, 0x36, 0x32, 0x33, 0x33, 0x37, 0x65, 0x66, 0x61, 0x61, 0x35, 0x65, 0x63, 0x66, 0x39, 0x36,
@@ -731,7 +733,7 @@ mod tests {
}]), }]),
}, },
server: ServerInfo { server: ServerInfo {
pg_version: 14, pg_version: PgVersionId::from(PgMajorVersion::PG14),
system_id: 0x1234567887654321, system_id: 0x1234567887654321,
wal_seg_size: 0x12345678, wal_seg_size: 0x12345678,
}, },
@@ -765,7 +767,7 @@ mod tests {
0x30, 0x66, 0x61, 0x35, 0x34, 0x32, 0x37, 0x61, 0x63, 0x34, 0x2a, 0x00, 0x00, 0x00, 0x30, 0x66, 0x61, 0x35, 0x34, 0x32, 0x37, 0x61, 0x63, 0x34, 0x2a, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x0e, 0x00, 0x00, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x34, 0x12, 0x78, 0x56, 0xE0, 0x22, 0x02, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x34, 0x12, 0x78, 0x56,
0x34, 0x12, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x34, 0x37, 0x61, 0x34, 0x12, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x34, 0x37, 0x61,
0x34, 0x32, 0x61, 0x35, 0x30, 0x66, 0x34, 0x34, 0x65, 0x35, 0x35, 0x33, 0x65, 0x39, 0x34, 0x32, 0x61, 0x35, 0x30, 0x66, 0x34, 0x34, 0x65, 0x35, 0x35, 0x33, 0x65, 0x39,
0x61, 0x35, 0x32, 0x61, 0x34, 0x32, 0x36, 0x36, 0x65, 0x64, 0x32, 0x64, 0x31, 0x31, 0x61, 0x35, 0x32, 0x61, 0x34, 0x32, 0x36, 0x36, 0x65, 0x64, 0x32, 0x64, 0x31, 0x31,
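
The expected byte arrays above change because the fixtures now carry the full MMmmbb version ID instead of the bare major release. A standalone sketch of the arithmetic behind the fixture bytes (plain Rust, no crate types):

    fn main() {
        // Bare major release, as sent previously: 14 -> 0x0000000E.
        let major_only: u32 = 14;
        assert_eq!(major_only.to_le_bytes(), [0x0e, 0x00, 0x00, 0x00]);

        // Full MMmmbb version ID: 14 * 10000 = 140000 -> 0x000222E0, hence
        // the little-endian bytes 0xE0, 0x22, 0x02, 0x00 in the fixtures.
        let full_id: u32 = 140000;
        assert_eq!(full_id.to_le_bytes(), [0xE0, 0x22, 0x02, 0x00]);
    }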


@@ -9,6 +9,7 @@ use anyhow::{Context, Result, bail};
use byteorder::{LittleEndian, ReadBytesExt}; use byteorder::{LittleEndian, ReadBytesExt};
use bytes::{Buf, BufMut, Bytes, BytesMut}; use bytes::{Buf, BufMut, Bytes, BytesMut};
use postgres_ffi::{MAX_SEND_SIZE, TimeLineID}; use postgres_ffi::{MAX_SEND_SIZE, TimeLineID};
use postgres_versioninfo::{PgMajorVersion, PgVersionId};
use pq_proto::SystemId; use pq_proto::SystemId;
use safekeeper_api::membership::{ use safekeeper_api::membership::{
INVALID_GENERATION, MemberSet, SafekeeperGeneration as Generation, SafekeeperId, INVALID_GENERATION, MemberSet, SafekeeperGeneration as Generation, SafekeeperId,
@@ -29,7 +30,7 @@ use crate::{control_file, wal_storage};
pub const SK_PROTO_VERSION_2: u32 = 2; pub const SK_PROTO_VERSION_2: u32 = 2;
pub const SK_PROTO_VERSION_3: u32 = 3; pub const SK_PROTO_VERSION_3: u32 = 3;
pub const UNKNOWN_SERVER_VERSION: u32 = 0; pub const UNKNOWN_SERVER_VERSION: PgVersionId = PgVersionId::UNKNOWN;
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] #[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct TermLsn { pub struct TermLsn {
@@ -218,7 +219,7 @@ pub struct ProposerGreeting {
pub timeline_id: TimelineId, pub timeline_id: TimelineId,
pub mconf: membership::Configuration, pub mconf: membership::Configuration,
/// Postgres server version /// Postgres server version
pub pg_version: u32, pub pg_version: PgVersionId,
pub system_id: SystemId, pub system_id: SystemId,
pub wal_seg_size: u32, pub wal_seg_size: u32,
} }
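
For orientation, a hypothetical sketch of the two types' shape; the real definitions live in the new postgres_versioninfo crate and may differ in detail, but the MMmmbb mapping is confirmed by the fixture bytes above:

    /// Full version ID in decimal MMmmbb form, e.g. 140000 (sketch).
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    pub struct PgVersionId(u32);

    impl PgVersionId {
        pub const UNKNOWN: PgVersionId = PgVersionId(0);
    }

    /// One variant per supported major release (sketch).
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    #[repr(u32)]
    pub enum PgMajorVersion {
        PG14 = 14,
        PG15 = 15,
        PG16 = 16,
        PG17 = 17,
    }

    impl From<PgMajorVersion> for PgVersionId {
        fn from(v: PgMajorVersion) -> Self {
            // Minor and bugfix zeroed: PG14 -> 140000, PG17 -> 170000.
            PgVersionId(v as u32 * 10000)
        }
    }
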
@@ -229,7 +230,7 @@ pub struct ProposerGreetingV2 {
/// proposer-acceptor protocol version /// proposer-acceptor protocol version
pub protocol_version: u32, pub protocol_version: u32,
/// Postgres server version /// Postgres server version
pub pg_version: u32, pub pg_version: PgVersionId,
pub proposer_id: PgUuid, pub proposer_id: PgUuid,
pub system_id: SystemId, pub system_id: SystemId,
pub timeline_id: TimelineId, pub timeline_id: TimelineId,
@@ -511,7 +512,7 @@ impl ProposerAcceptorMessage {
tenant_id, tenant_id,
timeline_id, timeline_id,
mconf, mconf,
pg_version, pg_version: PgVersionId::from_full_pg_version(pg_version),
system_id, system_id,
wal_seg_size, wal_seg_size,
}; };
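
When decoding the v1 greeting, the raw u32 from the wire is branded as a PgVersionId. Extending the sketch above, and assuming from_full_pg_version merely wraps the MMmmbb value (whether the real constructor also validates it is not visible in this diff):

    impl PgVersionId {
        pub fn from_full_pg_version(v: u32) -> PgVersionId {
            PgVersionId(v)
        }
    }
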
@@ -961,7 +962,8 @@ where
* because safekeepers parse WAL headers and the format * because safekeepers parse WAL headers and the format
* may change between versions. * may change between versions.
*/ */
if msg.pg_version / 10000 != self.state.server.pg_version / 10000 if PgMajorVersion::try_from(msg.pg_version)?
!= PgMajorVersion::try_from(self.state.server.pg_version)?
&& self.state.server.pg_version != UNKNOWN_SERVER_VERSION && self.state.server.pg_version != UNKNOWN_SERVER_VERSION
{ {
bail!( bail!(
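
The compatibility check previously divided both sides by 10000; the typed form narrows each PgVersionId to a PgMajorVersion fallibly. A standalone sketch of the semantics, assuming the supported majors are 14 through 17:

    fn try_major(full: u32) -> Result<u32, String> {
        match full / 10000 {
            m @ 14..=17 => Ok(m),
            m => Err(format!("unsupported PostgreSQL major version {m}")),
        }
    }

    fn main() {
        // Same major, different bugfix: compatible, as before.
        assert_eq!(try_major(140005).unwrap(), try_major(140000).unwrap());
        // An out-of-range major now surfaces as an error rather than a
        // silent integer comparison.
        assert!(try_major(990000).is_err());
    }

One nuance worth noting: the fallible conversion of the stored version now runs before the UNKNOWN_SERVER_VERSION guard, so an UNKNOWN stored value has to be convertible, or its conversion error becomes the failure mode.
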
@@ -1748,7 +1750,7 @@ mod tests {
}]), }]),
}, },
server: ServerInfo { server: ServerInfo {
pg_version: 14, pg_version: PgVersionId::from_full_pg_version(140000),
system_id: 0x1234567887654321, system_id: 0x1234567887654321,
wal_seg_size: 0x12345678, wal_seg_size: 0x12345678,
}, },


@@ -8,8 +8,8 @@ use futures::StreamExt;
use futures::future::Either; use futures::future::Either;
use pageserver_api::shard::ShardIdentity; use pageserver_api::shard::ShardIdentity;
use postgres_backend::{CopyStreamHandlerEnd, PostgresBackend}; use postgres_backend::{CopyStreamHandlerEnd, PostgresBackend};
use postgres_ffi::get_current_timestamp;
use postgres_ffi::waldecoder::{WalDecodeError, WalStreamDecoder}; use postgres_ffi::waldecoder::{WalDecodeError, WalStreamDecoder};
use postgres_ffi::{PgMajorVersion, get_current_timestamp};
use pq_proto::{BeMessage, InterpretedWalRecordsBody, WalSndKeepAlive}; use pq_proto::{BeMessage, InterpretedWalRecordsBody, WalSndKeepAlive};
use tokio::io::{AsyncRead, AsyncWrite}; use tokio::io::{AsyncRead, AsyncWrite};
use tokio::sync::mpsc::error::SendError; use tokio::sync::mpsc::error::SendError;
@@ -78,7 +78,7 @@ pub(crate) struct InterpretedWalReader {
shard_senders: HashMap<ShardIdentity, smallvec::SmallVec<[ShardSenderState; 1]>>, shard_senders: HashMap<ShardIdentity, smallvec::SmallVec<[ShardSenderState; 1]>>,
shard_notification_rx: Option<tokio::sync::mpsc::UnboundedReceiver<AttachShardNotification>>, shard_notification_rx: Option<tokio::sync::mpsc::UnboundedReceiver<AttachShardNotification>>,
state: Arc<std::sync::RwLock<InterpretedWalReaderState>>, state: Arc<std::sync::RwLock<InterpretedWalReaderState>>,
pg_version: u32, pg_version: PgMajorVersion,
} }
/// A handle for [`InterpretedWalReader`] which allows for interacting with it /// A handle for [`InterpretedWalReader`] which allows for interacting with it
@@ -258,7 +258,7 @@ impl InterpretedWalReader {
start_pos: Lsn, start_pos: Lsn,
tx: tokio::sync::mpsc::Sender<Batch>, tx: tokio::sync::mpsc::Sender<Batch>,
shard: ShardIdentity, shard: ShardIdentity,
pg_version: u32, pg_version: PgMajorVersion,
appname: &Option<String>, appname: &Option<String>,
) -> InterpretedWalReaderHandle { ) -> InterpretedWalReaderHandle {
let state = Arc::new(std::sync::RwLock::new(InterpretedWalReaderState::Running { let state = Arc::new(std::sync::RwLock::new(InterpretedWalReaderState::Running {
@@ -322,7 +322,7 @@ impl InterpretedWalReader {
start_pos: Lsn, start_pos: Lsn,
tx: tokio::sync::mpsc::Sender<Batch>, tx: tokio::sync::mpsc::Sender<Batch>,
shard: ShardIdentity, shard: ShardIdentity,
pg_version: u32, pg_version: PgMajorVersion,
shard_notification_rx: Option< shard_notification_rx: Option<
tokio::sync::mpsc::UnboundedReceiver<AttachShardNotification>, tokio::sync::mpsc::UnboundedReceiver<AttachShardNotification>,
>, >,
@@ -718,7 +718,7 @@ mod tests {
use std::time::Duration; use std::time::Duration;
use pageserver_api::shard::{ShardIdentity, ShardStripeSize}; use pageserver_api::shard::{ShardIdentity, ShardStripeSize};
use postgres_ffi::MAX_SEND_SIZE; use postgres_ffi::{MAX_SEND_SIZE, PgMajorVersion};
use tokio::sync::mpsc::error::TryRecvError; use tokio::sync::mpsc::error::TryRecvError;
use utils::id::{NodeId, TenantTimelineId}; use utils::id::{NodeId, TenantTimelineId};
use utils::lsn::Lsn; use utils::lsn::Lsn;
@@ -734,7 +734,7 @@ mod tests {
const SIZE: usize = 8 * 1024; const SIZE: usize = 8 * 1024;
const MSG_COUNT: usize = 200; const MSG_COUNT: usize = 200;
const PG_VERSION: u32 = 17; const PG_VERSION: PgMajorVersion = PgMajorVersion::PG17;
const SHARD_COUNT: u8 = 2; const SHARD_COUNT: u8 = 2;
let start_lsn = Lsn::from_str("0/149FD18").unwrap(); let start_lsn = Lsn::from_str("0/149FD18").unwrap();
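
Typing the test constant as PgMajorVersion::PG17 is more than cosmetic: matches over an enum are exhaustiveness-checked by the compiler. An illustrative sketch, with the function and mapping invented for the example (the enum is redeclared locally so the sketch compiles on its own):

    #[derive(Clone, Copy)]
    enum PgMajorVersion { PG14, PG15, PG16, PG17 }

    // With a bare u32 this would need a catch-all `_` arm; with the enum,
    // adding a PG18 variant turns every non-exhaustive match into a
    // compile error, pointing at each place that needs updating.
    fn version_dir(v: PgMajorVersion) -> &'static str {
        match v {
            PgMajorVersion::PG14 => "v14",
            PgMajorVersion::PG15 => "v15",
            PgMajorVersion::PG16 => "v16",
            PgMajorVersion::PG17 => "v17",
        }
    }

    fn main() {
        assert_eq!(version_dir(PgMajorVersion::PG17), "v17");
    }
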
@@ -876,7 +876,7 @@ mod tests {
const SIZE: usize = 8 * 1024; const SIZE: usize = 8 * 1024;
const MSG_COUNT: usize = 200; const MSG_COUNT: usize = 200;
const PG_VERSION: u32 = 17; const PG_VERSION: PgMajorVersion = PgMajorVersion::PG17;
const SHARD_COUNT: u8 = 2; const SHARD_COUNT: u8 = 2;
let start_lsn = Lsn::from_str("0/149FD18").unwrap(); let start_lsn = Lsn::from_str("0/149FD18").unwrap();
@@ -1025,7 +1025,7 @@ mod tests {
const SIZE: usize = 64 * 1024; const SIZE: usize = 64 * 1024;
const MSG_COUNT: usize = 10; const MSG_COUNT: usize = 10;
const PG_VERSION: u32 = 17; const PG_VERSION: PgMajorVersion = PgMajorVersion::PG17;
const SHARD_COUNT: u8 = 2; const SHARD_COUNT: u8 = 2;
const WAL_READER_BATCH_SIZE: usize = 8192; const WAL_READER_BATCH_SIZE: usize = 8192;
@@ -1148,7 +1148,7 @@ mod tests {
const SIZE: usize = 8 * 1024; const SIZE: usize = 8 * 1024;
const MSG_COUNT: usize = 10; const MSG_COUNT: usize = 10;
const PG_VERSION: u32 = 17; const PG_VERSION: PgMajorVersion = PgMajorVersion::PG17;
let start_lsn = Lsn::from_str("0/149FD18").unwrap(); let start_lsn = Lsn::from_str("0/149FD18").unwrap();
let env = Env::new(true).unwrap(); let env = Env::new(true).unwrap();


@@ -12,7 +12,7 @@ use futures::FutureExt;
use itertools::Itertools; use itertools::Itertools;
use parking_lot::Mutex; use parking_lot::Mutex;
use postgres_backend::{CopyStreamHandlerEnd, PostgresBackend, PostgresBackendReader, QueryError}; use postgres_backend::{CopyStreamHandlerEnd, PostgresBackend, PostgresBackendReader, QueryError};
use postgres_ffi::{MAX_SEND_SIZE, TimestampTz, get_current_timestamp}; use postgres_ffi::{MAX_SEND_SIZE, PgMajorVersion, TimestampTz, get_current_timestamp};
use pq_proto::{BeMessage, WalSndKeepAlive, XLogDataBody}; use pq_proto::{BeMessage, WalSndKeepAlive, XLogDataBody};
use safekeeper_api::Term; use safekeeper_api::Term;
use safekeeper_api::models::{ use safekeeper_api::models::{
@@ -559,7 +559,9 @@ impl SafekeeperPostgresHandler {
format, format,
compression, compression,
} => { } => {
let pg_version = tli.tli.get_state().await.1.server.pg_version / 10000; let pg_version =
PgMajorVersion::try_from(tli.tli.get_state().await.1.server.pg_version)
.unwrap();
let end_watch_view = end_watch.view(); let end_watch_view = end_watch.view();
let wal_residence_guard = tli.wal_residence_guard().await?; let wal_residence_guard = tli.wal_residence_guard().await?;
let (tx, rx) = tokio::sync::mpsc::channel::<Batch>(2); let (tx, rx) = tokio::sync::mpsc::channel::<Batch>(2);


@@ -7,6 +7,7 @@ use std::time::SystemTime;
use anyhow::{Result, bail}; use anyhow::{Result, bail};
use postgres_ffi::WAL_SEGMENT_SIZE; use postgres_ffi::WAL_SEGMENT_SIZE;
use postgres_versioninfo::{PgMajorVersion, PgVersionId};
use safekeeper_api::membership::Configuration; use safekeeper_api::membership::Configuration;
use safekeeper_api::models::{TimelineMembershipSwitchResponse, TimelineTermBumpResponse}; use safekeeper_api::models::{TimelineMembershipSwitchResponse, TimelineTermBumpResponse};
use safekeeper_api::{INITIAL_TERM, ServerInfo, Term}; use safekeeper_api::{INITIAL_TERM, ServerInfo, Term};
@@ -149,7 +150,7 @@ impl TimelinePersistentState {
&TenantTimelineId::empty(), &TenantTimelineId::empty(),
Configuration::empty(), Configuration::empty(),
ServerInfo { ServerInfo {
pg_version: 170000, /* Postgres server version (major * 10000) */ pg_version: PgVersionId::from(PgMajorVersion::PG17),
system_id: 0, /* Postgres system identifier */ system_id: 0, /* Postgres system identifier */
wal_seg_size: WAL_SEGMENT_SIZE as u32, wal_seg_size: WAL_SEGMENT_SIZE as u32,
}, },
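
The deleted inline comment (major * 10000) still describes the stored value: assuming a From impl like the sketch earlier, PgVersionId::from(PgMajorVersion::PG17) is the same u32 170000 as before, so previously written state should keep round-tripping.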


@@ -19,6 +19,7 @@ use futures::future::BoxFuture;
use postgres_ffi::v14::xlog_utils::{IsPartialXLogFileName, IsXLogFileName, XLogFromFileName}; use postgres_ffi::v14::xlog_utils::{IsPartialXLogFileName, IsXLogFileName, XLogFromFileName};
use postgres_ffi::waldecoder::WalStreamDecoder; use postgres_ffi::waldecoder::WalStreamDecoder;
use postgres_ffi::{PG_TLI, XLogFileName, XLogSegNo, dispatch_pgversion}; use postgres_ffi::{PG_TLI, XLogFileName, XLogSegNo, dispatch_pgversion};
use postgres_versioninfo::{PgMajorVersion, PgVersionId};
use pq_proto::SystemId; use pq_proto::SystemId;
use remote_storage::RemotePath; use remote_storage::RemotePath;
use std::sync::Arc; use std::sync::Arc;
@@ -92,7 +93,7 @@ pub struct PhysicalStorage {
/// Size of WAL segment in bytes. /// Size of WAL segment in bytes.
wal_seg_size: usize, wal_seg_size: usize,
pg_version: u32, pg_version: PgVersionId,
system_id: u64, system_id: u64,
/// Written to disk, but possibly still in the cache and not fully persisted. /// Written to disk, but possibly still in the cache and not fully persisted.
@@ -180,7 +181,7 @@ impl PhysicalStorage {
let write_lsn = if state.commit_lsn == Lsn(0) { let write_lsn = if state.commit_lsn == Lsn(0) {
Lsn(0) Lsn(0)
} else { } else {
let version = state.server.pg_version / 10000; let version = PgMajorVersion::try_from(state.server.pg_version).unwrap();
dispatch_pgversion!( dispatch_pgversion!(
version, version,
@@ -226,7 +227,10 @@ impl PhysicalStorage {
write_record_lsn: write_lsn, write_record_lsn: write_lsn,
flush_lsn, flush_lsn,
flush_record_lsn: flush_lsn, flush_record_lsn: flush_lsn,
decoder: WalStreamDecoder::new(write_lsn, state.server.pg_version / 10000), decoder: WalStreamDecoder::new(
write_lsn,
PgMajorVersion::try_from(state.server.pg_version).unwrap(),
),
file: None, file: None,
pending_wal_truncation: true, pending_wal_truncation: true,
}) })
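
The .unwrap() calls in this file rely on the persisted server.pg_version narrowing to a supported major. That should hold for state written by a safekeeper that validated the greeting, but state carrying an unknown (or UNKNOWN) version would presumably now panic here rather than limp along with untyped arithmetic.
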
@@ -408,7 +412,7 @@ impl Storage for PhysicalStorage {
let segno = init_lsn.segment_number(self.wal_seg_size); let segno = init_lsn.segment_number(self.wal_seg_size);
let (mut file, _) = self.open_or_create(segno).await?; let (mut file, _) = self.open_or_create(segno).await?;
let major_pg_version = self.pg_version / 10000; let major_pg_version = PgMajorVersion::try_from(self.pg_version).unwrap();
let wal_seg = let wal_seg =
postgres_ffi::generate_wal_segment(segno, self.system_id, major_pg_version, init_lsn)?; postgres_ffi::generate_wal_segment(segno, self.system_id, major_pg_version, init_lsn)?;
file.seek(SeekFrom::Start(0)).await?; file.seek(SeekFrom::Start(0)).await?;
@@ -654,7 +658,7 @@ pub struct WalReader {
// pos is in the same segment as timeline_start_lsn. // pos is in the same segment as timeline_start_lsn.
timeline_start_lsn: Lsn, timeline_start_lsn: Lsn,
// integer version number of PostgreSQL, e.g. 14; 15; 16 // integer version number of PostgreSQL, e.g. 14; 15; 16
pg_version: u32, pg_version: PgMajorVersion,
system_id: SystemId, system_id: SystemId,
timeline_start_segment: Option<Bytes>, timeline_start_segment: Option<Bytes>,
} }
@@ -697,7 +701,7 @@ impl WalReader {
wal_backup, wal_backup,
local_start_lsn: state.local_start_lsn, local_start_lsn: state.local_start_lsn,
timeline_start_lsn: state.timeline_start_lsn, timeline_start_lsn: state.timeline_start_lsn,
pg_version: state.server.pg_version / 10000, pg_version: PgMajorVersion::try_from(state.server.pg_version).unwrap(),
system_id: state.server.system_id, system_id: state.server.system_id,
timeline_start_segment: None, timeline_start_segment: None,
}) })


@@ -7,8 +7,8 @@ use anyhow::Result;
use bytes::{Buf, BytesMut}; use bytes::{Buf, BytesMut};
use futures::future::BoxFuture; use futures::future::BoxFuture;
use parking_lot::Mutex; use parking_lot::Mutex;
use postgres_ffi::XLogSegNo;
use postgres_ffi::waldecoder::WalStreamDecoder; use postgres_ffi::waldecoder::WalStreamDecoder;
use postgres_ffi::{PgMajorVersion, XLogSegNo};
use safekeeper::metrics::WalStorageMetrics; use safekeeper::metrics::WalStorageMetrics;
use safekeeper::state::TimelinePersistentState; use safekeeper::state::TimelinePersistentState;
use safekeeper::{control_file, wal_storage}; use safekeeper::{control_file, wal_storage};
@@ -142,7 +142,7 @@ impl DiskWALStorage {
write_lsn, write_lsn,
write_record_lsn: flush_lsn, write_record_lsn: flush_lsn,
flush_record_lsn: flush_lsn, flush_record_lsn: flush_lsn,
decoder: WalStreamDecoder::new(flush_lsn, 16), decoder: WalStreamDecoder::new(flush_lsn, PgMajorVersion::PG16),
unflushed_bytes: BytesMut::new(), unflushed_bytes: BytesMut::new(),
disk, disk,
}) })
@@ -151,7 +151,7 @@ impl DiskWALStorage {
fn find_end_of_wal(disk: Arc<TimelineDisk>, start_lsn: Lsn) -> Result<Lsn> { fn find_end_of_wal(disk: Arc<TimelineDisk>, start_lsn: Lsn) -> Result<Lsn> {
let mut buf = [0; 8192]; let mut buf = [0; 8192];
let mut pos = start_lsn.0; let mut pos = start_lsn.0;
let mut decoder = WalStreamDecoder::new(start_lsn, 16); let mut decoder = WalStreamDecoder::new(start_lsn, PgMajorVersion::PG16);
let mut result = start_lsn; let mut result = start_lsn;
loop { loop {
disk.wal.lock().read(pos, &mut buf); disk.wal.lock().read(pos, &mut buf);
@@ -204,7 +204,7 @@ impl wal_storage::Storage for DiskWALStorage {
self.decoder.available(), self.decoder.available(),
startpos, startpos,
); );
self.decoder = WalStreamDecoder::new(startpos, 16); self.decoder = WalStreamDecoder::new(startpos, PgMajorVersion::PG16);
} }
self.decoder.feed_bytes(buf); self.decoder.feed_bytes(buf);
loop { loop {
@@ -242,7 +242,7 @@ impl wal_storage::Storage for DiskWALStorage {
self.write_record_lsn = end_pos; self.write_record_lsn = end_pos;
self.flush_record_lsn = end_pos; self.flush_record_lsn = end_pos;
self.unflushed_bytes.clear(); self.unflushed_bytes.clear();
self.decoder = WalStreamDecoder::new(end_pos, 16); self.decoder = WalStreamDecoder::new(end_pos, PgMajorVersion::PG16);
Ok(()) Ok(())
} }


@@ -18,6 +18,7 @@ use pageserver_api::controller_api::{
SafekeeperDescribeResponse, SkSchedulingPolicy, TimelineImportRequest, SafekeeperDescribeResponse, SkSchedulingPolicy, TimelineImportRequest,
}; };
use pageserver_api::models::{SafekeeperInfo, SafekeepersInfo, TimelineInfo}; use pageserver_api::models::{SafekeeperInfo, SafekeepersInfo, TimelineInfo};
use safekeeper_api::PgVersionId;
use safekeeper_api::membership::{MemberSet, SafekeeperGeneration, SafekeeperId}; use safekeeper_api::membership::{MemberSet, SafekeeperGeneration, SafekeeperId};
use tokio::task::JoinSet; use tokio::task::JoinSet;
use tokio_util::sync::CancellationToken; use tokio_util::sync::CancellationToken;
@@ -44,7 +45,7 @@ impl Service {
&self, &self,
tenant_id: TenantId, tenant_id: TenantId,
timeline_id: TimelineId, timeline_id: TimelineId,
pg_version: u32, pg_version: PgVersionId,
timeline_persistence: &TimelinePersistence, timeline_persistence: &TimelinePersistence,
) -> Result<Vec<NodeId>, ApiError> { ) -> Result<Vec<NodeId>, ApiError> {
// If quorum is reached, return if we are outside of a specified timeout // If quorum is reached, return if we are outside of a specified timeout
@@ -219,7 +220,7 @@ impl Service {
read_only: bool, read_only: bool,
) -> Result<SafekeepersInfo, ApiError> { ) -> Result<SafekeepersInfo, ApiError> {
let timeline_id = timeline_info.timeline_id; let timeline_id = timeline_info.timeline_id;
let pg_version = timeline_info.pg_version * 10000; let pg_version = PgVersionId::from(timeline_info.pg_version);
// Initially start_lsn is determined by last_record_lsn in pageserver // Initially start_lsn is determined by last_record_lsn in pageserver
// response as it does initdb. However, later we persist it and in sk // response as it does initdb. However, later we persist it and in sk
// creation calls replace with the value from the timeline row if it // creation calls replace with the value from the timeline row if it
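
Here timeline_info.pg_version is already a major version, so the From conversion replaces the hand-rolled * 10000 while producing the same MMmmbb value as before.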

View File

@@ -172,7 +172,7 @@ def test_cannot_create_endpoint_on_non_uploaded_timeline(neon_env_builder: NeonE
env.initial_tenant, env.initial_tenant,
env.initial_timeline, env.initial_timeline,
MembershipConfiguration(generation=1, members=[sk.safekeeper_id()], new_members=None), MembershipConfiguration(generation=1, members=[sk.safekeeper_id()], new_members=None),
int(env.pg_version), int(env.pg_version) * 10000,
Lsn(0), Lsn(0),
None, None,
) )
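
The Python tests have no typed wrapper, so they multiply the major by 10000 themselves to build the full MMmmbb value the API now expects (17 becomes 170000).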