mirror of
https://github.com/neondatabase/neon.git
synced 2025-12-22 21:59:59 +00:00
Use enum-typed PG versions (#12317)
This makes it possible for the compiler to validate that a match block matches all PostgreSQL versions we support. ## Problem We did not have a complete picture about which places we had to test against PG versions, and what format these versions were: The full PG version ID format (Major/minor/bugfix `MMmmbb`) as transferred in protocol messages, or only the Major release version (`MM`). This meant type confusion was rampant. With this change, it becomes easier to develop new version-dependent features, by making type and niche confusion impossible. ## Summary of changes Every use of `pg_version` is now typed as either `PgVersionId` (u32, valued in decimal `MMmmbb`) or `PgMajorVersion` (an enum, with a value for every major version we support, serialized and stored like a u32 with the value of that major version) --------- Co-authored-by: Arpad Müller <arpad-m@users.noreply.github.com>
This commit is contained in:
committed by
GitHub
parent
158d84ea30
commit
6c6de6382a
29
Cargo.lock
generated
29
Cargo.lock
generated
@@ -1318,6 +1318,7 @@ dependencies = [
|
||||
"p256 0.13.2",
|
||||
"postgres",
|
||||
"postgres_initdb",
|
||||
"postgres_versioninfo",
|
||||
"regex",
|
||||
"remote_storage",
|
||||
"reqwest",
|
||||
@@ -4406,6 +4407,7 @@ dependencies = [
|
||||
"once_cell",
|
||||
"postgres_backend",
|
||||
"postgres_ffi_types",
|
||||
"postgres_versioninfo",
|
||||
"rand 0.8.5",
|
||||
"remote_storage",
|
||||
"reqwest",
|
||||
@@ -4429,6 +4431,7 @@ dependencies = [
|
||||
"futures",
|
||||
"http-utils",
|
||||
"pageserver_api",
|
||||
"postgres_versioninfo",
|
||||
"reqwest",
|
||||
"serde",
|
||||
"thiserror 1.0.69",
|
||||
@@ -4897,6 +4900,7 @@ dependencies = [
|
||||
"once_cell",
|
||||
"postgres",
|
||||
"postgres_ffi_types",
|
||||
"postgres_versioninfo",
|
||||
"pprof",
|
||||
"regex",
|
||||
"serde",
|
||||
@@ -4919,11 +4923,23 @@ version = "0.1.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"camino",
|
||||
"postgres_versioninfo",
|
||||
"thiserror 1.0.69",
|
||||
"tokio",
|
||||
"workspace_hack",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "postgres_versioninfo"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"serde",
|
||||
"serde_repr",
|
||||
"thiserror 1.0.69",
|
||||
"workspace_hack",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "posthog_client_lite"
|
||||
version = "0.1.0"
|
||||
@@ -6115,6 +6131,7 @@ dependencies = [
|
||||
"postgres-protocol",
|
||||
"postgres_backend",
|
||||
"postgres_ffi",
|
||||
"postgres_versioninfo",
|
||||
"pprof",
|
||||
"pq_proto",
|
||||
"rand 0.8.5",
|
||||
@@ -6159,6 +6176,7 @@ dependencies = [
|
||||
"const_format",
|
||||
"pageserver_api",
|
||||
"postgres_ffi",
|
||||
"postgres_versioninfo",
|
||||
"pq_proto",
|
||||
"serde",
|
||||
"serde_json",
|
||||
@@ -6481,6 +6499,17 @@ dependencies = [
|
||||
"thiserror 1.0.69",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_repr"
|
||||
version = "0.1.20"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.100",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_spanned"
|
||||
version = "0.6.6"
|
||||
|
||||
@@ -23,6 +23,7 @@ members = [
|
||||
"libs/pageserver_api",
|
||||
"libs/postgres_ffi",
|
||||
"libs/postgres_ffi_types",
|
||||
"libs/postgres_versioninfo",
|
||||
"libs/safekeeper_api",
|
||||
"libs/desim",
|
||||
"libs/neon-shmem",
|
||||
@@ -174,6 +175,7 @@ serde_json = "1"
|
||||
serde_path_to_error = "0.1"
|
||||
serde_with = { version = "3", features = [ "base64" ] }
|
||||
serde_assert = "0.5.0"
|
||||
serde_repr = "0.1.20"
|
||||
sha2 = "0.10.2"
|
||||
signal-hook = "0.3"
|
||||
smallvec = "1.11"
|
||||
@@ -261,6 +263,7 @@ postgres_backend = { version = "0.1", path = "./libs/postgres_backend/" }
|
||||
postgres_connection = { version = "0.1", path = "./libs/postgres_connection/" }
|
||||
postgres_ffi = { version = "0.1", path = "./libs/postgres_ffi/" }
|
||||
postgres_ffi_types = { version = "0.1", path = "./libs/postgres_ffi_types/" }
|
||||
postgres_versioninfo = { version = "0.1", path = "./libs/postgres_versioninfo/" }
|
||||
postgres_initdb = { path = "./libs/postgres_initdb" }
|
||||
posthog_client_lite = { version = "0.1", path = "./libs/posthog_client_lite" }
|
||||
pq_proto = { version = "0.1", path = "./libs/pq_proto/" }
|
||||
|
||||
@@ -64,6 +64,7 @@ uuid.workspace = true
|
||||
walkdir.workspace = true
|
||||
x509-cert.workspace = true
|
||||
|
||||
postgres_versioninfo.workspace = true
|
||||
postgres_initdb.workspace = true
|
||||
compute_api.workspace = true
|
||||
utils.workspace = true
|
||||
|
||||
@@ -29,7 +29,7 @@ use anyhow::{Context, bail};
|
||||
use aws_config::BehaviorVersion;
|
||||
use camino::{Utf8Path, Utf8PathBuf};
|
||||
use clap::{Parser, Subcommand};
|
||||
use compute_tools::extension_server::{PostgresMajorVersion, get_pg_version};
|
||||
use compute_tools::extension_server::get_pg_version;
|
||||
use nix::unistd::Pid;
|
||||
use std::ops::Not;
|
||||
use tracing::{Instrument, error, info, info_span, warn};
|
||||
@@ -179,12 +179,8 @@ impl PostgresProcess {
|
||||
.await
|
||||
.context("create pgdata directory")?;
|
||||
|
||||
let pg_version = match get_pg_version(self.pgbin.as_ref()) {
|
||||
PostgresMajorVersion::V14 => 14,
|
||||
PostgresMajorVersion::V15 => 15,
|
||||
PostgresMajorVersion::V16 => 16,
|
||||
PostgresMajorVersion::V17 => 17,
|
||||
};
|
||||
let pg_version = get_pg_version(self.pgbin.as_ref());
|
||||
|
||||
postgres_initdb::do_run_initdb(postgres_initdb::RunInitdbArgs {
|
||||
superuser: initdb_user,
|
||||
locale: DEFAULT_LOCALE, // XXX: this shouldn't be hard-coded,
|
||||
|
||||
@@ -74,9 +74,11 @@ More specifically, here is an example ext_index.json
|
||||
use std::path::Path;
|
||||
use std::str;
|
||||
|
||||
use crate::metrics::{REMOTE_EXT_REQUESTS_TOTAL, UNKNOWN_HTTP_STATUS};
|
||||
use anyhow::{Context, Result, bail};
|
||||
use bytes::Bytes;
|
||||
use compute_api::spec::RemoteExtSpec;
|
||||
use postgres_versioninfo::PgMajorVersion;
|
||||
use regex::Regex;
|
||||
use remote_storage::*;
|
||||
use reqwest::StatusCode;
|
||||
@@ -86,8 +88,6 @@ use tracing::log::warn;
|
||||
use url::Url;
|
||||
use zstd::stream::read::Decoder;
|
||||
|
||||
use crate::metrics::{REMOTE_EXT_REQUESTS_TOTAL, UNKNOWN_HTTP_STATUS};
|
||||
|
||||
fn get_pg_config(argument: &str, pgbin: &str) -> String {
|
||||
// gives the result of `pg_config [argument]`
|
||||
// where argument is a flag like `--version` or `--sharedir`
|
||||
@@ -106,7 +106,7 @@ fn get_pg_config(argument: &str, pgbin: &str) -> String {
|
||||
.to_string()
|
||||
}
|
||||
|
||||
pub fn get_pg_version(pgbin: &str) -> PostgresMajorVersion {
|
||||
pub fn get_pg_version(pgbin: &str) -> PgMajorVersion {
|
||||
// pg_config --version returns a (platform specific) human readable string
|
||||
// such as "PostgreSQL 15.4". We parse this to v14/v15/v16 etc.
|
||||
let human_version = get_pg_config("--version", pgbin);
|
||||
@@ -114,25 +114,11 @@ pub fn get_pg_version(pgbin: &str) -> PostgresMajorVersion {
|
||||
}
|
||||
|
||||
pub fn get_pg_version_string(pgbin: &str) -> String {
|
||||
match get_pg_version(pgbin) {
|
||||
PostgresMajorVersion::V14 => "v14",
|
||||
PostgresMajorVersion::V15 => "v15",
|
||||
PostgresMajorVersion::V16 => "v16",
|
||||
PostgresMajorVersion::V17 => "v17",
|
||||
}
|
||||
.to_owned()
|
||||
get_pg_version(pgbin).v_str()
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
|
||||
pub enum PostgresMajorVersion {
|
||||
V14,
|
||||
V15,
|
||||
V16,
|
||||
V17,
|
||||
}
|
||||
|
||||
fn parse_pg_version(human_version: &str) -> PostgresMajorVersion {
|
||||
use PostgresMajorVersion::*;
|
||||
fn parse_pg_version(human_version: &str) -> PgMajorVersion {
|
||||
use PgMajorVersion::*;
|
||||
// Normal releases have version strings like "PostgreSQL 15.4". But there
|
||||
// are also pre-release versions like "PostgreSQL 17devel" or "PostgreSQL
|
||||
// 16beta2" or "PostgreSQL 17rc1". And with the --with-extra-version
|
||||
@@ -143,10 +129,10 @@ fn parse_pg_version(human_version: &str) -> PostgresMajorVersion {
|
||||
.captures(human_version)
|
||||
{
|
||||
Some(captures) if captures.len() == 2 => match &captures["major"] {
|
||||
"14" => return V14,
|
||||
"15" => return V15,
|
||||
"16" => return V16,
|
||||
"17" => return V17,
|
||||
"14" => return PG14,
|
||||
"15" => return PG15,
|
||||
"16" => return PG16,
|
||||
"17" => return PG17,
|
||||
_ => {}
|
||||
},
|
||||
_ => {}
|
||||
@@ -343,25 +329,25 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_parse_pg_version() {
|
||||
use super::PostgresMajorVersion::*;
|
||||
assert_eq!(parse_pg_version("PostgreSQL 15.4"), V15);
|
||||
assert_eq!(parse_pg_version("PostgreSQL 15.14"), V15);
|
||||
use postgres_versioninfo::PgMajorVersion::*;
|
||||
assert_eq!(parse_pg_version("PostgreSQL 15.4"), PG15);
|
||||
assert_eq!(parse_pg_version("PostgreSQL 15.14"), PG15);
|
||||
assert_eq!(
|
||||
parse_pg_version("PostgreSQL 15.4 (Ubuntu 15.4-0ubuntu0.23.04.1)"),
|
||||
V15
|
||||
PG15
|
||||
);
|
||||
|
||||
assert_eq!(parse_pg_version("PostgreSQL 14.15"), V14);
|
||||
assert_eq!(parse_pg_version("PostgreSQL 14.0"), V14);
|
||||
assert_eq!(parse_pg_version("PostgreSQL 14.15"), PG14);
|
||||
assert_eq!(parse_pg_version("PostgreSQL 14.0"), PG14);
|
||||
assert_eq!(
|
||||
parse_pg_version("PostgreSQL 14.9 (Debian 14.9-1.pgdg120+1"),
|
||||
V14
|
||||
PG14
|
||||
);
|
||||
|
||||
assert_eq!(parse_pg_version("PostgreSQL 16devel"), V16);
|
||||
assert_eq!(parse_pg_version("PostgreSQL 16beta1"), V16);
|
||||
assert_eq!(parse_pg_version("PostgreSQL 16rc2"), V16);
|
||||
assert_eq!(parse_pg_version("PostgreSQL 16extra"), V16);
|
||||
assert_eq!(parse_pg_version("PostgreSQL 16devel"), PG16);
|
||||
assert_eq!(parse_pg_version("PostgreSQL 16beta1"), PG16);
|
||||
assert_eq!(parse_pg_version("PostgreSQL 16rc2"), PG16);
|
||||
assert_eq!(parse_pg_version("PostgreSQL 16extra"), PG16);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -48,7 +48,7 @@ use postgres_connection::parse_host_port;
|
||||
use safekeeper_api::membership::{SafekeeperGeneration, SafekeeperId};
|
||||
use safekeeper_api::{
|
||||
DEFAULT_HTTP_LISTEN_PORT as DEFAULT_SAFEKEEPER_HTTP_PORT,
|
||||
DEFAULT_PG_LISTEN_PORT as DEFAULT_SAFEKEEPER_PG_PORT,
|
||||
DEFAULT_PG_LISTEN_PORT as DEFAULT_SAFEKEEPER_PG_PORT, PgMajorVersion, PgVersionId,
|
||||
};
|
||||
use storage_broker::DEFAULT_LISTEN_ADDR as DEFAULT_BROKER_ADDR;
|
||||
use tokio::task::JoinSet;
|
||||
@@ -64,7 +64,7 @@ const DEFAULT_PAGESERVER_ID: NodeId = NodeId(1);
|
||||
const DEFAULT_BRANCH_NAME: &str = "main";
|
||||
project_git_version!(GIT_VERSION);
|
||||
|
||||
const DEFAULT_PG_VERSION: u32 = 17;
|
||||
const DEFAULT_PG_VERSION: PgMajorVersion = PgMajorVersion::PG17;
|
||||
|
||||
const DEFAULT_PAGESERVER_CONTROL_PLANE_API: &str = "http://127.0.0.1:1234/upcall/v1/";
|
||||
|
||||
@@ -169,7 +169,7 @@ struct TenantCreateCmdArgs {
|
||||
|
||||
#[arg(default_value_t = DEFAULT_PG_VERSION)]
|
||||
#[clap(long, help = "Postgres version to use for the initial timeline")]
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
|
||||
#[clap(
|
||||
long,
|
||||
@@ -292,7 +292,7 @@ struct TimelineCreateCmdArgs {
|
||||
|
||||
#[arg(default_value_t = DEFAULT_PG_VERSION)]
|
||||
#[clap(long, help = "Postgres version")]
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
}
|
||||
|
||||
#[derive(clap::Args)]
|
||||
@@ -324,7 +324,7 @@ struct TimelineImportCmdArgs {
|
||||
|
||||
#[arg(default_value_t = DEFAULT_PG_VERSION)]
|
||||
#[clap(long, help = "Postgres version of the backup being imported")]
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
}
|
||||
|
||||
#[derive(clap::Subcommand)]
|
||||
@@ -603,7 +603,7 @@ struct EndpointCreateCmdArgs {
|
||||
|
||||
#[arg(default_value_t = DEFAULT_PG_VERSION)]
|
||||
#[clap(long, help = "Postgres version")]
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
|
||||
/// Use gRPC to communicate with Pageservers, by generating grpc:// connstrings.
|
||||
///
|
||||
@@ -1295,7 +1295,7 @@ async fn handle_timeline(cmd: &TimelineCmd, env: &mut local_env::LocalEnv) -> Re
|
||||
},
|
||||
new_members: None,
|
||||
};
|
||||
let pg_version = args.pg_version * 10000;
|
||||
let pg_version = PgVersionId::from(args.pg_version);
|
||||
let req = safekeeper_api::models::TimelineCreateRequest {
|
||||
tenant_id,
|
||||
timeline_id,
|
||||
|
||||
@@ -67,6 +67,7 @@ use nix::sys::signal::{Signal, kill};
|
||||
use pageserver_api::shard::ShardStripeSize;
|
||||
use pem::Pem;
|
||||
use reqwest::header::CONTENT_TYPE;
|
||||
use safekeeper_api::PgMajorVersion;
|
||||
use safekeeper_api::membership::SafekeeperGeneration;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
@@ -89,7 +90,7 @@ pub struct EndpointConf {
|
||||
pg_port: u16,
|
||||
external_http_port: u16,
|
||||
internal_http_port: u16,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
grpc: bool,
|
||||
skip_pg_catalog_updates: bool,
|
||||
reconfigure_concurrency: usize,
|
||||
@@ -192,7 +193,7 @@ impl ComputeControlPlane {
|
||||
pg_port: Option<u16>,
|
||||
external_http_port: Option<u16>,
|
||||
internal_http_port: Option<u16>,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
mode: ComputeMode,
|
||||
grpc: bool,
|
||||
skip_pg_catalog_updates: bool,
|
||||
@@ -312,7 +313,7 @@ pub struct Endpoint {
|
||||
pub internal_http_address: SocketAddr,
|
||||
|
||||
// postgres major version in the format: 14, 15, etc.
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
|
||||
// These are not part of the endpoint as such, but the environment
|
||||
// the endpoint runs in.
|
||||
@@ -557,7 +558,7 @@ impl Endpoint {
|
||||
conf.append("hot_standby", "on");
|
||||
// prefetching of blocks referenced in WAL doesn't make sense for us
|
||||
// Neon hot standby ignores pages that are not in the shared_buffers
|
||||
if self.pg_version >= 15 {
|
||||
if self.pg_version >= PgMajorVersion::PG15 {
|
||||
conf.append("recovery_prefetch", "off");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ use clap::ValueEnum;
|
||||
use pem::Pem;
|
||||
use postgres_backend::AuthType;
|
||||
use reqwest::{Certificate, Url};
|
||||
use safekeeper_api::PgMajorVersion;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use utils::auth::encode_from_key_file;
|
||||
use utils::id::{NodeId, TenantId, TenantTimelineId, TimelineId};
|
||||
@@ -424,25 +425,21 @@ impl LocalEnv {
|
||||
self.pg_distrib_dir.clone()
|
||||
}
|
||||
|
||||
pub fn pg_distrib_dir(&self, pg_version: u32) -> anyhow::Result<PathBuf> {
|
||||
pub fn pg_distrib_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result<PathBuf> {
|
||||
let path = self.pg_distrib_dir.clone();
|
||||
|
||||
#[allow(clippy::manual_range_patterns)]
|
||||
match pg_version {
|
||||
14 | 15 | 16 | 17 => Ok(path.join(format!("v{pg_version}"))),
|
||||
_ => bail!("Unsupported postgres version: {}", pg_version),
|
||||
}
|
||||
Ok(path.join(pg_version.v_str()))
|
||||
}
|
||||
|
||||
pub fn pg_dir(&self, pg_version: u32, dir_name: &str) -> anyhow::Result<PathBuf> {
|
||||
pub fn pg_dir(&self, pg_version: PgMajorVersion, dir_name: &str) -> anyhow::Result<PathBuf> {
|
||||
Ok(self.pg_distrib_dir(pg_version)?.join(dir_name))
|
||||
}
|
||||
|
||||
pub fn pg_bin_dir(&self, pg_version: u32) -> anyhow::Result<PathBuf> {
|
||||
pub fn pg_bin_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result<PathBuf> {
|
||||
self.pg_dir(pg_version, "bin")
|
||||
}
|
||||
|
||||
pub fn pg_lib_dir(&self, pg_version: u32) -> anyhow::Result<PathBuf> {
|
||||
pub fn pg_lib_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result<PathBuf> {
|
||||
self.pg_dir(pg_version, "lib")
|
||||
}
|
||||
|
||||
|
||||
@@ -22,6 +22,7 @@ use pageserver_api::shard::TenantShardId;
|
||||
use pageserver_client::mgmt_api;
|
||||
use postgres_backend::AuthType;
|
||||
use postgres_connection::{PgConnectionConfig, parse_host_port};
|
||||
use safekeeper_api::PgMajorVersion;
|
||||
use utils::auth::{Claims, Scope};
|
||||
use utils::id::{NodeId, TenantId, TimelineId};
|
||||
use utils::lsn::Lsn;
|
||||
@@ -607,7 +608,7 @@ impl PageServerNode {
|
||||
timeline_id: TimelineId,
|
||||
base: (Lsn, PathBuf),
|
||||
pg_wal: Option<(Lsn, PathBuf)>,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
) -> anyhow::Result<()> {
|
||||
// Init base reader
|
||||
let (start_lsn, base_tarfile_path) = base;
|
||||
|
||||
@@ -6,6 +6,8 @@ use std::str::FromStr;
|
||||
use std::sync::OnceLock;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use crate::background_process;
|
||||
use crate::local_env::{LocalEnv, NeonStorageControllerConf};
|
||||
use camino::{Utf8Path, Utf8PathBuf};
|
||||
use hyper0::Uri;
|
||||
use nix::unistd::Pid;
|
||||
@@ -22,6 +24,7 @@ use pageserver_client::mgmt_api::ResponseErrorMessageExt;
|
||||
use pem::Pem;
|
||||
use postgres_backend::AuthType;
|
||||
use reqwest::{Method, Response};
|
||||
use safekeeper_api::PgMajorVersion;
|
||||
use serde::de::DeserializeOwned;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::process::Command;
|
||||
@@ -31,9 +34,6 @@ use utils::auth::{Claims, Scope, encode_from_key_file};
|
||||
use utils::id::{NodeId, TenantId};
|
||||
use whoami::username;
|
||||
|
||||
use crate::background_process;
|
||||
use crate::local_env::{LocalEnv, NeonStorageControllerConf};
|
||||
|
||||
pub struct StorageController {
|
||||
env: LocalEnv,
|
||||
private_key: Option<Pem>,
|
||||
@@ -48,7 +48,7 @@ pub struct StorageController {
|
||||
|
||||
const COMMAND: &str = "storage_controller";
|
||||
|
||||
const STORAGE_CONTROLLER_POSTGRES_VERSION: u32 = 16;
|
||||
const STORAGE_CONTROLLER_POSTGRES_VERSION: PgMajorVersion = PgMajorVersion::PG16;
|
||||
|
||||
const DB_NAME: &str = "storage_controller";
|
||||
|
||||
@@ -184,9 +184,15 @@ impl StorageController {
|
||||
/// to other versions if that one isn't found. Some automated tests create circumstances
|
||||
/// where only one version is available in pg_distrib_dir, such as `test_remote_extensions`.
|
||||
async fn get_pg_dir(&self, dir_name: &str) -> anyhow::Result<Utf8PathBuf> {
|
||||
let prefer_versions = [STORAGE_CONTROLLER_POSTGRES_VERSION, 16, 15, 14];
|
||||
const PREFER_VERSIONS: [PgMajorVersion; 5] = [
|
||||
STORAGE_CONTROLLER_POSTGRES_VERSION,
|
||||
PgMajorVersion::PG16,
|
||||
PgMajorVersion::PG15,
|
||||
PgMajorVersion::PG14,
|
||||
PgMajorVersion::PG17,
|
||||
];
|
||||
|
||||
for v in prefer_versions {
|
||||
for v in PREFER_VERSIONS {
|
||||
let path = Utf8PathBuf::from_path_buf(self.env.pg_dir(v, dir_name)?).unwrap();
|
||||
if tokio::fs::try_exists(&path).await? {
|
||||
return Ok(path);
|
||||
|
||||
@@ -18,6 +18,7 @@ bytes.workspace = true
|
||||
byteorder.workspace = true
|
||||
utils.workspace = true
|
||||
postgres_ffi_types.workspace = true
|
||||
postgres_versioninfo.workspace = true
|
||||
enum-map.workspace = true
|
||||
strum.workspace = true
|
||||
strum_macros.workspace = true
|
||||
|
||||
@@ -11,6 +11,7 @@ use std::time::{Duration, SystemTime};
|
||||
|
||||
#[cfg(feature = "testing")]
|
||||
use camino::Utf8PathBuf;
|
||||
use postgres_versioninfo::PgMajorVersion;
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
use serde_with::serde_as;
|
||||
pub use utilization::PageserverUtilization;
|
||||
@@ -398,7 +399,7 @@ pub enum TimelineCreateRequestMode {
|
||||
// inherits the ancestor's pg_version. Earlier code wasn't
|
||||
// using a flattened enum, so, it was an accepted field, and
|
||||
// we continue to accept it by having it here.
|
||||
pg_version: Option<u32>,
|
||||
pg_version: Option<PgMajorVersion>,
|
||||
#[serde(default, skip_serializing_if = "std::ops::Not::not")]
|
||||
read_only: bool,
|
||||
},
|
||||
@@ -410,7 +411,7 @@ pub enum TimelineCreateRequestMode {
|
||||
Bootstrap {
|
||||
#[serde(default)]
|
||||
existing_initdb_timeline_id: Option<TimelineId>,
|
||||
pg_version: Option<u32>,
|
||||
pg_version: Option<PgMajorVersion>,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1573,7 +1574,7 @@ pub struct TimelineInfo {
|
||||
pub last_received_msg_lsn: Option<Lsn>,
|
||||
/// the timestamp (in microseconds) of the last received message
|
||||
pub last_received_msg_ts: Option<u128>,
|
||||
pub pg_version: u32,
|
||||
pub pg_version: PgMajorVersion,
|
||||
|
||||
pub state: TimelineState,
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@ serde.workspace = true
|
||||
postgres_ffi_types.workspace = true
|
||||
utils.workspace = true
|
||||
tracing.workspace = true
|
||||
postgres_versioninfo.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
env_logger.workspace = true
|
||||
|
||||
@@ -4,6 +4,7 @@ use criterion::{Bencher, Criterion, criterion_group, criterion_main};
|
||||
use postgres_ffi::v17::wal_generator::LogicalMessageGenerator;
|
||||
use postgres_ffi::v17::waldecoder_handler::WalStreamDecoderHandler;
|
||||
use postgres_ffi::waldecoder::WalStreamDecoder;
|
||||
use postgres_versioninfo::PgMajorVersion;
|
||||
use pprof::criterion::{Output, PProfProfiler};
|
||||
use utils::lsn::Lsn;
|
||||
|
||||
@@ -32,7 +33,7 @@ fn bench_complete_record(c: &mut Criterion) {
|
||||
let value_size = LogicalMessageGenerator::make_value_size(size, PREFIX);
|
||||
let value = vec![1; value_size];
|
||||
|
||||
let mut decoder = WalStreamDecoder::new(Lsn(0), 170000);
|
||||
let mut decoder = WalStreamDecoder::new(Lsn(0), PgMajorVersion::PG17);
|
||||
let msg = LogicalMessageGenerator::new(PREFIX, &value)
|
||||
.next()
|
||||
.unwrap()
|
||||
|
||||
@@ -14,6 +14,8 @@ use bytes::Bytes;
|
||||
use utils::bin_ser::SerializeError;
|
||||
use utils::lsn::Lsn;
|
||||
|
||||
pub use postgres_versioninfo::PgMajorVersion;
|
||||
|
||||
macro_rules! postgres_ffi {
|
||||
($version:ident) => {
|
||||
#[path = "."]
|
||||
@@ -91,21 +93,22 @@ macro_rules! dispatch_pgversion {
|
||||
$version => $code,
|
||||
default = $invalid_pgver_handling,
|
||||
pgversions = [
|
||||
14 : v14,
|
||||
15 : v15,
|
||||
16 : v16,
|
||||
17 : v17,
|
||||
$crate::PgMajorVersion::PG14 => v14,
|
||||
$crate::PgMajorVersion::PG15 => v15,
|
||||
$crate::PgMajorVersion::PG16 => v16,
|
||||
$crate::PgMajorVersion::PG17 => v17,
|
||||
]
|
||||
)
|
||||
};
|
||||
($pgversion:expr => $code:expr,
|
||||
default = $default:expr,
|
||||
pgversions = [$($sv:literal : $vsv:ident),+ $(,)?]) => {
|
||||
match ($pgversion) {
|
||||
pgversions = [$($sv:pat => $vsv:ident),+ $(,)?]) => {
|
||||
match ($pgversion.clone().into()) {
|
||||
$($sv => {
|
||||
use $crate::$vsv as pgv;
|
||||
$code
|
||||
},)+
|
||||
#[allow(unreachable_patterns)]
|
||||
_ => {
|
||||
$default
|
||||
}
|
||||
@@ -179,9 +182,9 @@ macro_rules! enum_pgversion {
|
||||
$($variant ( $crate::$md::$t )),+
|
||||
}
|
||||
impl self::$name {
|
||||
pub fn pg_version(&self) -> u32 {
|
||||
pub fn pg_version(&self) -> PgMajorVersion {
|
||||
enum_pgversion_dispatch!(self, $name, _ign, {
|
||||
pgv::bindings::PG_MAJORVERSION_NUM
|
||||
pgv::bindings::MY_PGVERSION
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -195,15 +198,15 @@ macro_rules! enum_pgversion {
|
||||
};
|
||||
{name = $name:ident,
|
||||
path = $p:ident,
|
||||
typ = $t:ident,
|
||||
$(typ = $t:ident,)?
|
||||
pgversions = [$($variant:ident : $md:ident),+ $(,)?]} => {
|
||||
pub enum $name {
|
||||
$($variant ($crate::$md::$p::$t)),+
|
||||
$($variant $(($crate::$md::$p::$t))?),+
|
||||
}
|
||||
impl $name {
|
||||
pub fn pg_version(&self) -> u32 {
|
||||
pub fn pg_version(&self) -> PgMajorVersion {
|
||||
enum_pgversion_dispatch!(self, $name, _ign, {
|
||||
pgv::bindings::PG_MAJORVERSION_NUM
|
||||
pgv::bindings::MY_PGVERSION
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -249,22 +252,21 @@ pub use v14::xlog_utils::{
|
||||
try_from_pg_timestamp,
|
||||
};
|
||||
|
||||
pub fn bkpimage_is_compressed(bimg_info: u8, version: u32) -> bool {
|
||||
pub fn bkpimage_is_compressed(bimg_info: u8, version: PgMajorVersion) -> bool {
|
||||
dispatch_pgversion!(version, pgv::bindings::bkpimg_is_compressed(bimg_info))
|
||||
}
|
||||
|
||||
pub fn generate_wal_segment(
|
||||
segno: u64,
|
||||
system_id: u64,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
lsn: Lsn,
|
||||
) -> Result<Bytes, SerializeError> {
|
||||
assert_eq!(segno, lsn.segment_number(WAL_SEGMENT_SIZE));
|
||||
|
||||
dispatch_pgversion!(
|
||||
pg_version,
|
||||
pgv::xlog_utils::generate_wal_segment(segno, system_id, lsn),
|
||||
Err(SerializeError::BadInput)
|
||||
pgv::xlog_utils::generate_wal_segment(segno, system_id, lsn)
|
||||
)
|
||||
}
|
||||
|
||||
@@ -272,7 +274,7 @@ pub fn generate_pg_control(
|
||||
pg_control_bytes: &[u8],
|
||||
checkpoint_bytes: &[u8],
|
||||
lsn: Lsn,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
) -> anyhow::Result<(Bytes, u64, bool)> {
|
||||
dispatch_pgversion!(
|
||||
pg_version,
|
||||
@@ -352,6 +354,7 @@ pub fn fsm_logical_to_physical(addr: BlockNumber) -> BlockNumber {
|
||||
pub mod waldecoder {
|
||||
use std::num::NonZeroU32;
|
||||
|
||||
use crate::PgMajorVersion;
|
||||
use bytes::{Buf, Bytes, BytesMut};
|
||||
use thiserror::Error;
|
||||
use utils::lsn::Lsn;
|
||||
@@ -369,7 +372,7 @@ pub mod waldecoder {
|
||||
|
||||
pub struct WalStreamDecoder {
|
||||
pub lsn: Lsn,
|
||||
pub pg_version: u32,
|
||||
pub pg_version: PgMajorVersion,
|
||||
pub inputbuf: BytesMut,
|
||||
pub state: State,
|
||||
}
|
||||
@@ -382,7 +385,7 @@ pub mod waldecoder {
|
||||
}
|
||||
|
||||
impl WalStreamDecoder {
|
||||
pub fn new(lsn: Lsn, pg_version: u32) -> WalStreamDecoder {
|
||||
pub fn new(lsn: Lsn, pg_version: PgMajorVersion) -> WalStreamDecoder {
|
||||
WalStreamDecoder {
|
||||
lsn,
|
||||
pg_version,
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
use crate::PgMajorVersion;
|
||||
|
||||
pub const MY_PGVERSION: PgMajorVersion = PgMajorVersion::PG14;
|
||||
|
||||
pub const XLOG_DBASE_CREATE: u8 = 0x00;
|
||||
pub const XLOG_DBASE_DROP: u8 = 0x10;
|
||||
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
use crate::PgMajorVersion;
|
||||
|
||||
pub const MY_PGVERSION: PgMajorVersion = PgMajorVersion::PG15;
|
||||
|
||||
pub const XACT_XINFO_HAS_DROPPED_STATS: u32 = 1u32 << 8;
|
||||
|
||||
pub const XLOG_DBASE_CREATE_FILE_COPY: u8 = 0x00;
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
use crate::PgMajorVersion;
|
||||
|
||||
pub const MY_PGVERSION: PgMajorVersion = PgMajorVersion::PG16;
|
||||
|
||||
pub const XACT_XINFO_HAS_DROPPED_STATS: u32 = 1u32 << 8;
|
||||
|
||||
pub const XLOG_DBASE_CREATE_FILE_COPY: u8 = 0x00;
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
use crate::PgMajorVersion;
|
||||
|
||||
pub const MY_PGVERSION: PgMajorVersion = PgMajorVersion::PG17;
|
||||
|
||||
pub const XACT_XINFO_HAS_DROPPED_STATS: u32 = 1u32 << 8;
|
||||
|
||||
pub const XLOG_DBASE_CREATE_FILE_COPY: u8 = 0x00;
|
||||
|
||||
@@ -9,8 +9,8 @@ use utils::bin_ser::DeserializeError;
|
||||
use utils::lsn::Lsn;
|
||||
|
||||
use crate::{
|
||||
BLCKSZ, BlockNumber, MultiXactId, MultiXactOffset, MultiXactStatus, Oid, RepOriginId,
|
||||
TimestampTz, TransactionId, XLOG_SIZE_OF_XLOG_RECORD, XLogRecord, pg_constants,
|
||||
BLCKSZ, BlockNumber, MultiXactId, MultiXactOffset, MultiXactStatus, Oid, PgMajorVersion,
|
||||
RepOriginId, TimestampTz, TransactionId, XLOG_SIZE_OF_XLOG_RECORD, XLogRecord, pg_constants,
|
||||
};
|
||||
|
||||
#[repr(C)]
|
||||
@@ -199,20 +199,17 @@ impl DecodedWALRecord {
|
||||
/// Check if this WAL record represents a legacy "copy" database creation, which populates new relations
|
||||
/// by reading other existing relations' data blocks. This is more complex to apply than new-style database
|
||||
/// creations which simply include all the desired blocks in the WAL, so we need a helper function to detect this case.
|
||||
pub fn is_dbase_create_copy(&self, pg_version: u32) -> bool {
|
||||
pub fn is_dbase_create_copy(&self, pg_version: PgMajorVersion) -> bool {
|
||||
if self.xl_rmid == pg_constants::RM_DBASE_ID {
|
||||
let info = self.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
|
||||
match pg_version {
|
||||
14 => {
|
||||
PgMajorVersion::PG14 => {
|
||||
// Postgres 14 database creations are always the legacy kind
|
||||
info == crate::v14::bindings::XLOG_DBASE_CREATE
|
||||
}
|
||||
15 => info == crate::v15::bindings::XLOG_DBASE_CREATE_FILE_COPY,
|
||||
16 => info == crate::v16::bindings::XLOG_DBASE_CREATE_FILE_COPY,
|
||||
17 => info == crate::v17::bindings::XLOG_DBASE_CREATE_FILE_COPY,
|
||||
_ => {
|
||||
panic!("Unsupported postgres version {pg_version}")
|
||||
}
|
||||
PgMajorVersion::PG15 => info == crate::v15::bindings::XLOG_DBASE_CREATE_FILE_COPY,
|
||||
PgMajorVersion::PG16 => info == crate::v16::bindings::XLOG_DBASE_CREATE_FILE_COPY,
|
||||
PgMajorVersion::PG17 => info == crate::v17::bindings::XLOG_DBASE_CREATE_FILE_COPY,
|
||||
}
|
||||
} else {
|
||||
false
|
||||
@@ -248,7 +245,7 @@ impl DecodedWALRecord {
|
||||
pub fn decode_wal_record(
|
||||
record: Bytes,
|
||||
decoded: &mut DecodedWALRecord,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut rnode_spcnode: u32 = 0;
|
||||
let mut rnode_dbnode: u32 = 0;
|
||||
@@ -1106,9 +1103,9 @@ pub struct XlClogTruncate {
|
||||
}
|
||||
|
||||
impl XlClogTruncate {
|
||||
pub fn decode(buf: &mut Bytes, pg_version: u32) -> XlClogTruncate {
|
||||
pub fn decode(buf: &mut Bytes, pg_version: PgMajorVersion) -> XlClogTruncate {
|
||||
XlClogTruncate {
|
||||
pageno: if pg_version < 17 {
|
||||
pageno: if pg_version < PgMajorVersion::PG17 {
|
||||
buf.get_u32_le()
|
||||
} else {
|
||||
buf.get_u64_le() as u32
|
||||
|
||||
@@ -11,9 +11,9 @@ use super::super::waldecoder::WalStreamDecoder;
|
||||
use super::bindings::{
|
||||
CheckPoint, ControlFileData, DBState_DB_SHUTDOWNED, FullTransactionId, TimeLineID, TimestampTz,
|
||||
XLogLongPageHeaderData, XLogPageHeaderData, XLogRecPtr, XLogRecord, XLogSegNo, XLOG_PAGE_MAGIC,
|
||||
MY_PGVERSION
|
||||
};
|
||||
use super::wal_generator::LogicalMessageGenerator;
|
||||
use super::PG_MAJORVERSION;
|
||||
use crate::pg_constants;
|
||||
use crate::PG_TLI;
|
||||
use crate::{uint32, uint64, Oid};
|
||||
@@ -233,7 +233,7 @@ pub fn find_end_of_wal(
|
||||
let mut result = start_lsn;
|
||||
let mut curr_lsn = start_lsn;
|
||||
let mut buf = [0u8; XLOG_BLCKSZ];
|
||||
let pg_version = PG_MAJORVERSION[1..3].parse::<u32>().unwrap();
|
||||
let pg_version = MY_PGVERSION;
|
||||
debug!("find_end_of_wal PG_VERSION: {}", pg_version);
|
||||
|
||||
let mut decoder = WalStreamDecoder::new(start_lsn, pg_version);
|
||||
|
||||
@@ -4,6 +4,7 @@ use std::str::FromStr;
|
||||
use anyhow::*;
|
||||
use clap::{Arg, ArgMatches, Command, value_parser};
|
||||
use postgres::Client;
|
||||
use postgres_ffi::PgMajorVersion;
|
||||
use wal_craft::*;
|
||||
|
||||
fn main() -> Result<()> {
|
||||
@@ -48,7 +49,7 @@ fn main() -> Result<()> {
|
||||
Some(("with-initdb", arg_matches)) => {
|
||||
let cfg = Conf {
|
||||
pg_version: *arg_matches
|
||||
.get_one::<u32>("pg-version")
|
||||
.get_one::<PgMajorVersion>("pg-version")
|
||||
.context("'pg-version' is required")?,
|
||||
pg_distrib_dir: arg_matches
|
||||
.get_one::<PathBuf>("pg-distrib-dir")
|
||||
|
||||
@@ -9,8 +9,8 @@ use log::*;
|
||||
use postgres::Client;
|
||||
use postgres::types::PgLsn;
|
||||
use postgres_ffi::{
|
||||
WAL_SEGMENT_SIZE, XLOG_BLCKSZ, XLOG_SIZE_OF_XLOG_LONG_PHD, XLOG_SIZE_OF_XLOG_RECORD,
|
||||
XLOG_SIZE_OF_XLOG_SHORT_PHD,
|
||||
PgMajorVersion, WAL_SEGMENT_SIZE, XLOG_BLCKSZ, XLOG_SIZE_OF_XLOG_LONG_PHD,
|
||||
XLOG_SIZE_OF_XLOG_RECORD, XLOG_SIZE_OF_XLOG_SHORT_PHD,
|
||||
};
|
||||
|
||||
macro_rules! xlog_utils_test {
|
||||
@@ -29,7 +29,7 @@ macro_rules! xlog_utils_test {
|
||||
postgres_ffi::for_all_postgres_versions! { xlog_utils_test }
|
||||
|
||||
pub struct Conf {
|
||||
pub pg_version: u32,
|
||||
pub pg_version: PgMajorVersion,
|
||||
pub pg_distrib_dir: PathBuf,
|
||||
pub datadir: PathBuf,
|
||||
}
|
||||
@@ -52,11 +52,7 @@ impl Conf {
|
||||
pub fn pg_distrib_dir(&self) -> anyhow::Result<PathBuf> {
|
||||
let path = self.pg_distrib_dir.clone();
|
||||
|
||||
#[allow(clippy::manual_range_patterns)]
|
||||
match self.pg_version {
|
||||
14 | 15 | 16 | 17 => Ok(path.join(format!("v{}", self.pg_version))),
|
||||
_ => bail!("Unsupported postgres version: {}", self.pg_version),
|
||||
}
|
||||
Ok(path.join(self.pg_version.v_str()))
|
||||
}
|
||||
|
||||
fn pg_bin_dir(&self) -> anyhow::Result<PathBuf> {
|
||||
|
||||
@@ -24,7 +24,7 @@ fn init_logging() {
|
||||
fn test_end_of_wal<C: crate::Crafter>(test_name: &str) {
|
||||
use crate::*;
|
||||
|
||||
let pg_version = PG_MAJORVERSION[1..3].parse::<u32>().unwrap();
|
||||
let pg_version = MY_PGVERSION;
|
||||
|
||||
// Craft some WAL
|
||||
let top_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
|
||||
|
||||
@@ -9,4 +9,5 @@ anyhow.workspace = true
|
||||
tokio.workspace = true
|
||||
camino.workspace = true
|
||||
thiserror.workspace = true
|
||||
postgres_versioninfo.workspace = true
|
||||
workspace_hack = { version = "0.1", path = "../../workspace_hack" }
|
||||
|
||||
@@ -7,12 +7,13 @@
|
||||
use std::fmt;
|
||||
|
||||
use camino::Utf8Path;
|
||||
use postgres_versioninfo::PgMajorVersion;
|
||||
|
||||
pub struct RunInitdbArgs<'a> {
|
||||
pub superuser: &'a str,
|
||||
pub locale: &'a str,
|
||||
pub initdb_bin: &'a Utf8Path,
|
||||
pub pg_version: u32,
|
||||
pub pg_version: PgMajorVersion,
|
||||
pub library_search_path: &'a Utf8Path,
|
||||
pub pgdata: &'a Utf8Path,
|
||||
}
|
||||
@@ -79,12 +80,16 @@ pub async fn do_run_initdb(args: RunInitdbArgs<'_>) -> Result<(), Error> {
|
||||
.stderr(std::process::Stdio::piped());
|
||||
|
||||
// Before version 14, only the libc provide was available.
|
||||
if pg_version > 14 {
|
||||
if pg_version > PgMajorVersion::PG14 {
|
||||
// Version 17 brought with it a builtin locale provider which only provides
|
||||
// C and C.UTF-8. While being safer for collation purposes since it is
|
||||
// guaranteed to be consistent throughout a major release, it is also more
|
||||
// performant.
|
||||
let locale_provider = if pg_version >= 17 { "builtin" } else { "libc" };
|
||||
let locale_provider = if pg_version >= PgMajorVersion::PG17 {
|
||||
"builtin"
|
||||
} else {
|
||||
"libc"
|
||||
};
|
||||
|
||||
initdb_command.args(["--locale-provider", locale_provider]);
|
||||
}
|
||||
|
||||
12
libs/postgres_versioninfo/Cargo.toml
Normal file
12
libs/postgres_versioninfo/Cargo.toml
Normal file
@@ -0,0 +1,12 @@
|
||||
[package]
|
||||
name = "postgres_versioninfo"
|
||||
version = "0.1.0"
|
||||
edition = "2024"
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
anyhow.workspace = true
|
||||
thiserror.workspace = true
|
||||
serde.workspace = true
|
||||
serde_repr.workspace = true
|
||||
workspace_hack = { version = "0.1", path = "../../workspace_hack" }
|
||||
175
libs/postgres_versioninfo/src/lib.rs
Normal file
175
libs/postgres_versioninfo/src/lib.rs
Normal file
@@ -0,0 +1,175 @@
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
use serde_repr::{Deserialize_repr, Serialize_repr};
|
||||
use std::fmt::{Display, Formatter};
|
||||
use std::str::FromStr;
|
||||
|
||||
/// An enum with one variant for each major version of PostgreSQL that we support.
|
||||
///
|
||||
#[derive(Debug, Clone, Copy, Ord, PartialOrd, Eq, PartialEq, Deserialize_repr, Serialize_repr)]
|
||||
#[repr(u32)]
|
||||
pub enum PgMajorVersion {
|
||||
PG14 = 14,
|
||||
PG15 = 15,
|
||||
PG16 = 16,
|
||||
PG17 = 17,
|
||||
// !!! When you add a new PgMajorVersion, don't forget to update PgMajorVersion::ALL
|
||||
}
|
||||
|
||||
/// A full PostgreSQL version ID, in MMmmbb numerical format (Major/minor/bugfix)
|
||||
#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
|
||||
#[repr(transparent)]
|
||||
pub struct PgVersionId(u32);
|
||||
|
||||
impl PgVersionId {
|
||||
pub const UNKNOWN: PgVersionId = PgVersionId(0);
|
||||
|
||||
pub fn from_full_pg_version(version: u32) -> PgVersionId {
|
||||
match version {
|
||||
0 => PgVersionId(version), // unknown version
|
||||
140000..180000 => PgVersionId(version),
|
||||
_ => panic!("Invalid full PostgreSQL version ID {version}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for PgVersionId {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
u32::fmt(&self.0, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for PgVersionId {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
u32::serialize(&self.0, serializer)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for PgVersionId {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
u32::deserialize(deserializer).map(PgVersionId)
|
||||
}
|
||||
|
||||
fn deserialize_in_place<D>(deserializer: D, place: &mut Self) -> Result<(), D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
u32::deserialize_in_place(deserializer, &mut place.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl PgMajorVersion {
|
||||
/// Get the numerical representation of the represented Major Version
|
||||
pub const fn major_version_num(&self) -> u32 {
|
||||
match self {
|
||||
PgMajorVersion::PG14 => 14,
|
||||
PgMajorVersion::PG15 => 15,
|
||||
PgMajorVersion::PG16 => 16,
|
||||
PgMajorVersion::PG17 => 17,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the contents of this version's PG_VERSION file.
|
||||
///
|
||||
/// The PG_VERSION file is used to determine the PostgreSQL version that currently
|
||||
/// owns the data in a PostgreSQL data directory.
|
||||
pub fn versionfile_string(&self) -> &'static str {
|
||||
match self {
|
||||
PgMajorVersion::PG14 => "14",
|
||||
PgMajorVersion::PG15 => "15",
|
||||
PgMajorVersion::PG16 => "16\x0A",
|
||||
PgMajorVersion::PG17 => "17\x0A",
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the v{version} string of this major PostgreSQL version.
|
||||
///
|
||||
/// Because this was hand-coded in various places, this was moved into a shared
|
||||
/// implementation.
|
||||
pub fn v_str(&self) -> String {
|
||||
match self {
|
||||
PgMajorVersion::PG14 => "v14",
|
||||
PgMajorVersion::PG15 => "v15",
|
||||
PgMajorVersion::PG16 => "v16",
|
||||
PgMajorVersion::PG17 => "v17",
|
||||
}
|
||||
.to_string()
|
||||
}
|
||||
|
||||
/// All currently supported major versions of PostgreSQL.
|
||||
pub const ALL: &'static [PgMajorVersion] = &[
|
||||
PgMajorVersion::PG14,
|
||||
PgMajorVersion::PG15,
|
||||
PgMajorVersion::PG16,
|
||||
PgMajorVersion::PG17,
|
||||
];
|
||||
}
|
||||
|
||||
impl Display for PgMajorVersion {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str(match self {
|
||||
PgMajorVersion::PG14 => "PgMajorVersion::PG14",
|
||||
PgMajorVersion::PG15 => "PgMajorVersion::PG15",
|
||||
PgMajorVersion::PG16 => "PgMajorVersion::PG16",
|
||||
PgMajorVersion::PG17 => "PgMajorVersion::PG17",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[allow(dead_code)]
|
||||
pub struct InvalidPgVersion(u32);
|
||||
|
||||
impl Display for InvalidPgVersion {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "InvalidPgVersion({})", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<PgVersionId> for PgMajorVersion {
|
||||
type Error = InvalidPgVersion;
|
||||
|
||||
fn try_from(value: PgVersionId) -> Result<Self, Self::Error> {
|
||||
Ok(match value.0 / 10000 {
|
||||
14 => PgMajorVersion::PG14,
|
||||
15 => PgMajorVersion::PG15,
|
||||
16 => PgMajorVersion::PG16,
|
||||
17 => PgMajorVersion::PG17,
|
||||
_ => return Err(InvalidPgVersion(value.0)),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl From<PgMajorVersion> for PgVersionId {
|
||||
fn from(value: PgMajorVersion) -> Self {
|
||||
PgVersionId((value as u32) * 10000)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, thiserror::Error)]
|
||||
pub struct PgMajorVersionParseError(String);
|
||||
|
||||
impl Display for PgMajorVersionParseError {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "PgMajorVersionParseError({})", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for PgMajorVersion {
|
||||
type Err = PgMajorVersionParseError;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
Ok(match s {
|
||||
"14" => PgMajorVersion::PG14,
|
||||
"15" => PgMajorVersion::PG15,
|
||||
"16" => PgMajorVersion::PG16,
|
||||
"17" => PgMajorVersion::PG17,
|
||||
_ => return Err(PgMajorVersionParseError(s.to_string())),
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -10,6 +10,7 @@ const_format.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
postgres_ffi.workspace = true
|
||||
postgres_versioninfo.workspace = true
|
||||
pq_proto.workspace = true
|
||||
tokio.workspace = true
|
||||
utils.workspace = true
|
||||
|
||||
@@ -8,6 +8,8 @@ pub mod membership;
|
||||
/// Public API types
|
||||
pub mod models;
|
||||
|
||||
pub use postgres_versioninfo::{PgMajorVersion, PgVersionId};
|
||||
|
||||
/// Consensus logical timestamp. Note: it is a part of sk control file.
|
||||
pub type Term = u64;
|
||||
/// With this term timeline is created initially. It
|
||||
@@ -20,7 +22,7 @@ pub const INITIAL_TERM: Term = 0;
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct ServerInfo {
|
||||
/// Postgres server version
|
||||
pub pg_version: u32,
|
||||
pub pg_version: PgVersionId,
|
||||
pub system_id: SystemId,
|
||||
pub wal_seg_size: u32,
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ use std::net::SocketAddr;
|
||||
|
||||
use pageserver_api::shard::ShardIdentity;
|
||||
use postgres_ffi::TimestampTz;
|
||||
use postgres_versioninfo::PgVersionId;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::time::Instant;
|
||||
use utils::id::{NodeId, TenantId, TenantTimelineId, TimelineId};
|
||||
@@ -23,8 +24,7 @@ pub struct TimelineCreateRequest {
|
||||
pub tenant_id: TenantId,
|
||||
pub timeline_id: TimelineId,
|
||||
pub mconf: Configuration,
|
||||
/// In the PG_VERSION_NUM macro format, like 140017.
|
||||
pub pg_version: u32,
|
||||
pub pg_version: PgVersionId,
|
||||
pub system_id: Option<u64>,
|
||||
// By default WAL_SEGMENT_SIZE
|
||||
pub wal_seg_size: Option<u32>,
|
||||
|
||||
@@ -10,7 +10,7 @@ use futures::StreamExt;
|
||||
use futures::stream::FuturesUnordered;
|
||||
use pageserver_api::shard::{ShardIdentity, ShardStripeSize};
|
||||
use postgres_ffi::waldecoder::WalStreamDecoder;
|
||||
use postgres_ffi::{MAX_SEND_SIZE, WAL_SEGMENT_SIZE};
|
||||
use postgres_ffi::{MAX_SEND_SIZE, PgMajorVersion, WAL_SEGMENT_SIZE};
|
||||
use pprof::criterion::{Output, PProfProfiler};
|
||||
use remote_storage::{
|
||||
DownloadOpts, GenericRemoteStorage, ListingMode, RemoteStorageConfig, RemoteStorageKind,
|
||||
@@ -115,7 +115,7 @@ struct BenchmarkData {
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct BenchmarkMetadata {
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
start_lsn: Lsn,
|
||||
}
|
||||
|
||||
|
||||
@@ -7,8 +7,8 @@ use bytes::{Buf, Bytes};
|
||||
use pageserver_api::key::rel_block_to_key;
|
||||
use pageserver_api::reltag::{RelTag, SlruKind};
|
||||
use pageserver_api::shard::ShardIdentity;
|
||||
use postgres_ffi::pg_constants;
|
||||
use postgres_ffi::walrecord::*;
|
||||
use postgres_ffi::{PgMajorVersion, pg_constants};
|
||||
use postgres_ffi_types::forknum::VISIBILITYMAP_FORKNUM;
|
||||
use utils::lsn::Lsn;
|
||||
|
||||
@@ -24,7 +24,7 @@ impl InterpretedWalRecord {
|
||||
buf: Bytes,
|
||||
shards: &[ShardIdentity],
|
||||
next_record_lsn: Lsn,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
) -> anyhow::Result<HashMap<ShardIdentity, InterpretedWalRecord>> {
|
||||
let mut decoded = DecodedWALRecord::default();
|
||||
decode_wal_record(buf, &mut decoded, pg_version)?;
|
||||
@@ -78,7 +78,7 @@ impl MetadataRecord {
|
||||
decoded: &DecodedWALRecord,
|
||||
shard_records: &mut HashMap<ShardIdentity, InterpretedWalRecord>,
|
||||
next_record_lsn: Lsn,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
) -> anyhow::Result<()> {
|
||||
// Note: this doesn't actually copy the bytes since
|
||||
// the [`Bytes`] type implements it via a level of indirection.
|
||||
@@ -193,7 +193,7 @@ impl MetadataRecord {
|
||||
fn decode_heapam_record(
|
||||
buf: &mut Bytes,
|
||||
decoded: &DecodedWALRecord,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
) -> anyhow::Result<Option<MetadataRecord>> {
|
||||
// Handle VM bit updates that are implicitly part of heap records.
|
||||
|
||||
@@ -205,7 +205,7 @@ impl MetadataRecord {
|
||||
let mut flags = pg_constants::VISIBILITYMAP_VALID_BITS;
|
||||
|
||||
match pg_version {
|
||||
14 => {
|
||||
PgMajorVersion::PG14 => {
|
||||
if decoded.xl_rmid == pg_constants::RM_HEAP_ID {
|
||||
let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
|
||||
|
||||
@@ -272,7 +272,7 @@ impl MetadataRecord {
|
||||
anyhow::bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid);
|
||||
}
|
||||
}
|
||||
15 => {
|
||||
PgMajorVersion::PG15 => {
|
||||
if decoded.xl_rmid == pg_constants::RM_HEAP_ID {
|
||||
let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
|
||||
|
||||
@@ -339,7 +339,7 @@ impl MetadataRecord {
|
||||
anyhow::bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid);
|
||||
}
|
||||
}
|
||||
16 => {
|
||||
PgMajorVersion::PG16 => {
|
||||
if decoded.xl_rmid == pg_constants::RM_HEAP_ID {
|
||||
let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
|
||||
|
||||
@@ -406,7 +406,7 @@ impl MetadataRecord {
|
||||
anyhow::bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid);
|
||||
}
|
||||
}
|
||||
17 => {
|
||||
PgMajorVersion::PG17 => {
|
||||
if decoded.xl_rmid == pg_constants::RM_HEAP_ID {
|
||||
let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
|
||||
|
||||
@@ -473,7 +473,6 @@ impl MetadataRecord {
|
||||
anyhow::bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid);
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
|
||||
if new_heap_blkno.is_some() || old_heap_blkno.is_some() {
|
||||
@@ -500,7 +499,7 @@ impl MetadataRecord {
|
||||
fn decode_neonmgr_record(
|
||||
buf: &mut Bytes,
|
||||
decoded: &DecodedWALRecord,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
) -> anyhow::Result<Option<MetadataRecord>> {
|
||||
// Handle VM bit updates that are implicitly part of heap records.
|
||||
|
||||
@@ -514,7 +513,7 @@ impl MetadataRecord {
|
||||
assert_eq!(decoded.xl_rmid, pg_constants::RM_NEON_ID);
|
||||
|
||||
match pg_version {
|
||||
16 | 17 => {
|
||||
PgMajorVersion::PG16 | PgMajorVersion::PG17 => {
|
||||
let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
|
||||
|
||||
match info {
|
||||
@@ -574,7 +573,7 @@ impl MetadataRecord {
|
||||
info => anyhow::bail!("Unknown WAL record type for Neon RMGR: {}", info),
|
||||
}
|
||||
}
|
||||
_ => anyhow::bail!(
|
||||
PgMajorVersion::PG15 | PgMajorVersion::PG14 => anyhow::bail!(
|
||||
"Neon RMGR has no known compatibility with PostgreSQL version {}",
|
||||
pg_version
|
||||
),
|
||||
@@ -629,116 +628,121 @@ impl MetadataRecord {
|
||||
fn decode_dbase_record(
|
||||
buf: &mut Bytes,
|
||||
decoded: &DecodedWALRecord,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
) -> anyhow::Result<Option<MetadataRecord>> {
|
||||
// TODO: Refactor this to avoid the duplication between postgres versions.
|
||||
|
||||
let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
|
||||
tracing::debug!(%info, %pg_version, "handle RM_DBASE_ID");
|
||||
|
||||
if pg_version == 14 {
|
||||
if info == postgres_ffi::v14::bindings::XLOG_DBASE_CREATE {
|
||||
let createdb = XlCreateDatabase::decode(buf);
|
||||
tracing::debug!("XLOG_DBASE_CREATE v14");
|
||||
match pg_version {
|
||||
PgMajorVersion::PG14 => {
|
||||
if info == postgres_ffi::v14::bindings::XLOG_DBASE_CREATE {
|
||||
let createdb = XlCreateDatabase::decode(buf);
|
||||
tracing::debug!("XLOG_DBASE_CREATE v14");
|
||||
|
||||
let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate {
|
||||
db_id: createdb.db_id,
|
||||
tablespace_id: createdb.tablespace_id,
|
||||
src_db_id: createdb.src_db_id,
|
||||
src_tablespace_id: createdb.src_tablespace_id,
|
||||
}));
|
||||
let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate {
|
||||
db_id: createdb.db_id,
|
||||
tablespace_id: createdb.tablespace_id,
|
||||
src_db_id: createdb.src_db_id,
|
||||
src_tablespace_id: createdb.src_tablespace_id,
|
||||
}));
|
||||
|
||||
return Ok(Some(record));
|
||||
} else if info == postgres_ffi::v14::bindings::XLOG_DBASE_DROP {
|
||||
let dropdb = XlDropDatabase::decode(buf);
|
||||
return Ok(Some(record));
|
||||
} else if info == postgres_ffi::v14::bindings::XLOG_DBASE_DROP {
|
||||
let dropdb = XlDropDatabase::decode(buf);
|
||||
|
||||
let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop {
|
||||
db_id: dropdb.db_id,
|
||||
tablespace_ids: dropdb.tablespace_ids,
|
||||
}));
|
||||
let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop {
|
||||
db_id: dropdb.db_id,
|
||||
tablespace_ids: dropdb.tablespace_ids,
|
||||
}));
|
||||
|
||||
return Ok(Some(record));
|
||||
return Ok(Some(record));
|
||||
}
|
||||
}
|
||||
} else if pg_version == 15 {
|
||||
if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_WAL_LOG {
|
||||
tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop");
|
||||
} else if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_FILE_COPY {
|
||||
// The XLOG record was renamed between v14 and v15,
|
||||
// but the record format is the same.
|
||||
// So we can reuse XlCreateDatabase here.
|
||||
tracing::debug!("XLOG_DBASE_CREATE_FILE_COPY");
|
||||
PgMajorVersion::PG15 => {
|
||||
if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_WAL_LOG {
|
||||
tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop");
|
||||
} else if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_FILE_COPY {
|
||||
// The XLOG record was renamed between v14 and v15,
|
||||
// but the record format is the same.
|
||||
// So we can reuse XlCreateDatabase here.
|
||||
tracing::debug!("XLOG_DBASE_CREATE_FILE_COPY");
|
||||
|
||||
let createdb = XlCreateDatabase::decode(buf);
|
||||
let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate {
|
||||
db_id: createdb.db_id,
|
||||
tablespace_id: createdb.tablespace_id,
|
||||
src_db_id: createdb.src_db_id,
|
||||
src_tablespace_id: createdb.src_tablespace_id,
|
||||
}));
|
||||
let createdb = XlCreateDatabase::decode(buf);
|
||||
let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate {
|
||||
db_id: createdb.db_id,
|
||||
tablespace_id: createdb.tablespace_id,
|
||||
src_db_id: createdb.src_db_id,
|
||||
src_tablespace_id: createdb.src_tablespace_id,
|
||||
}));
|
||||
|
||||
return Ok(Some(record));
|
||||
} else if info == postgres_ffi::v15::bindings::XLOG_DBASE_DROP {
|
||||
let dropdb = XlDropDatabase::decode(buf);
|
||||
let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop {
|
||||
db_id: dropdb.db_id,
|
||||
tablespace_ids: dropdb.tablespace_ids,
|
||||
}));
|
||||
return Ok(Some(record));
|
||||
} else if info == postgres_ffi::v15::bindings::XLOG_DBASE_DROP {
|
||||
let dropdb = XlDropDatabase::decode(buf);
|
||||
let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop {
|
||||
db_id: dropdb.db_id,
|
||||
tablespace_ids: dropdb.tablespace_ids,
|
||||
}));
|
||||
|
||||
return Ok(Some(record));
|
||||
return Ok(Some(record));
|
||||
}
|
||||
}
|
||||
} else if pg_version == 16 {
|
||||
if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_WAL_LOG {
|
||||
tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop");
|
||||
} else if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_FILE_COPY {
|
||||
// The XLOG record was renamed between v14 and v15,
|
||||
// but the record format is the same.
|
||||
// So we can reuse XlCreateDatabase here.
|
||||
tracing::debug!("XLOG_DBASE_CREATE_FILE_COPY");
|
||||
PgMajorVersion::PG16 => {
|
||||
if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_WAL_LOG {
|
||||
tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop");
|
||||
} else if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_FILE_COPY {
|
||||
// The XLOG record was renamed between v14 and v15,
|
||||
// but the record format is the same.
|
||||
// So we can reuse XlCreateDatabase here.
|
||||
tracing::debug!("XLOG_DBASE_CREATE_FILE_COPY");
|
||||
|
||||
let createdb = XlCreateDatabase::decode(buf);
|
||||
let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate {
|
||||
db_id: createdb.db_id,
|
||||
tablespace_id: createdb.tablespace_id,
|
||||
src_db_id: createdb.src_db_id,
|
||||
src_tablespace_id: createdb.src_tablespace_id,
|
||||
}));
|
||||
let createdb = XlCreateDatabase::decode(buf);
|
||||
let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate {
|
||||
db_id: createdb.db_id,
|
||||
tablespace_id: createdb.tablespace_id,
|
||||
src_db_id: createdb.src_db_id,
|
||||
src_tablespace_id: createdb.src_tablespace_id,
|
||||
}));
|
||||
|
||||
return Ok(Some(record));
|
||||
} else if info == postgres_ffi::v16::bindings::XLOG_DBASE_DROP {
|
||||
let dropdb = XlDropDatabase::decode(buf);
|
||||
let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop {
|
||||
db_id: dropdb.db_id,
|
||||
tablespace_ids: dropdb.tablespace_ids,
|
||||
}));
|
||||
return Ok(Some(record));
|
||||
} else if info == postgres_ffi::v16::bindings::XLOG_DBASE_DROP {
|
||||
let dropdb = XlDropDatabase::decode(buf);
|
||||
let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop {
|
||||
db_id: dropdb.db_id,
|
||||
tablespace_ids: dropdb.tablespace_ids,
|
||||
}));
|
||||
|
||||
return Ok(Some(record));
|
||||
return Ok(Some(record));
|
||||
}
|
||||
}
|
||||
} else if pg_version == 17 {
|
||||
if info == postgres_ffi::v17::bindings::XLOG_DBASE_CREATE_WAL_LOG {
|
||||
tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop");
|
||||
} else if info == postgres_ffi::v17::bindings::XLOG_DBASE_CREATE_FILE_COPY {
|
||||
// The XLOG record was renamed between v14 and v15,
|
||||
// but the record format is the same.
|
||||
// So we can reuse XlCreateDatabase here.
|
||||
tracing::debug!("XLOG_DBASE_CREATE_FILE_COPY");
|
||||
PgMajorVersion::PG17 => {
|
||||
if info == postgres_ffi::v17::bindings::XLOG_DBASE_CREATE_WAL_LOG {
|
||||
tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop");
|
||||
} else if info == postgres_ffi::v17::bindings::XLOG_DBASE_CREATE_FILE_COPY {
|
||||
// The XLOG record was renamed between v14 and v15,
|
||||
// but the record format is the same.
|
||||
// So we can reuse XlCreateDatabase here.
|
||||
tracing::debug!("XLOG_DBASE_CREATE_FILE_COPY");
|
||||
|
||||
let createdb = XlCreateDatabase::decode(buf);
|
||||
let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate {
|
||||
db_id: createdb.db_id,
|
||||
tablespace_id: createdb.tablespace_id,
|
||||
src_db_id: createdb.src_db_id,
|
||||
src_tablespace_id: createdb.src_tablespace_id,
|
||||
}));
|
||||
let createdb = XlCreateDatabase::decode(buf);
|
||||
let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate {
|
||||
db_id: createdb.db_id,
|
||||
tablespace_id: createdb.tablespace_id,
|
||||
src_db_id: createdb.src_db_id,
|
||||
src_tablespace_id: createdb.src_tablespace_id,
|
||||
}));
|
||||
|
||||
return Ok(Some(record));
|
||||
} else if info == postgres_ffi::v17::bindings::XLOG_DBASE_DROP {
|
||||
let dropdb = XlDropDatabase::decode(buf);
|
||||
let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop {
|
||||
db_id: dropdb.db_id,
|
||||
tablespace_ids: dropdb.tablespace_ids,
|
||||
}));
|
||||
return Ok(Some(record));
|
||||
} else if info == postgres_ffi::v17::bindings::XLOG_DBASE_DROP {
|
||||
let dropdb = XlDropDatabase::decode(buf);
|
||||
let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop {
|
||||
db_id: dropdb.db_id,
|
||||
tablespace_ids: dropdb.tablespace_ids,
|
||||
}));
|
||||
|
||||
return Ok(Some(record));
|
||||
return Ok(Some(record));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -748,12 +752,12 @@ impl MetadataRecord {
|
||||
fn decode_clog_record(
|
||||
buf: &mut Bytes,
|
||||
decoded: &DecodedWALRecord,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
) -> anyhow::Result<Option<MetadataRecord>> {
|
||||
let info = decoded.xl_info & !pg_constants::XLR_INFO_MASK;
|
||||
|
||||
if info == pg_constants::CLOG_ZEROPAGE {
|
||||
let pageno = if pg_version < 17 {
|
||||
let pageno = if pg_version < PgMajorVersion::PG17 {
|
||||
buf.get_u32_le()
|
||||
} else {
|
||||
buf.get_u64_le() as u32
|
||||
@@ -765,7 +769,7 @@ impl MetadataRecord {
|
||||
ClogZeroPage { segno, rpageno },
|
||||
))))
|
||||
} else {
|
||||
assert!(info == pg_constants::CLOG_TRUNCATE);
|
||||
assert_eq!(info, pg_constants::CLOG_TRUNCATE);
|
||||
let xlrec = XlClogTruncate::decode(buf, pg_version);
|
||||
|
||||
Ok(Some(MetadataRecord::Clog(ClogRecord::Truncate(
|
||||
@@ -838,14 +842,14 @@ impl MetadataRecord {
|
||||
fn decode_multixact_record(
|
||||
buf: &mut Bytes,
|
||||
decoded: &DecodedWALRecord,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
) -> anyhow::Result<Option<MetadataRecord>> {
|
||||
let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
|
||||
|
||||
if info == pg_constants::XLOG_MULTIXACT_ZERO_OFF_PAGE
|
||||
|| info == pg_constants::XLOG_MULTIXACT_ZERO_MEM_PAGE
|
||||
{
|
||||
let pageno = if pg_version < 17 {
|
||||
let pageno = if pg_version < PgMajorVersion::PG17 {
|
||||
buf.get_u32_le()
|
||||
} else {
|
||||
buf.get_u64_le() as u32
|
||||
|
||||
@@ -13,7 +13,7 @@ use pageserver_api::keyspace::KeySpace;
|
||||
use pageserver_api::reltag::RelTag;
|
||||
use pageserver_api::shard::ShardIdentity;
|
||||
use postgres_ffi::walrecord::{DecodedBkpBlock, DecodedWALRecord};
|
||||
use postgres_ffi::{BLCKSZ, page_is_new, page_set_lsn, pg_constants};
|
||||
use postgres_ffi::{BLCKSZ, PgMajorVersion, page_is_new, page_set_lsn, pg_constants};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use utils::bin_ser::BeSer;
|
||||
use utils::lsn::Lsn;
|
||||
@@ -139,7 +139,7 @@ impl SerializedValueBatch {
|
||||
decoded: DecodedWALRecord,
|
||||
shard_records: &mut HashMap<ShardIdentity, InterpretedWalRecord>,
|
||||
next_record_lsn: Lsn,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
) -> anyhow::Result<()> {
|
||||
// First determine how big the buffers need to be and allocate it up-front.
|
||||
// This duplicates some of the work below, but it's empirically much faster.
|
||||
@@ -267,7 +267,7 @@ impl SerializedValueBatch {
|
||||
fn estimate_buffer_size(
|
||||
decoded: &DecodedWALRecord,
|
||||
shard: &ShardIdentity,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
) -> usize {
|
||||
let mut estimate: usize = 0;
|
||||
|
||||
@@ -303,7 +303,11 @@ impl SerializedValueBatch {
|
||||
estimate
|
||||
}
|
||||
|
||||
fn block_is_image(decoded: &DecodedWALRecord, blk: &DecodedBkpBlock, pg_version: u32) -> bool {
|
||||
fn block_is_image(
|
||||
decoded: &DecodedWALRecord,
|
||||
blk: &DecodedBkpBlock,
|
||||
pg_version: PgMajorVersion,
|
||||
) -> bool {
|
||||
blk.apply_image
|
||||
&& blk.has_image
|
||||
&& decoded.xl_rmid == pg_constants::RM_XLOG_ID
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -18,6 +18,7 @@ workspace_hack = { version = "0.1", path = "../../workspace_hack" }
|
||||
tokio-postgres.workspace = true
|
||||
tokio-stream.workspace = true
|
||||
tokio.workspace = true
|
||||
postgres_versioninfo.workspace = true
|
||||
futures.workspace = true
|
||||
tokio-util.workspace = true
|
||||
anyhow.workspace = true
|
||||
|
||||
@@ -7,6 +7,7 @@ use detach_ancestor::AncestorDetached;
|
||||
use http_utils::error::HttpErrorBody;
|
||||
use pageserver_api::models::*;
|
||||
use pageserver_api::shard::TenantShardId;
|
||||
use postgres_versioninfo::PgMajorVersion;
|
||||
pub use reqwest::Body as ReqwestBody;
|
||||
use reqwest::{IntoUrl, Method, StatusCode, Url};
|
||||
use utils::id::{TenantId, TimelineId};
|
||||
@@ -745,9 +746,11 @@ impl Client {
|
||||
timeline_id: TimelineId,
|
||||
base_lsn: Lsn,
|
||||
end_lsn: Lsn,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
basebackup_tarball: ReqwestBody,
|
||||
) -> Result<()> {
|
||||
let pg_version = pg_version.major_version_num();
|
||||
|
||||
let uri = format!(
|
||||
"{}/v1/tenant/{tenant_id}/timeline/{timeline_id}/import_basebackup?base_lsn={base_lsn}&end_lsn={end_lsn}&pg_version={pg_version}",
|
||||
self.mgmt_api_endpoint,
|
||||
|
||||
@@ -20,7 +20,8 @@ use pageserver_api::key::{Key, rel_block_to_key};
|
||||
use pageserver_api::reltag::{RelTag, SlruKind};
|
||||
use postgres_ffi::pg_constants::{PG_HBA, PGDATA_SPECIAL_FILES};
|
||||
use postgres_ffi::{
|
||||
BLCKSZ, PG_TLI, RELSEG_SIZE, WAL_SEGMENT_SIZE, XLogFileName, dispatch_pgversion, pg_constants,
|
||||
BLCKSZ, PG_TLI, PgMajorVersion, RELSEG_SIZE, WAL_SEGMENT_SIZE, XLogFileName,
|
||||
dispatch_pgversion, pg_constants,
|
||||
};
|
||||
use postgres_ffi_types::constants::{DEFAULTTABLESPACE_OID, GLOBALTABLESPACE_OID};
|
||||
use postgres_ffi_types::forknum::{INIT_FORKNUM, MAIN_FORKNUM};
|
||||
@@ -619,10 +620,7 @@ where
|
||||
};
|
||||
|
||||
if spcnode == GLOBALTABLESPACE_OID {
|
||||
let pg_version_str = match self.timeline.pg_version {
|
||||
14 | 15 => self.timeline.pg_version.to_string(),
|
||||
ver => format!("{ver}\x0A"),
|
||||
};
|
||||
let pg_version_str = self.timeline.pg_version.versionfile_string();
|
||||
let header = new_tar_header("PG_VERSION", pg_version_str.len() as u64)?;
|
||||
self.ar
|
||||
.append(&header, pg_version_str.as_bytes())
|
||||
@@ -679,10 +677,7 @@ where
|
||||
if let Some(img) = relmap_img {
|
||||
let dst_path = format!("base/{dbnode}/PG_VERSION");
|
||||
|
||||
let pg_version_str = match self.timeline.pg_version {
|
||||
14 | 15 => self.timeline.pg_version.to_string(),
|
||||
ver => format!("{ver}\x0A"),
|
||||
};
|
||||
let pg_version_str = self.timeline.pg_version.versionfile_string();
|
||||
let header = new_tar_header(&dst_path, pg_version_str.len() as u64)?;
|
||||
self.ar
|
||||
.append(&header, pg_version_str.as_bytes())
|
||||
@@ -713,7 +708,7 @@ where
|
||||
buf.extend_from_slice(&img[..]);
|
||||
let crc = crc32c::crc32c(&img[..]);
|
||||
buf.put_u32_le(crc);
|
||||
let path = if self.timeline.pg_version < 17 {
|
||||
let path = if self.timeline.pg_version < PgMajorVersion::PG17 {
|
||||
format!("pg_twophase/{xid:>08X}")
|
||||
} else {
|
||||
format!("pg_twophase/{xid:>016X}")
|
||||
|
||||
@@ -11,7 +11,7 @@ use std::num::NonZeroUsize;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::{Context, bail, ensure};
|
||||
use anyhow::{Context, ensure};
|
||||
use camino::{Utf8Path, Utf8PathBuf};
|
||||
use once_cell::sync::OnceCell;
|
||||
use pageserver_api::config::{
|
||||
@@ -22,6 +22,7 @@ use pageserver_api::models::ImageCompressionAlgorithm;
|
||||
use pageserver_api::shard::TenantShardId;
|
||||
use pem::Pem;
|
||||
use postgres_backend::AuthType;
|
||||
use postgres_ffi::PgMajorVersion;
|
||||
use remote_storage::{RemotePath, RemoteStorageConfig};
|
||||
use reqwest::Url;
|
||||
use storage_broker::Uri;
|
||||
@@ -338,20 +339,16 @@ impl PageServerConf {
|
||||
//
|
||||
// Postgres distribution paths
|
||||
//
|
||||
pub fn pg_distrib_dir(&self, pg_version: u32) -> anyhow::Result<Utf8PathBuf> {
|
||||
pub fn pg_distrib_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result<Utf8PathBuf> {
|
||||
let path = self.pg_distrib_dir.clone();
|
||||
|
||||
#[allow(clippy::manual_range_patterns)]
|
||||
match pg_version {
|
||||
14 | 15 | 16 | 17 => Ok(path.join(format!("v{pg_version}"))),
|
||||
_ => bail!("Unsupported postgres version: {}", pg_version),
|
||||
}
|
||||
Ok(path.join(pg_version.v_str()))
|
||||
}
|
||||
|
||||
pub fn pg_bin_dir(&self, pg_version: u32) -> anyhow::Result<Utf8PathBuf> {
|
||||
pub fn pg_bin_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result<Utf8PathBuf> {
|
||||
Ok(self.pg_distrib_dir(pg_version)?.join("bin"))
|
||||
}
|
||||
pub fn pg_lib_dir(&self, pg_version: u32) -> anyhow::Result<Utf8PathBuf> {
|
||||
pub fn pg_lib_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result<Utf8PathBuf> {
|
||||
Ok(self.pg_distrib_dir(pg_version)?.join("lib"))
|
||||
}
|
||||
|
||||
|
||||
@@ -41,6 +41,7 @@ use pageserver_api::models::{
|
||||
TopTenantShardItem, TopTenantShardsRequest, TopTenantShardsResponse,
|
||||
};
|
||||
use pageserver_api::shard::{ShardCount, TenantShardId};
|
||||
use postgres_ffi::PgMajorVersion;
|
||||
use remote_storage::{DownloadError, GenericRemoteStorage, TimeTravelError};
|
||||
use scopeguard::defer;
|
||||
use serde_json::json;
|
||||
@@ -3385,7 +3386,7 @@ async fn put_tenant_timeline_import_basebackup(
|
||||
let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
|
||||
let base_lsn: Lsn = must_parse_query_param(&request, "base_lsn")?;
|
||||
let end_lsn: Lsn = must_parse_query_param(&request, "end_lsn")?;
|
||||
let pg_version: u32 = must_parse_query_param(&request, "pg_version")?;
|
||||
let pg_version: PgMajorVersion = must_parse_query_param(&request, "pg_version")?;
|
||||
|
||||
check_permission(&request, Some(tenant_id))?;
|
||||
|
||||
|
||||
@@ -38,6 +38,7 @@ pub mod walredo;
|
||||
|
||||
use camino::Utf8Path;
|
||||
use deletion_queue::DeletionQueue;
|
||||
use postgres_ffi::PgMajorVersion;
|
||||
use tenant::mgr::{BackgroundPurges, TenantManager};
|
||||
use tenant::secondary;
|
||||
use tracing::{info, info_span};
|
||||
@@ -51,7 +52,7 @@ use tracing::{info, info_span};
|
||||
/// backwards-compatible changes to the metadata format.
|
||||
pub const STORAGE_FORMAT_VERSION: u16 = 3;
|
||||
|
||||
pub const DEFAULT_PG_VERSION: u32 = 17;
|
||||
pub const DEFAULT_PG_VERSION: PgMajorVersion = PgMajorVersion::PG17;
|
||||
|
||||
// Magic constants used to identify different kinds of files
|
||||
pub const IMAGE_FILE_MAGIC: u16 = 0x5A60;
|
||||
|
||||
@@ -25,7 +25,7 @@ use pageserver_api::keyspace::{KeySpaceRandomAccum, SparseKeySpace};
|
||||
use pageserver_api::models::RelSizeMigration;
|
||||
use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind};
|
||||
use pageserver_api::shard::ShardIdentity;
|
||||
use postgres_ffi::{BLCKSZ, TimestampTz, TransactionId};
|
||||
use postgres_ffi::{BLCKSZ, PgMajorVersion, TimestampTz, TransactionId};
|
||||
use postgres_ffi_types::forknum::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM};
|
||||
use postgres_ffi_types::{Oid, RepOriginId};
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -1081,7 +1081,7 @@ impl Timeline {
|
||||
// fetch directory entry
|
||||
let buf = self.get(TWOPHASEDIR_KEY, lsn, ctx).await?;
|
||||
|
||||
if self.pg_version >= 17 {
|
||||
if self.pg_version >= PgMajorVersion::PG17 {
|
||||
Ok(TwoPhaseDirectoryV17::des(&buf)?.xids)
|
||||
} else {
|
||||
Ok(TwoPhaseDirectory::des(&buf)?
|
||||
@@ -1613,7 +1613,7 @@ impl DatadirModification<'_> {
|
||||
.push((DirectoryKind::Db, MetricsUpdate::Set(0)));
|
||||
self.put(DBDIR_KEY, Value::Image(buf.into()));
|
||||
|
||||
let buf = if self.tline.pg_version >= 17 {
|
||||
let buf = if self.tline.pg_version >= PgMajorVersion::PG17 {
|
||||
TwoPhaseDirectoryV17::ser(&TwoPhaseDirectoryV17 {
|
||||
xids: HashSet::new(),
|
||||
})
|
||||
@@ -1967,7 +1967,7 @@ impl DatadirModification<'_> {
|
||||
) -> Result<(), WalIngestError> {
|
||||
// Add it to the directory entry
|
||||
let dirbuf = self.get(TWOPHASEDIR_KEY, ctx).await?;
|
||||
let newdirbuf = if self.tline.pg_version >= 17 {
|
||||
let newdirbuf = if self.tline.pg_version >= PgMajorVersion::PG17 {
|
||||
let mut dir = TwoPhaseDirectoryV17::des(&dirbuf)?;
|
||||
if !dir.xids.insert(xid) {
|
||||
Err(WalIngestErrorKind::FileAlreadyExists(xid))?;
|
||||
@@ -2383,7 +2383,7 @@ impl DatadirModification<'_> {
|
||||
) -> Result<(), WalIngestError> {
|
||||
// Remove it from the directory entry
|
||||
let buf = self.get(TWOPHASEDIR_KEY, ctx).await?;
|
||||
let newdirbuf = if self.tline.pg_version >= 17 {
|
||||
let newdirbuf = if self.tline.pg_version >= PgMajorVersion::PG17 {
|
||||
let mut dir = TwoPhaseDirectoryV17::des(&buf)?;
|
||||
|
||||
if !dir.xids.remove(&xid) {
|
||||
|
||||
@@ -38,6 +38,7 @@ use pageserver_api::models::{
|
||||
WalRedoManagerStatus,
|
||||
};
|
||||
use pageserver_api::shard::{ShardIdentity, ShardStripeSize, TenantShardId};
|
||||
use postgres_ffi::PgMajorVersion;
|
||||
use remote_storage::{DownloadError, GenericRemoteStorage, TimeoutOrCancel};
|
||||
use remote_timeline_client::index::GcCompactionState;
|
||||
use remote_timeline_client::manifest::{
|
||||
@@ -497,7 +498,7 @@ impl WalRedoManager {
|
||||
lsn: Lsn,
|
||||
base_img: Option<(Lsn, bytes::Bytes)>,
|
||||
records: Vec<(Lsn, wal_decoder::models::record::NeonWalRecord)>,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
redo_attempt_type: RedoAttemptType,
|
||||
) -> Result<bytes::Bytes, walredo::Error> {
|
||||
match self {
|
||||
@@ -933,7 +934,7 @@ pub(crate) enum CreateTimelineParams {
|
||||
pub(crate) struct CreateTimelineParamsBootstrap {
|
||||
pub(crate) new_timeline_id: TimelineId,
|
||||
pub(crate) existing_initdb_timeline_id: Option<TimelineId>,
|
||||
pub(crate) pg_version: u32,
|
||||
pub(crate) pg_version: PgMajorVersion,
|
||||
}
|
||||
|
||||
/// NB: See comment on [`CreateTimelineIdempotency::Branch`] for why there's no `pg_version` here.
|
||||
@@ -971,7 +972,7 @@ pub(crate) enum CreateTimelineIdempotency {
|
||||
/// NB: special treatment, see comment in [`Self`].
|
||||
FailWithConflict,
|
||||
Bootstrap {
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
},
|
||||
/// NB: branches always have the same `pg_version` as their ancestor.
|
||||
/// While [`pageserver_api::models::TimelineCreateRequestMode::Branch::pg_version`]
|
||||
@@ -2541,7 +2542,7 @@ impl TenantShard {
|
||||
self: &Arc<Self>,
|
||||
new_timeline_id: TimelineId,
|
||||
initdb_lsn: Lsn,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
ctx: &RequestContext,
|
||||
) -> anyhow::Result<(UninitializedTimeline, RequestContext)> {
|
||||
anyhow::ensure!(
|
||||
@@ -2593,7 +2594,7 @@ impl TenantShard {
|
||||
self: &Arc<Self>,
|
||||
new_timeline_id: TimelineId,
|
||||
initdb_lsn: Lsn,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
ctx: &RequestContext,
|
||||
) -> anyhow::Result<Arc<Timeline>> {
|
||||
let (uninit_tl, ctx) = self
|
||||
@@ -2632,7 +2633,7 @@ impl TenantShard {
|
||||
self: &Arc<Self>,
|
||||
new_timeline_id: TimelineId,
|
||||
initdb_lsn: Lsn,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
ctx: &RequestContext,
|
||||
in_memory_layer_desc: Vec<timeline::InMemoryLayerTestDesc>,
|
||||
delta_layer_desc: Vec<timeline::DeltaLayerTestDesc>,
|
||||
@@ -2898,7 +2899,7 @@ impl TenantShard {
|
||||
Lsn(0),
|
||||
initdb_lsn,
|
||||
initdb_lsn,
|
||||
15,
|
||||
PgMajorVersion::PG15,
|
||||
);
|
||||
this.prepare_new_timeline(
|
||||
new_timeline_id,
|
||||
@@ -5090,7 +5091,7 @@ impl TenantShard {
|
||||
pub(crate) async fn bootstrap_timeline_test(
|
||||
self: &Arc<Self>,
|
||||
timeline_id: TimelineId,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
load_existing_initdb: Option<TimelineId>,
|
||||
ctx: &RequestContext,
|
||||
) -> anyhow::Result<Arc<Timeline>> {
|
||||
@@ -5232,7 +5233,7 @@ impl TenantShard {
|
||||
async fn bootstrap_timeline(
|
||||
self: &Arc<Self>,
|
||||
timeline_id: TimelineId,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
load_existing_initdb: Option<TimelineId>,
|
||||
ctx: &RequestContext,
|
||||
) -> Result<CreateTimelineResult, CreateTimelineError> {
|
||||
@@ -5770,7 +5771,7 @@ impl TenantShard {
|
||||
async fn run_initdb(
|
||||
conf: &'static PageServerConf,
|
||||
initdb_target_dir: &Utf8Path,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
cancel: &CancellationToken,
|
||||
) -> Result<(), InitdbError> {
|
||||
let initdb_bin_path = conf
|
||||
@@ -6051,7 +6052,7 @@ pub(crate) mod harness {
|
||||
lsn: Lsn,
|
||||
base_img: Option<(Lsn, Bytes)>,
|
||||
records: Vec<(Lsn, NeonWalRecord)>,
|
||||
_pg_version: u32,
|
||||
_pg_version: PgMajorVersion,
|
||||
_redo_attempt_type: RedoAttemptType,
|
||||
) -> Result<Bytes, walredo::Error> {
|
||||
let records_neon = records.iter().all(|r| apply_neon::can_apply_in_neon(&r.1));
|
||||
@@ -6223,7 +6224,7 @@ mod tests {
|
||||
async fn randomize_timeline(
|
||||
tenant: &Arc<TenantShard>,
|
||||
new_timeline_id: TimelineId,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
spec: TestTimelineSpecification,
|
||||
random: &mut rand::rngs::StdRng,
|
||||
ctx: &RequestContext,
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
//! [`IndexPart`]: super::remote_timeline_client::index::IndexPart
|
||||
|
||||
use anyhow::ensure;
|
||||
use postgres_ffi::PgMajorVersion;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use utils::bin_ser::{BeSer, SerializeError};
|
||||
use utils::id::TimelineId;
|
||||
@@ -136,7 +137,7 @@ struct TimelineMetadataBodyV2 {
|
||||
latest_gc_cutoff_lsn: Lsn,
|
||||
|
||||
initdb_lsn: Lsn,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
@@ -167,7 +168,7 @@ impl TimelineMetadata {
|
||||
ancestor_lsn: Lsn,
|
||||
latest_gc_cutoff_lsn: Lsn,
|
||||
initdb_lsn: Lsn,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
) -> Self {
|
||||
Self {
|
||||
hdr: TimelineMetadataHeader {
|
||||
@@ -215,7 +216,7 @@ impl TimelineMetadata {
|
||||
ancestor_lsn: body.ancestor_lsn,
|
||||
latest_gc_cutoff_lsn: body.latest_gc_cutoff_lsn,
|
||||
initdb_lsn: body.initdb_lsn,
|
||||
pg_version: 14, // All timelines created before this version had pg_version 14
|
||||
pg_version: PgMajorVersion::PG14, // All timelines created before this version had pg_version 14
|
||||
};
|
||||
|
||||
hdr.format_version = METADATA_FORMAT_VERSION;
|
||||
@@ -317,7 +318,7 @@ impl TimelineMetadata {
|
||||
self.body.initdb_lsn
|
||||
}
|
||||
|
||||
pub fn pg_version(&self) -> u32 {
|
||||
pub fn pg_version(&self) -> PgMajorVersion {
|
||||
self.body.pg_version
|
||||
}
|
||||
|
||||
@@ -331,7 +332,7 @@ impl TimelineMetadata {
|
||||
Lsn::from_hex("00000000").unwrap(),
|
||||
Lsn::from_hex("00000000").unwrap(),
|
||||
Lsn::from_hex("00000000").unwrap(),
|
||||
0,
|
||||
PgMajorVersion::PG14,
|
||||
);
|
||||
let bytes = instance.to_bytes().unwrap();
|
||||
Self::from_bytes(&bytes).unwrap()
|
||||
@@ -545,7 +546,7 @@ mod tests {
|
||||
Lsn(0),
|
||||
Lsn(0),
|
||||
Lsn(0),
|
||||
14, // All timelines created before this version had pg_version 14
|
||||
PgMajorVersion::PG14, // All timelines created before this version had pg_version 14
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
@@ -565,7 +566,7 @@ mod tests {
|
||||
Lsn(0),
|
||||
// Updating this version to 17 will cause the test to fail at the
|
||||
// next assert_eq!().
|
||||
16,
|
||||
PgMajorVersion::PG16,
|
||||
);
|
||||
let expected_bytes = vec![
|
||||
/* TimelineMetadataHeader */
|
||||
|
||||
@@ -427,8 +427,8 @@ impl GcBlocking {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use postgres_ffi::PgMajorVersion;
|
||||
use std::str::FromStr;
|
||||
|
||||
use utils::id::TimelineId;
|
||||
|
||||
use super::*;
|
||||
@@ -831,7 +831,7 @@ mod tests {
|
||||
Lsn::INVALID,
|
||||
Lsn::from_str("0/1696070").unwrap(),
|
||||
Lsn::from_str("0/1696070").unwrap(),
|
||||
14,
|
||||
PgMajorVersion::PG14,
|
||||
).with_recalculated_checksum().unwrap(),
|
||||
deleted_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")),
|
||||
archived_at: None,
|
||||
@@ -893,7 +893,7 @@ mod tests {
|
||||
Lsn::INVALID,
|
||||
Lsn::from_str("0/1696070").unwrap(),
|
||||
Lsn::from_str("0/1696070").unwrap(),
|
||||
14,
|
||||
PgMajorVersion::PG14,
|
||||
).with_recalculated_checksum().unwrap(),
|
||||
deleted_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")),
|
||||
archived_at: Some(parse_naive_datetime("2023-04-29T09:00:00.123000000")),
|
||||
@@ -957,7 +957,7 @@ mod tests {
|
||||
Lsn::INVALID,
|
||||
Lsn::from_str("0/1696070").unwrap(),
|
||||
Lsn::from_str("0/1696070").unwrap(),
|
||||
14,
|
||||
PgMajorVersion::PG14,
|
||||
).with_recalculated_checksum().unwrap(),
|
||||
deleted_at: None,
|
||||
lineage: Default::default(),
|
||||
@@ -1033,7 +1033,7 @@ mod tests {
|
||||
Lsn::INVALID,
|
||||
Lsn::from_str("0/1696070").unwrap(),
|
||||
Lsn::from_str("0/1696070").unwrap(),
|
||||
14,
|
||||
PgMajorVersion::PG14,
|
||||
).with_recalculated_checksum().unwrap(),
|
||||
deleted_at: None,
|
||||
lineage: Default::default(),
|
||||
@@ -1114,7 +1114,7 @@ mod tests {
|
||||
Lsn::INVALID,
|
||||
Lsn::from_str("0/1696070").unwrap(),
|
||||
Lsn::from_str("0/1696070").unwrap(),
|
||||
14,
|
||||
PgMajorVersion::PG14,
|
||||
).with_recalculated_checksum().unwrap(),
|
||||
deleted_at: None,
|
||||
lineage: Default::default(),
|
||||
@@ -1199,7 +1199,7 @@ mod tests {
|
||||
Lsn::INVALID,
|
||||
Lsn::from_str("0/1696070").unwrap(),
|
||||
Lsn::from_str("0/1696070").unwrap(),
|
||||
14,
|
||||
PgMajorVersion::PG14,
|
||||
).with_recalculated_checksum().unwrap(),
|
||||
deleted_at: None,
|
||||
lineage: Default::default(),
|
||||
@@ -1287,7 +1287,7 @@ mod tests {
|
||||
Lsn::INVALID,
|
||||
Lsn::from_str("0/1696070").unwrap(),
|
||||
Lsn::from_str("0/1696070").unwrap(),
|
||||
14,
|
||||
PgMajorVersion::PG14,
|
||||
).with_recalculated_checksum().unwrap(),
|
||||
deleted_at: None,
|
||||
lineage: Default::default(),
|
||||
|
||||
@@ -1622,11 +1622,6 @@ impl DeltaLayerIterator<'_> {
|
||||
pub(crate) mod test {
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use bytes::Bytes;
|
||||
use itertools::MinMaxResult;
|
||||
use rand::prelude::{SeedableRng, SliceRandom, StdRng};
|
||||
use rand::{Rng, RngCore};
|
||||
|
||||
use super::*;
|
||||
use crate::DEFAULT_PG_VERSION;
|
||||
use crate::context::DownloadBehavior;
|
||||
@@ -1636,6 +1631,11 @@ pub(crate) mod test {
|
||||
use crate::tenant::storage_layer::{Layer, ResidentLayer};
|
||||
use crate::tenant::timeline::layer_manager::LayerManagerLockHolder;
|
||||
use crate::tenant::{TenantShard, Timeline};
|
||||
use bytes::Bytes;
|
||||
use itertools::MinMaxResult;
|
||||
use postgres_ffi::PgMajorVersion;
|
||||
use rand::prelude::{SeedableRng, SliceRandom, StdRng};
|
||||
use rand::{Rng, RngCore};
|
||||
|
||||
/// Construct an index for a fictional delta layer and and then
|
||||
/// traverse in order to plan vectored reads for a query. Finally,
|
||||
@@ -1995,7 +1995,7 @@ pub(crate) mod test {
|
||||
let (tenant, ctx) = h.load().await;
|
||||
let ctx = &ctx;
|
||||
let timeline = tenant
|
||||
.create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, ctx)
|
||||
.create_test_timeline(TimelineId::generate(), Lsn(0x10), PgMajorVersion::PG14, ctx)
|
||||
.await
|
||||
.unwrap();
|
||||
let ctx = &ctx.with_scope_timeline(&timeline);
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use std::time::UNIX_EPOCH;
|
||||
|
||||
use pageserver_api::key::{CONTROLFILE_KEY, Key};
|
||||
use postgres_ffi::PgMajorVersion;
|
||||
use tokio::task::JoinSet;
|
||||
use utils::completion::{self, Completion};
|
||||
use utils::id::TimelineId;
|
||||
@@ -45,7 +46,7 @@ async fn smoke_test() {
|
||||
.create_test_timeline_with_layers(
|
||||
TimelineId::generate(),
|
||||
Lsn(0x10),
|
||||
14,
|
||||
PgMajorVersion::PG14,
|
||||
&ctx,
|
||||
Default::default(), // in-memory layers
|
||||
Default::default(),
|
||||
@@ -256,7 +257,12 @@ async fn evict_and_wait_on_wanted_deleted() {
|
||||
let (tenant, ctx) = h.load().await;
|
||||
|
||||
let timeline = tenant
|
||||
.create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
|
||||
.create_test_timeline(
|
||||
TimelineId::generate(),
|
||||
Lsn(0x10),
|
||||
PgMajorVersion::PG14,
|
||||
&ctx,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@@ -341,7 +347,12 @@ fn read_wins_pending_eviction() {
|
||||
let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1));
|
||||
|
||||
let timeline = tenant
|
||||
.create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
|
||||
.create_test_timeline(
|
||||
TimelineId::generate(),
|
||||
Lsn(0x10),
|
||||
PgMajorVersion::PG14,
|
||||
&ctx,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
let ctx = ctx.with_scope_timeline(&timeline);
|
||||
@@ -474,7 +485,12 @@ fn multiple_pending_evictions_scenario(name: &'static str, in_order: bool) {
|
||||
let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1));
|
||||
|
||||
let timeline = tenant
|
||||
.create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
|
||||
.create_test_timeline(
|
||||
TimelineId::generate(),
|
||||
Lsn(0x10),
|
||||
PgMajorVersion::PG14,
|
||||
&ctx,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
let ctx = ctx.with_scope_timeline(&timeline);
|
||||
@@ -644,7 +660,12 @@ async fn cancelled_get_or_maybe_download_does_not_cancel_eviction() {
|
||||
let (tenant, ctx) = h.load().await;
|
||||
|
||||
let timeline = tenant
|
||||
.create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
|
||||
.create_test_timeline(
|
||||
TimelineId::generate(),
|
||||
Lsn(0x10),
|
||||
PgMajorVersion::PG14,
|
||||
&ctx,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
let ctx = ctx.with_scope_timeline(&timeline);
|
||||
@@ -730,7 +751,12 @@ async fn evict_and_wait_does_not_wait_for_download() {
|
||||
let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1));
|
||||
|
||||
let timeline = tenant
|
||||
.create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
|
||||
.create_test_timeline(
|
||||
TimelineId::generate(),
|
||||
Lsn(0x10),
|
||||
PgMajorVersion::PG14,
|
||||
&ctx,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
let ctx = ctx.with_scope_timeline(&timeline);
|
||||
@@ -836,7 +862,12 @@ async fn eviction_cancellation_on_drop() {
|
||||
let (tenant, ctx) = h.load().await;
|
||||
|
||||
let timeline = tenant
|
||||
.create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
|
||||
.create_test_timeline(
|
||||
TimelineId::generate(),
|
||||
Lsn(0x10),
|
||||
PgMajorVersion::PG14,
|
||||
&ctx,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
|
||||
@@ -58,7 +58,7 @@ use pageserver_api::reltag::{BlockNumber, RelTag};
|
||||
use pageserver_api::shard::{ShardIdentity, ShardIndex, ShardNumber, TenantShardId};
|
||||
use postgres_connection::PgConnectionConfig;
|
||||
use postgres_ffi::v14::xlog_utils;
|
||||
use postgres_ffi::{WAL_SEGMENT_SIZE, to_pg_timestamp};
|
||||
use postgres_ffi::{PgMajorVersion, WAL_SEGMENT_SIZE, to_pg_timestamp};
|
||||
use rand::Rng;
|
||||
use remote_storage::DownloadError;
|
||||
use serde_with::serde_as;
|
||||
@@ -225,7 +225,7 @@ pub struct Timeline {
|
||||
/// to shards, and is constant through the lifetime of this Timeline.
|
||||
shard_identity: ShardIdentity,
|
||||
|
||||
pub pg_version: u32,
|
||||
pub pg_version: PgMajorVersion,
|
||||
|
||||
/// The tuple has two elements.
|
||||
/// 1. `LayerFileManager` keeps track of the various physical representations of the layer files (inmem, local, remote).
|
||||
@@ -2913,7 +2913,7 @@ impl Timeline {
|
||||
shard_identity: ShardIdentity,
|
||||
walredo_mgr: Option<Arc<super::WalRedoManager>>,
|
||||
resources: TimelineResources,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
state: TimelineState,
|
||||
attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>,
|
||||
create_idempotency: crate::tenant::CreateTimelineIdempotency,
|
||||
@@ -7593,6 +7593,7 @@ mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use pageserver_api::key::Key;
|
||||
use postgres_ffi::PgMajorVersion;
|
||||
use std::iter::Iterator;
|
||||
use tracing::Instrument;
|
||||
use utils::id::TimelineId;
|
||||
@@ -7667,7 +7668,7 @@ mod tests {
|
||||
.create_test_timeline_with_layers(
|
||||
TimelineId::generate(),
|
||||
Lsn(0x10),
|
||||
14,
|
||||
PgMajorVersion::PG14,
|
||||
&ctx,
|
||||
Vec::new(), // in-memory layers
|
||||
delta_layers,
|
||||
@@ -7803,7 +7804,7 @@ mod tests {
|
||||
.create_test_timeline_with_layers(
|
||||
TimelineId::generate(),
|
||||
Lsn(0x10),
|
||||
14,
|
||||
PgMajorVersion::PG14,
|
||||
&ctx,
|
||||
Vec::new(), // in-memory layers
|
||||
delta_layers,
|
||||
@@ -7863,7 +7864,12 @@ mod tests {
|
||||
|
||||
let (tenant, ctx) = harness.load().await;
|
||||
let timeline = tenant
|
||||
.create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx)
|
||||
.create_test_timeline(
|
||||
TimelineId::generate(),
|
||||
Lsn(0x10),
|
||||
PgMajorVersion::PG14,
|
||||
&ctx,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ use std::sync::Arc;
|
||||
|
||||
use anyhow::Context;
|
||||
use bytes::Bytes;
|
||||
use postgres_ffi::ControlFileData;
|
||||
use postgres_ffi::{ControlFileData, PgMajorVersion};
|
||||
use remote_storage::{
|
||||
Download, DownloadError, DownloadKind, DownloadOpts, GenericRemoteStorage, Listing,
|
||||
ListingObject, RemotePath, RemoteStorageConfig,
|
||||
@@ -264,7 +264,7 @@ impl ControlFile {
|
||||
pub(crate) fn base_lsn(&self) -> Lsn {
|
||||
Lsn(self.control_file_data.checkPoint).align()
|
||||
}
|
||||
pub(crate) fn pg_version(&self) -> u32 {
|
||||
pub(crate) fn pg_version(&self) -> PgMajorVersion {
|
||||
self.try_pg_version()
|
||||
.expect("prepare() checks that try_pg_version doesn't error")
|
||||
}
|
||||
@@ -274,13 +274,14 @@ impl ControlFile {
|
||||
pub(crate) fn control_file_buf(&self) -> &Bytes {
|
||||
&self.control_file_buf
|
||||
}
|
||||
fn try_pg_version(&self) -> anyhow::Result<u32> {
|
||||
|
||||
fn try_pg_version(&self) -> anyhow::Result<PgMajorVersion> {
|
||||
Ok(match self.control_file_data.catalog_version_no {
|
||||
// thesea are from catversion.h
|
||||
202107181 => 14,
|
||||
202209061 => 15,
|
||||
202307071 => 16,
|
||||
202406281 => 17,
|
||||
202107181 => PgMajorVersion::PG14,
|
||||
202209061 => PgMajorVersion::PG15,
|
||||
202307071 => PgMajorVersion::PG16,
|
||||
202406281 => PgMajorVersion::PG17,
|
||||
catversion => {
|
||||
anyhow::bail!("unrecognized catalog version {catversion}")
|
||||
}
|
||||
|
||||
@@ -32,8 +32,8 @@ use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind};
|
||||
use pageserver_api::shard::ShardIdentity;
|
||||
use postgres_ffi::walrecord::*;
|
||||
use postgres_ffi::{
|
||||
TimestampTz, TransactionId, dispatch_pgversion, enum_pgversion, enum_pgversion_dispatch,
|
||||
fsm_logical_to_physical, pg_constants,
|
||||
PgMajorVersion, TimestampTz, TransactionId, dispatch_pgversion, enum_pgversion,
|
||||
enum_pgversion_dispatch, fsm_logical_to_physical, pg_constants,
|
||||
};
|
||||
use postgres_ffi_types::forknum::{FSM_FORKNUM, INIT_FORKNUM, MAIN_FORKNUM, VISIBILITYMAP_FORKNUM};
|
||||
use tracing::*;
|
||||
@@ -781,7 +781,7 @@ impl WalIngest {
|
||||
) -> Result<(), WalIngestError> {
|
||||
let (xact_common, is_commit, is_prepared) = match record {
|
||||
XactRecord::Prepare(XactPrepare { xl_xid, data }) => {
|
||||
let xid: u64 = if modification.tline.pg_version >= 17 {
|
||||
let xid: u64 = if modification.tline.pg_version >= PgMajorVersion::PG17 {
|
||||
self.adjust_to_full_transaction_id(xl_xid)?
|
||||
} else {
|
||||
xl_xid as u64
|
||||
@@ -886,7 +886,7 @@ impl WalIngest {
|
||||
xl_xid, parsed.xid, lsn,
|
||||
);
|
||||
|
||||
let xid: u64 = if modification.tline.pg_version >= 17 {
|
||||
let xid: u64 = if modification.tline.pg_version >= PgMajorVersion::PG17 {
|
||||
self.adjust_to_full_transaction_id(parsed.xid)?
|
||||
} else {
|
||||
parsed.xid as u64
|
||||
@@ -1241,7 +1241,7 @@ impl WalIngest {
|
||||
if xlog_checkpoint.oldestActiveXid == pg_constants::INVALID_TRANSACTION_ID
|
||||
&& info == pg_constants::XLOG_CHECKPOINT_SHUTDOWN
|
||||
{
|
||||
let oldest_active_xid = if pg_version >= 17 {
|
||||
let oldest_active_xid = if pg_version >= PgMajorVersion::PG17 {
|
||||
let mut oldest_active_full_xid = cp.nextXid.value;
|
||||
for xid in modification.tline.list_twophase_files(lsn, ctx).await? {
|
||||
if xid < oldest_active_full_xid {
|
||||
@@ -1475,10 +1475,11 @@ impl WalIngest {
|
||||
|
||||
const fn rate_limiter(
|
||||
&self,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
) -> Option<&Lazy<Mutex<RateLimit>>> {
|
||||
const MIN_PG_VERSION: u32 = 14;
|
||||
const MAX_PG_VERSION: u32 = 17;
|
||||
const MIN_PG_VERSION: u32 = PgMajorVersion::PG14.major_version_num();
|
||||
const MAX_PG_VERSION: u32 = PgMajorVersion::PG17.major_version_num();
|
||||
let pg_version = pg_version.major_version_num();
|
||||
|
||||
if pg_version < MIN_PG_VERSION || pg_version > MAX_PG_VERSION {
|
||||
return None;
|
||||
@@ -1603,6 +1604,7 @@ async fn get_relsize(
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use anyhow::Result;
|
||||
use postgres_ffi::PgMajorVersion;
|
||||
use postgres_ffi::RELSEG_SIZE;
|
||||
|
||||
use super::*;
|
||||
@@ -1625,7 +1627,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_zeroed_checkpoint_decodes_correctly() -> Result<(), anyhow::Error> {
|
||||
for i in 14..=16 {
|
||||
for i in PgMajorVersion::ALL {
|
||||
dispatch_pgversion!(i, {
|
||||
pgv::CheckPoint::decode(&pgv::ZERO_CHECKPOINT)?;
|
||||
});
|
||||
@@ -2335,7 +2337,7 @@ mod tests {
|
||||
// 5. Grep sk logs for "restart decoder" to get startpoint
|
||||
// 6. Run just the decoder from this test to get the endpoint.
|
||||
// It's the last LSN the decoder will output.
|
||||
let pg_version = 15; // The test data was generated by pg15
|
||||
let pg_version = PgMajorVersion::PG15; // The test data was generated by pg15
|
||||
let path = "test_data/sk_wal_segment_from_pgbench";
|
||||
let wal_segment_path = format!("{path}/000000010000000000000001.zst");
|
||||
let source_initdb_path = format!("{path}/{INITDB_PATH}");
|
||||
|
||||
@@ -33,6 +33,7 @@ use bytes::{Bytes, BytesMut};
|
||||
use pageserver_api::key::Key;
|
||||
use pageserver_api::models::{WalRedoManagerProcessStatus, WalRedoManagerStatus};
|
||||
use pageserver_api::shard::TenantShardId;
|
||||
use postgres_ffi::PgMajorVersion;
|
||||
use tracing::*;
|
||||
use utils::lsn::Lsn;
|
||||
use utils::sync::gate::GateError;
|
||||
@@ -165,7 +166,7 @@ impl PostgresRedoManager {
|
||||
lsn: Lsn,
|
||||
base_img: Option<(Lsn, Bytes)>,
|
||||
records: Vec<(Lsn, NeonWalRecord)>,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
redo_attempt_type: RedoAttemptType,
|
||||
) -> Result<Bytes, Error> {
|
||||
if records.is_empty() {
|
||||
@@ -232,7 +233,7 @@ impl PostgresRedoManager {
|
||||
/// # Cancel-Safety
|
||||
///
|
||||
/// This method is cancellation-safe.
|
||||
pub async fn ping(&self, pg_version: u32) -> Result<(), Error> {
|
||||
pub async fn ping(&self, pg_version: PgMajorVersion) -> Result<(), Error> {
|
||||
self.do_with_walredo_process(pg_version, |proc| async move {
|
||||
proc.ping(Duration::from_secs(1))
|
||||
.await
|
||||
@@ -342,7 +343,7 @@ impl PostgresRedoManager {
|
||||
O,
|
||||
>(
|
||||
&self,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
closure: F,
|
||||
) -> Result<O, Error> {
|
||||
let proc: Arc<Process> = match self.redo_process.get_or_init_detached().await {
|
||||
@@ -442,7 +443,7 @@ impl PostgresRedoManager {
|
||||
base_img_lsn: Lsn,
|
||||
records: &[(Lsn, NeonWalRecord)],
|
||||
wal_redo_timeout: Duration,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
max_retry_attempts: u32,
|
||||
) -> Result<Bytes, Error> {
|
||||
*(self.last_redo_at.lock().unwrap()) = Some(Instant::now());
|
||||
@@ -572,6 +573,7 @@ mod tests {
|
||||
use bytes::Bytes;
|
||||
use pageserver_api::key::Key;
|
||||
use pageserver_api::shard::TenantShardId;
|
||||
use postgres_ffi::PgMajorVersion;
|
||||
use tracing::Instrument;
|
||||
use utils::id::TenantId;
|
||||
use utils::lsn::Lsn;
|
||||
@@ -586,7 +588,7 @@ mod tests {
|
||||
let h = RedoHarness::new().unwrap();
|
||||
|
||||
h.manager
|
||||
.ping(14)
|
||||
.ping(PgMajorVersion::PG14)
|
||||
.instrument(h.span())
|
||||
.await
|
||||
.expect("ping should work");
|
||||
@@ -612,7 +614,7 @@ mod tests {
|
||||
Lsn::from_str("0/16E2408").unwrap(),
|
||||
None,
|
||||
short_records(),
|
||||
14,
|
||||
PgMajorVersion::PG14,
|
||||
RedoAttemptType::ReadPage,
|
||||
)
|
||||
.instrument(h.span())
|
||||
@@ -641,7 +643,7 @@ mod tests {
|
||||
Lsn::from_str("0/16E2408").unwrap(),
|
||||
None,
|
||||
short_records(),
|
||||
14,
|
||||
PgMajorVersion::PG14,
|
||||
RedoAttemptType::ReadPage,
|
||||
)
|
||||
.instrument(h.span())
|
||||
@@ -663,7 +665,7 @@ mod tests {
|
||||
Lsn::INVALID,
|
||||
None,
|
||||
short_records(),
|
||||
16, /* 16 currently produces stderr output on startup, which adds a nice extra edge */
|
||||
PgMajorVersion::PG16, /* 16 currently produces stderr output on startup, which adds a nice extra edge */
|
||||
RedoAttemptType::ReadPage,
|
||||
)
|
||||
.instrument(h.span())
|
||||
|
||||
@@ -12,7 +12,7 @@ use anyhow::Context;
|
||||
use bytes::Bytes;
|
||||
use pageserver_api::reltag::RelTag;
|
||||
use pageserver_api::shard::TenantShardId;
|
||||
use postgres_ffi::BLCKSZ;
|
||||
use postgres_ffi::{BLCKSZ, PgMajorVersion};
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||
use tracing::{Instrument, debug, error, instrument};
|
||||
use utils::lsn::Lsn;
|
||||
@@ -54,11 +54,11 @@ impl WalRedoProcess {
|
||||
//
|
||||
// Start postgres binary in special WAL redo mode.
|
||||
//
|
||||
#[instrument(skip_all,fields(pg_version=pg_version))]
|
||||
#[instrument(skip_all,fields(pg_version=pg_version.major_version_num()))]
|
||||
pub(crate) fn launch(
|
||||
conf: &'static PageServerConf,
|
||||
tenant_shard_id: TenantShardId,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
) -> anyhow::Result<Self> {
|
||||
crate::span::debug_assert_current_span_has_tenant_id();
|
||||
|
||||
|
||||
@@ -58,6 +58,7 @@ metrics.workspace = true
|
||||
pem.workspace = true
|
||||
postgres_backend.workspace = true
|
||||
postgres_ffi.workspace = true
|
||||
postgres_versioninfo.workspace = true
|
||||
pq_proto.workspace = true
|
||||
remote_storage.workspace = true
|
||||
safekeeper_api.workspace = true
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
use std::vec;
|
||||
|
||||
use anyhow::{Result, bail};
|
||||
use postgres_versioninfo::PgVersionId;
|
||||
use pq_proto::SystemId;
|
||||
use safekeeper_api::membership::{Configuration, INVALID_GENERATION};
|
||||
use safekeeper_api::{ServerInfo, Term};
|
||||
@@ -46,7 +47,7 @@ struct SafeKeeperStateV1 {
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct ServerInfoV2 {
|
||||
/// Postgres server version
|
||||
pub pg_version: u32,
|
||||
pub pg_version: PgVersionId,
|
||||
pub system_id: SystemId,
|
||||
pub tenant_id: TenantId,
|
||||
pub timeline_id: TimelineId,
|
||||
@@ -75,7 +76,7 @@ pub struct SafeKeeperStateV2 {
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct ServerInfoV3 {
|
||||
/// Postgres server version
|
||||
pub pg_version: u32,
|
||||
pub pg_version: PgVersionId,
|
||||
pub system_id: SystemId,
|
||||
#[serde(with = "hex")]
|
||||
pub tenant_id: TenantId,
|
||||
@@ -444,13 +445,13 @@ pub fn upgrade_control_file(buf: &[u8], version: u32) -> Result<TimelinePersiste
|
||||
} else if version == 6 {
|
||||
info!("reading safekeeper control file version {}", version);
|
||||
let mut oldstate = TimelinePersistentState::des(&buf[..buf.len()])?;
|
||||
if oldstate.server.pg_version != 0 {
|
||||
if oldstate.server.pg_version != PgVersionId::UNKNOWN {
|
||||
return Ok(oldstate);
|
||||
}
|
||||
|
||||
// set pg_version to the default v14
|
||||
info!("setting pg_version to 140005");
|
||||
oldstate.server.pg_version = 140005;
|
||||
oldstate.server.pg_version = PgVersionId::from_full_pg_version(140005);
|
||||
|
||||
return Ok(oldstate);
|
||||
} else if version == 7 {
|
||||
@@ -547,6 +548,7 @@ pub fn downgrade_v10_to_v9(state: &TimelinePersistentState) -> TimelinePersisten
|
||||
mod tests {
|
||||
use std::str::FromStr;
|
||||
|
||||
use postgres_versioninfo::PgMajorVersion;
|
||||
use utils::Hex;
|
||||
use utils::id::NodeId;
|
||||
|
||||
@@ -563,7 +565,7 @@ mod tests {
|
||||
epoch: 43,
|
||||
},
|
||||
server: ServerInfoV2 {
|
||||
pg_version: 14,
|
||||
pg_version: PgVersionId::from(PgMajorVersion::PG14),
|
||||
system_id: 0x1234567887654321,
|
||||
tenant_id,
|
||||
timeline_id,
|
||||
@@ -586,8 +588,8 @@ mod tests {
|
||||
0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
// epoch
|
||||
0x2b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
// pg_version
|
||||
0x0e, 0x00, 0x00, 0x00,
|
||||
// pg_version = 140000
|
||||
0xE0, 0x22, 0x02, 0x00,
|
||||
// system_id
|
||||
0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x34, 0x12,
|
||||
// tenant_id
|
||||
@@ -626,7 +628,7 @@ mod tests {
|
||||
}]),
|
||||
},
|
||||
server: ServerInfoV2 {
|
||||
pg_version: 14,
|
||||
pg_version: PgVersionId::from(PgMajorVersion::PG14),
|
||||
system_id: 0x1234567887654321,
|
||||
tenant_id,
|
||||
timeline_id,
|
||||
@@ -646,7 +648,7 @@ mod tests {
|
||||
let expected = [
|
||||
0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56,
|
||||
0x00, 0x00, 0x00, 0x00, 0xE0, 0x22, 0x02, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56,
|
||||
0x34, 0x12, 0xcf, 0x04, 0x80, 0x92, 0x97, 0x07, 0xee, 0x75, 0x37, 0x23, 0x37, 0xef,
|
||||
0xaa, 0x5e, 0xcf, 0x96, 0x11, 0x2d, 0xed, 0x66, 0x42, 0x2a, 0xa5, 0xe9, 0x53, 0xe5,
|
||||
0x44, 0x0f, 0xa5, 0x42, 0x7a, 0xc4, 0x78, 0x56, 0x34, 0x12, 0xc4, 0x7a, 0x42, 0xa5,
|
||||
@@ -675,7 +677,7 @@ mod tests {
|
||||
}]),
|
||||
},
|
||||
server: ServerInfoV3 {
|
||||
pg_version: 14,
|
||||
pg_version: PgVersionId::from(PgMajorVersion::PG14),
|
||||
system_id: 0x1234567887654321,
|
||||
tenant_id,
|
||||
timeline_id,
|
||||
@@ -695,7 +697,7 @@ mod tests {
|
||||
let expected = [
|
||||
0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56,
|
||||
0x00, 0x00, 0x00, 0x00, 0xE0, 0x22, 0x02, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56,
|
||||
0x34, 0x12, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x66, 0x30, 0x34,
|
||||
0x38, 0x30, 0x39, 0x32, 0x39, 0x37, 0x30, 0x37, 0x65, 0x65, 0x37, 0x35, 0x33, 0x37,
|
||||
0x32, 0x33, 0x33, 0x37, 0x65, 0x66, 0x61, 0x61, 0x35, 0x65, 0x63, 0x66, 0x39, 0x36,
|
||||
@@ -731,7 +733,7 @@ mod tests {
|
||||
}]),
|
||||
},
|
||||
server: ServerInfo {
|
||||
pg_version: 14,
|
||||
pg_version: PgVersionId::from(PgMajorVersion::PG14),
|
||||
system_id: 0x1234567887654321,
|
||||
wal_seg_size: 0x12345678,
|
||||
},
|
||||
@@ -765,7 +767,7 @@ mod tests {
|
||||
0x30, 0x66, 0x61, 0x35, 0x34, 0x32, 0x37, 0x61, 0x63, 0x34, 0x2a, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x0e, 0x00, 0x00, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x34, 0x12, 0x78, 0x56,
|
||||
0xE0, 0x22, 0x02, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x34, 0x12, 0x78, 0x56,
|
||||
0x34, 0x12, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x34, 0x37, 0x61,
|
||||
0x34, 0x32, 0x61, 0x35, 0x30, 0x66, 0x34, 0x34, 0x65, 0x35, 0x35, 0x33, 0x65, 0x39,
|
||||
0x61, 0x35, 0x32, 0x61, 0x34, 0x32, 0x36, 0x36, 0x65, 0x64, 0x32, 0x64, 0x31, 0x31,
|
||||
|
||||
@@ -9,6 +9,7 @@ use anyhow::{Context, Result, bail};
|
||||
use byteorder::{LittleEndian, ReadBytesExt};
|
||||
use bytes::{Buf, BufMut, Bytes, BytesMut};
|
||||
use postgres_ffi::{MAX_SEND_SIZE, TimeLineID};
|
||||
use postgres_versioninfo::{PgMajorVersion, PgVersionId};
|
||||
use pq_proto::SystemId;
|
||||
use safekeeper_api::membership::{
|
||||
INVALID_GENERATION, MemberSet, SafekeeperGeneration as Generation, SafekeeperId,
|
||||
@@ -29,7 +30,7 @@ use crate::{control_file, wal_storage};
|
||||
|
||||
pub const SK_PROTO_VERSION_2: u32 = 2;
|
||||
pub const SK_PROTO_VERSION_3: u32 = 3;
|
||||
pub const UNKNOWN_SERVER_VERSION: u32 = 0;
|
||||
pub const UNKNOWN_SERVER_VERSION: PgVersionId = PgVersionId::UNKNOWN;
|
||||
|
||||
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
|
||||
pub struct TermLsn {
|
||||
@@ -218,7 +219,7 @@ pub struct ProposerGreeting {
|
||||
pub timeline_id: TimelineId,
|
||||
pub mconf: membership::Configuration,
|
||||
/// Postgres server version
|
||||
pub pg_version: u32,
|
||||
pub pg_version: PgVersionId,
|
||||
pub system_id: SystemId,
|
||||
pub wal_seg_size: u32,
|
||||
}
|
||||
@@ -229,7 +230,7 @@ pub struct ProposerGreetingV2 {
|
||||
/// proposer-acceptor protocol version
|
||||
pub protocol_version: u32,
|
||||
/// Postgres server version
|
||||
pub pg_version: u32,
|
||||
pub pg_version: PgVersionId,
|
||||
pub proposer_id: PgUuid,
|
||||
pub system_id: SystemId,
|
||||
pub timeline_id: TimelineId,
|
||||
@@ -511,7 +512,7 @@ impl ProposerAcceptorMessage {
|
||||
tenant_id,
|
||||
timeline_id,
|
||||
mconf,
|
||||
pg_version,
|
||||
pg_version: PgVersionId::from_full_pg_version(pg_version),
|
||||
system_id,
|
||||
wal_seg_size,
|
||||
};
|
||||
@@ -961,7 +962,8 @@ where
|
||||
* because safekeepers parse WAL headers and the format
|
||||
* may change between versions.
|
||||
*/
|
||||
if msg.pg_version / 10000 != self.state.server.pg_version / 10000
|
||||
if PgMajorVersion::try_from(msg.pg_version)?
|
||||
!= PgMajorVersion::try_from(self.state.server.pg_version)?
|
||||
&& self.state.server.pg_version != UNKNOWN_SERVER_VERSION
|
||||
{
|
||||
bail!(
|
||||
@@ -1748,7 +1750,7 @@ mod tests {
|
||||
}]),
|
||||
},
|
||||
server: ServerInfo {
|
||||
pg_version: 14,
|
||||
pg_version: PgVersionId::from_full_pg_version(140000),
|
||||
system_id: 0x1234567887654321,
|
||||
wal_seg_size: 0x12345678,
|
||||
},
|
||||
|
||||
@@ -8,8 +8,8 @@ use futures::StreamExt;
|
||||
use futures::future::Either;
|
||||
use pageserver_api::shard::ShardIdentity;
|
||||
use postgres_backend::{CopyStreamHandlerEnd, PostgresBackend};
|
||||
use postgres_ffi::get_current_timestamp;
|
||||
use postgres_ffi::waldecoder::{WalDecodeError, WalStreamDecoder};
|
||||
use postgres_ffi::{PgMajorVersion, get_current_timestamp};
|
||||
use pq_proto::{BeMessage, InterpretedWalRecordsBody, WalSndKeepAlive};
|
||||
use tokio::io::{AsyncRead, AsyncWrite};
|
||||
use tokio::sync::mpsc::error::SendError;
|
||||
@@ -78,7 +78,7 @@ pub(crate) struct InterpretedWalReader {
|
||||
shard_senders: HashMap<ShardIdentity, smallvec::SmallVec<[ShardSenderState; 1]>>,
|
||||
shard_notification_rx: Option<tokio::sync::mpsc::UnboundedReceiver<AttachShardNotification>>,
|
||||
state: Arc<std::sync::RwLock<InterpretedWalReaderState>>,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
}
|
||||
|
||||
/// A handle for [`InterpretedWalReader`] which allows for interacting with it
|
||||
@@ -258,7 +258,7 @@ impl InterpretedWalReader {
|
||||
start_pos: Lsn,
|
||||
tx: tokio::sync::mpsc::Sender<Batch>,
|
||||
shard: ShardIdentity,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
appname: &Option<String>,
|
||||
) -> InterpretedWalReaderHandle {
|
||||
let state = Arc::new(std::sync::RwLock::new(InterpretedWalReaderState::Running {
|
||||
@@ -322,7 +322,7 @@ impl InterpretedWalReader {
|
||||
start_pos: Lsn,
|
||||
tx: tokio::sync::mpsc::Sender<Batch>,
|
||||
shard: ShardIdentity,
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
shard_notification_rx: Option<
|
||||
tokio::sync::mpsc::UnboundedReceiver<AttachShardNotification>,
|
||||
>,
|
||||
@@ -718,7 +718,7 @@ mod tests {
|
||||
use std::time::Duration;
|
||||
|
||||
use pageserver_api::shard::{ShardIdentity, ShardStripeSize};
|
||||
use postgres_ffi::MAX_SEND_SIZE;
|
||||
use postgres_ffi::{MAX_SEND_SIZE, PgMajorVersion};
|
||||
use tokio::sync::mpsc::error::TryRecvError;
|
||||
use utils::id::{NodeId, TenantTimelineId};
|
||||
use utils::lsn::Lsn;
|
||||
@@ -734,7 +734,7 @@ mod tests {
|
||||
|
||||
const SIZE: usize = 8 * 1024;
|
||||
const MSG_COUNT: usize = 200;
|
||||
const PG_VERSION: u32 = 17;
|
||||
const PG_VERSION: PgMajorVersion = PgMajorVersion::PG17;
|
||||
const SHARD_COUNT: u8 = 2;
|
||||
|
||||
let start_lsn = Lsn::from_str("0/149FD18").unwrap();
|
||||
@@ -876,7 +876,7 @@ mod tests {
|
||||
|
||||
const SIZE: usize = 8 * 1024;
|
||||
const MSG_COUNT: usize = 200;
|
||||
const PG_VERSION: u32 = 17;
|
||||
const PG_VERSION: PgMajorVersion = PgMajorVersion::PG17;
|
||||
const SHARD_COUNT: u8 = 2;
|
||||
|
||||
let start_lsn = Lsn::from_str("0/149FD18").unwrap();
|
||||
@@ -1025,7 +1025,7 @@ mod tests {
|
||||
|
||||
const SIZE: usize = 64 * 1024;
|
||||
const MSG_COUNT: usize = 10;
|
||||
const PG_VERSION: u32 = 17;
|
||||
const PG_VERSION: PgMajorVersion = PgMajorVersion::PG17;
|
||||
const SHARD_COUNT: u8 = 2;
|
||||
const WAL_READER_BATCH_SIZE: usize = 8192;
|
||||
|
||||
@@ -1148,7 +1148,7 @@ mod tests {
|
||||
|
||||
const SIZE: usize = 8 * 1024;
|
||||
const MSG_COUNT: usize = 10;
|
||||
const PG_VERSION: u32 = 17;
|
||||
const PG_VERSION: PgMajorVersion = PgMajorVersion::PG17;
|
||||
|
||||
let start_lsn = Lsn::from_str("0/149FD18").unwrap();
|
||||
let env = Env::new(true).unwrap();
|
||||
|
||||
@@ -12,7 +12,7 @@ use futures::FutureExt;
|
||||
use itertools::Itertools;
|
||||
use parking_lot::Mutex;
|
||||
use postgres_backend::{CopyStreamHandlerEnd, PostgresBackend, PostgresBackendReader, QueryError};
|
||||
use postgres_ffi::{MAX_SEND_SIZE, TimestampTz, get_current_timestamp};
|
||||
use postgres_ffi::{MAX_SEND_SIZE, PgMajorVersion, TimestampTz, get_current_timestamp};
|
||||
use pq_proto::{BeMessage, WalSndKeepAlive, XLogDataBody};
|
||||
use safekeeper_api::Term;
|
||||
use safekeeper_api::models::{
|
||||
@@ -559,7 +559,9 @@ impl SafekeeperPostgresHandler {
|
||||
format,
|
||||
compression,
|
||||
} => {
|
||||
let pg_version = tli.tli.get_state().await.1.server.pg_version / 10000;
|
||||
let pg_version =
|
||||
PgMajorVersion::try_from(tli.tli.get_state().await.1.server.pg_version)
|
||||
.unwrap();
|
||||
let end_watch_view = end_watch.view();
|
||||
let wal_residence_guard = tli.wal_residence_guard().await?;
|
||||
let (tx, rx) = tokio::sync::mpsc::channel::<Batch>(2);
|
||||
|
||||
@@ -7,6 +7,7 @@ use std::time::SystemTime;
|
||||
|
||||
use anyhow::{Result, bail};
|
||||
use postgres_ffi::WAL_SEGMENT_SIZE;
|
||||
use postgres_versioninfo::{PgMajorVersion, PgVersionId};
|
||||
use safekeeper_api::membership::Configuration;
|
||||
use safekeeper_api::models::{TimelineMembershipSwitchResponse, TimelineTermBumpResponse};
|
||||
use safekeeper_api::{INITIAL_TERM, ServerInfo, Term};
|
||||
@@ -149,8 +150,8 @@ impl TimelinePersistentState {
|
||||
&TenantTimelineId::empty(),
|
||||
Configuration::empty(),
|
||||
ServerInfo {
|
||||
pg_version: 170000, /* Postgres server version (major * 10000) */
|
||||
system_id: 0, /* Postgres system identifier */
|
||||
pg_version: PgVersionId::from(PgMajorVersion::PG17),
|
||||
system_id: 0, /* Postgres system identifier */
|
||||
wal_seg_size: WAL_SEGMENT_SIZE as u32,
|
||||
},
|
||||
Lsn::INVALID,
|
||||
|
||||
@@ -19,6 +19,7 @@ use futures::future::BoxFuture;
|
||||
use postgres_ffi::v14::xlog_utils::{IsPartialXLogFileName, IsXLogFileName, XLogFromFileName};
|
||||
use postgres_ffi::waldecoder::WalStreamDecoder;
|
||||
use postgres_ffi::{PG_TLI, XLogFileName, XLogSegNo, dispatch_pgversion};
|
||||
use postgres_versioninfo::{PgMajorVersion, PgVersionId};
|
||||
use pq_proto::SystemId;
|
||||
use remote_storage::RemotePath;
|
||||
use std::sync::Arc;
|
||||
@@ -92,7 +93,7 @@ pub struct PhysicalStorage {
|
||||
|
||||
/// Size of WAL segment in bytes.
|
||||
wal_seg_size: usize,
|
||||
pg_version: u32,
|
||||
pg_version: PgVersionId,
|
||||
system_id: u64,
|
||||
|
||||
/// Written to disk, but possibly still in the cache and not fully persisted.
|
||||
@@ -180,7 +181,7 @@ impl PhysicalStorage {
|
||||
let write_lsn = if state.commit_lsn == Lsn(0) {
|
||||
Lsn(0)
|
||||
} else {
|
||||
let version = state.server.pg_version / 10000;
|
||||
let version = PgMajorVersion::try_from(state.server.pg_version).unwrap();
|
||||
|
||||
dispatch_pgversion!(
|
||||
version,
|
||||
@@ -226,7 +227,10 @@ impl PhysicalStorage {
|
||||
write_record_lsn: write_lsn,
|
||||
flush_lsn,
|
||||
flush_record_lsn: flush_lsn,
|
||||
decoder: WalStreamDecoder::new(write_lsn, state.server.pg_version / 10000),
|
||||
decoder: WalStreamDecoder::new(
|
||||
write_lsn,
|
||||
PgMajorVersion::try_from(state.server.pg_version).unwrap(),
|
||||
),
|
||||
file: None,
|
||||
pending_wal_truncation: true,
|
||||
})
|
||||
@@ -408,7 +412,7 @@ impl Storage for PhysicalStorage {
|
||||
|
||||
let segno = init_lsn.segment_number(self.wal_seg_size);
|
||||
let (mut file, _) = self.open_or_create(segno).await?;
|
||||
let major_pg_version = self.pg_version / 10000;
|
||||
let major_pg_version = PgMajorVersion::try_from(self.pg_version).unwrap();
|
||||
let wal_seg =
|
||||
postgres_ffi::generate_wal_segment(segno, self.system_id, major_pg_version, init_lsn)?;
|
||||
file.seek(SeekFrom::Start(0)).await?;
|
||||
@@ -654,7 +658,7 @@ pub struct WalReader {
|
||||
// pos is in the same segment as timeline_start_lsn.
|
||||
timeline_start_lsn: Lsn,
|
||||
// integer version number of PostgreSQL, e.g. 14; 15; 16
|
||||
pg_version: u32,
|
||||
pg_version: PgMajorVersion,
|
||||
system_id: SystemId,
|
||||
timeline_start_segment: Option<Bytes>,
|
||||
}
|
||||
@@ -697,7 +701,7 @@ impl WalReader {
|
||||
wal_backup,
|
||||
local_start_lsn: state.local_start_lsn,
|
||||
timeline_start_lsn: state.timeline_start_lsn,
|
||||
pg_version: state.server.pg_version / 10000,
|
||||
pg_version: PgMajorVersion::try_from(state.server.pg_version).unwrap(),
|
||||
system_id: state.server.system_id,
|
||||
timeline_start_segment: None,
|
||||
})
|
||||
|
||||
@@ -7,8 +7,8 @@ use anyhow::Result;
|
||||
use bytes::{Buf, BytesMut};
|
||||
use futures::future::BoxFuture;
|
||||
use parking_lot::Mutex;
|
||||
use postgres_ffi::XLogSegNo;
|
||||
use postgres_ffi::waldecoder::WalStreamDecoder;
|
||||
use postgres_ffi::{PgMajorVersion, XLogSegNo};
|
||||
use safekeeper::metrics::WalStorageMetrics;
|
||||
use safekeeper::state::TimelinePersistentState;
|
||||
use safekeeper::{control_file, wal_storage};
|
||||
@@ -142,7 +142,7 @@ impl DiskWALStorage {
|
||||
write_lsn,
|
||||
write_record_lsn: flush_lsn,
|
||||
flush_record_lsn: flush_lsn,
|
||||
decoder: WalStreamDecoder::new(flush_lsn, 16),
|
||||
decoder: WalStreamDecoder::new(flush_lsn, PgMajorVersion::PG16),
|
||||
unflushed_bytes: BytesMut::new(),
|
||||
disk,
|
||||
})
|
||||
@@ -151,7 +151,7 @@ impl DiskWALStorage {
|
||||
fn find_end_of_wal(disk: Arc<TimelineDisk>, start_lsn: Lsn) -> Result<Lsn> {
|
||||
let mut buf = [0; 8192];
|
||||
let mut pos = start_lsn.0;
|
||||
let mut decoder = WalStreamDecoder::new(start_lsn, 16);
|
||||
let mut decoder = WalStreamDecoder::new(start_lsn, PgMajorVersion::PG16);
|
||||
let mut result = start_lsn;
|
||||
loop {
|
||||
disk.wal.lock().read(pos, &mut buf);
|
||||
@@ -204,7 +204,7 @@ impl wal_storage::Storage for DiskWALStorage {
|
||||
self.decoder.available(),
|
||||
startpos,
|
||||
);
|
||||
self.decoder = WalStreamDecoder::new(startpos, 16);
|
||||
self.decoder = WalStreamDecoder::new(startpos, PgMajorVersion::PG16);
|
||||
}
|
||||
self.decoder.feed_bytes(buf);
|
||||
loop {
|
||||
@@ -242,7 +242,7 @@ impl wal_storage::Storage for DiskWALStorage {
|
||||
self.write_record_lsn = end_pos;
|
||||
self.flush_record_lsn = end_pos;
|
||||
self.unflushed_bytes.clear();
|
||||
self.decoder = WalStreamDecoder::new(end_pos, 16);
|
||||
self.decoder = WalStreamDecoder::new(end_pos, PgMajorVersion::PG16);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@ use pageserver_api::controller_api::{
|
||||
SafekeeperDescribeResponse, SkSchedulingPolicy, TimelineImportRequest,
|
||||
};
|
||||
use pageserver_api::models::{SafekeeperInfo, SafekeepersInfo, TimelineInfo};
|
||||
use safekeeper_api::PgVersionId;
|
||||
use safekeeper_api::membership::{MemberSet, SafekeeperGeneration, SafekeeperId};
|
||||
use tokio::task::JoinSet;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
@@ -44,7 +45,7 @@ impl Service {
|
||||
&self,
|
||||
tenant_id: TenantId,
|
||||
timeline_id: TimelineId,
|
||||
pg_version: u32,
|
||||
pg_version: PgVersionId,
|
||||
timeline_persistence: &TimelinePersistence,
|
||||
) -> Result<Vec<NodeId>, ApiError> {
|
||||
// If quorum is reached, return if we are outside of a specified timeout
|
||||
@@ -219,7 +220,7 @@ impl Service {
|
||||
read_only: bool,
|
||||
) -> Result<SafekeepersInfo, ApiError> {
|
||||
let timeline_id = timeline_info.timeline_id;
|
||||
let pg_version = timeline_info.pg_version * 10000;
|
||||
let pg_version = PgVersionId::from(timeline_info.pg_version);
|
||||
// Initially start_lsn is determined by last_record_lsn in pageserver
|
||||
// response as it does initdb. However, later we persist it and in sk
|
||||
// creation calls replace with the value from the timeline row if it
|
||||
|
||||
@@ -172,7 +172,7 @@ def test_cannot_create_endpoint_on_non_uploaded_timeline(neon_env_builder: NeonE
|
||||
env.initial_tenant,
|
||||
env.initial_timeline,
|
||||
MembershipConfiguration(generation=1, members=[sk.safekeeper_id()], new_members=None),
|
||||
int(env.pg_version),
|
||||
int(env.pg_version) * 10000,
|
||||
Lsn(0),
|
||||
None,
|
||||
)
|
||||
|
||||
Reference in New Issue
Block a user