Rename old project name references

Kirill Bulatov
2022-09-13 15:43:53 +03:00
committed by Kirill Bulatov
parent 260ec20a02
commit b8eb908a3d
128 changed files with 1428 additions and 1495 deletions

Cargo.lock

@@ -2048,7 +2048,7 @@ dependencies = [
[[package]]
name = "postgres"
version = "0.19.2"
source = "git+https://github.com/zenithdb/rust-postgres.git?rev=d052ee8b86fff9897c77b0fe89ea9daba0e1fa38#d052ee8b86fff9897c77b0fe89ea9daba0e1fa38"
source = "git+https://github.com/neondatabase/rust-postgres.git?rev=d052ee8b86fff9897c77b0fe89ea9daba0e1fa38#d052ee8b86fff9897c77b0fe89ea9daba0e1fa38"
dependencies = [
"bytes",
"fallible-iterator",
@@ -2061,7 +2061,7 @@ dependencies = [
[[package]]
name = "postgres-protocol"
version = "0.6.4"
source = "git+https://github.com/zenithdb/rust-postgres.git?rev=d052ee8b86fff9897c77b0fe89ea9daba0e1fa38#d052ee8b86fff9897c77b0fe89ea9daba0e1fa38"
source = "git+https://github.com/neondatabase/rust-postgres.git?rev=d052ee8b86fff9897c77b0fe89ea9daba0e1fa38#d052ee8b86fff9897c77b0fe89ea9daba0e1fa38"
dependencies = [
"base64",
"byteorder",
@@ -2079,7 +2079,7 @@ dependencies = [
[[package]]
name = "postgres-types"
version = "0.2.3"
source = "git+https://github.com/zenithdb/rust-postgres.git?rev=d052ee8b86fff9897c77b0fe89ea9daba0e1fa38#d052ee8b86fff9897c77b0fe89ea9daba0e1fa38"
source = "git+https://github.com/neondatabase/rust-postgres.git?rev=d052ee8b86fff9897c77b0fe89ea9daba0e1fa38#d052ee8b86fff9897c77b0fe89ea9daba0e1fa38"
dependencies = [
"bytes",
"fallible-iterator",
@@ -3295,7 +3295,7 @@ dependencies = [
[[package]]
name = "tokio-postgres"
version = "0.7.6"
source = "git+https://github.com/zenithdb/rust-postgres.git?rev=d052ee8b86fff9897c77b0fe89ea9daba0e1fa38#d052ee8b86fff9897c77b0fe89ea9daba0e1fa38"
source = "git+https://github.com/neondatabase/rust-postgres.git?rev=d052ee8b86fff9897c77b0fe89ea9daba0e1fa38#d052ee8b86fff9897c77b0fe89ea9daba0e1fa38"
dependencies = [
"async-trait",
"byteorder",


@@ -70,4 +70,4 @@ lto = true
# This is only needed for proxy's tests.
# TODO: we should probably fork `tokio-postgres-rustls` instead.
[patch.crates-io]
tokio-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }


@@ -22,7 +22,7 @@ RUN set -e \
&& rm -rf pg_install/v15/build \
&& tar -C pg_install/v14 -czf /home/nonroot/postgres_install.tar.gz .
# Build zenith binaries
# Build neon binaries
FROM $REPOSITORY/$IMAGE:$TAG AS build
WORKDIR /home/nonroot
ARG GIT_VERSION=local
@@ -60,12 +60,12 @@ RUN set -e \
openssl \
ca-certificates \
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \
&& useradd -d /data zenith \
&& chown -R zenith:zenith /data
&& useradd -d /data neon \
&& chown -R neon:neon /data
COPY --from=build --chown=zenith:zenith /home/nonroot/target/release/pageserver /usr/local/bin
COPY --from=build --chown=zenith:zenith /home/nonroot/target/release/safekeeper /usr/local/bin
COPY --from=build --chown=zenith:zenith /home/nonroot/target/release/proxy /usr/local/bin
COPY --from=build --chown=neon:neon /home/nonroot/target/release/pageserver /usr/local/bin
COPY --from=build --chown=neon:neon /home/nonroot/target/release/safekeeper /usr/local/bin
COPY --from=build --chown=neon:neon /home/nonroot/target/release/proxy /usr/local/bin
# v14 is default for now
COPY --from=pg-build /home/nonroot/pg_install/v14 /usr/local/
@@ -73,7 +73,7 @@ COPY --from=pg-build /home/nonroot/postgres_install.tar.gz /data/
# By default, the pageserver uses a `.neon/` working directory under WORKDIR, so create one and fill it with the dummy config.
# Now, when `docker run ... pageserver` is run, it can start without errors, yet will have some default dummy values.
RUN mkdir -p /data/.neon/ && chown -R zenith:zenith /data/.neon/ \
RUN mkdir -p /data/.neon/ && chown -R neon:neon /data/.neon/ \
&& /usr/local/bin/pageserver -D /data/.neon/ --init \
-c "id=1234" \
-c "broker_endpoints=['http://etcd:2379']" \
@@ -82,7 +82,7 @@ RUN mkdir -p /data/.neon/ && chown -R zenith:zenith /data/.neon/ \
-c "listen_http_addr='0.0.0.0:9898'"
VOLUME ["/data"]
USER zenith
USER neon
EXPOSE 6400
EXPOSE 9898
CMD ["/bin/bash"]
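As a usage sketch (the image name and tag below are placeholders, mirroring the `$REPOSITORY/$IMAGE:$TAG` build args above), the resulting image can then be started as described in the comment above:

```bash
# Image name/tag are placeholders. The pageserver starts against the
# pre-initialized /data/.neon/ config baked into the image above.
docker run -v pageserver_data:/data -p 6400:6400 -p 9898:9898 \
  <repository>/<image>:<tag> pageserver
```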


@@ -10,12 +10,12 @@ clap = "3.0"
env_logger = "0.9"
hyper = { version = "0.14", features = ["full"] }
log = { version = "0.4", features = ["std", "serde"] }
postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
regex = "1"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1"
tar = "0.4"
tokio = { version = "1.17", features = ["macros", "rt", "rt-multi-thread"] }
tokio-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
url = "2.2.2"
workspace_hack = { version = "0.1", path = "../workspace_hack" }


@@ -8,7 +8,7 @@ clap = "3.0"
comfy-table = "5.0.1"
git-version = "0.3.5"
tar = "0.4.38"
postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
serde = { version = "1.0", features = ["derive"] }
serde_with = "1.12.0"
toml = "0.5"


@@ -1,4 +1,4 @@
# Minimal zenith environment with one safekeeper. This is equivalent to the built-in
# Minimal neon environment with one safekeeper. This is equivalent to the built-in
# defaults that you get with no --config
[pageserver]
listen_pg_addr = '127.0.0.1:64000'


@@ -27,10 +27,10 @@ use std::process::exit;
use std::str::FromStr;
use utils::{
auth::{Claims, Scope},
id::{NodeId, TenantId, TenantTimelineId, TimelineId},
lsn::Lsn,
postgres_backend::AuthType,
project_git_version,
zid::{NodeId, ZTenantId, ZTenantTimelineId, ZTimelineId},
};
// Default id of a safekeeper node, if not specified on the command line.
@@ -72,7 +72,7 @@ struct TimelineTreeEl {
/// Name, recovered from neon config mappings
pub name: Option<String>,
/// Holds all direct children of this timeline referenced using `timeline_id`.
pub children: BTreeSet<ZTimelineId>,
pub children: BTreeSet<TimelineId>,
}
// Main entry point for the 'neon_local' CLI utility
@@ -321,7 +321,7 @@ fn main() -> Result<()> {
///
fn print_timelines_tree(
timelines: Vec<TimelineInfo>,
mut timeline_name_mappings: HashMap<ZTenantTimelineId, String>,
mut timeline_name_mappings: HashMap<TenantTimelineId, String>,
) -> Result<()> {
let mut timelines_hash = timelines
.iter()
@@ -332,7 +332,7 @@ fn print_timelines_tree(
info: t.clone(),
children: BTreeSet::new(),
name: timeline_name_mappings
.remove(&ZTenantTimelineId::new(t.tenant_id, t.timeline_id)),
.remove(&TenantTimelineId::new(t.tenant_id, t.timeline_id)),
},
)
})
@@ -374,7 +374,7 @@ fn print_timeline(
nesting_level: usize,
is_last: &[bool],
timeline: &TimelineTreeEl,
timelines: &HashMap<ZTimelineId, TimelineTreeEl>,
timelines: &HashMap<TimelineId, TimelineTreeEl>,
) -> Result<()> {
let local_remote = match (timeline.info.local.as_ref(), timeline.info.remote.as_ref()) {
(None, None) => unreachable!("in this case no info for a timeline is found"),
@@ -452,8 +452,8 @@ fn print_timeline(
/// Connects to the pageserver to query this information.
fn get_timeline_infos(
env: &local_env::LocalEnv,
tenant_id: &ZTenantId,
) -> Result<HashMap<ZTimelineId, TimelineInfo>> {
tenant_id: &TenantId,
) -> Result<HashMap<TimelineId, TimelineInfo>> {
Ok(PageServerNode::from_env(env)
.timeline_list(tenant_id)?
.into_iter()
@@ -462,7 +462,7 @@ fn get_timeline_infos(
}
// Helper function to parse --tenant_id option, or get the default from config file
fn get_tenant_id(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> anyhow::Result<ZTenantId> {
fn get_tenant_id(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> anyhow::Result<TenantId> {
if let Some(tenant_id_from_arguments) = parse_tenant_id(sub_match).transpose() {
tenant_id_from_arguments
} else if let Some(default_id) = env.default_tenant_id {
@@ -472,18 +472,18 @@ fn get_tenant_id(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> anyhow::R
}
}
fn parse_tenant_id(sub_match: &ArgMatches) -> anyhow::Result<Option<ZTenantId>> {
fn parse_tenant_id(sub_match: &ArgMatches) -> anyhow::Result<Option<TenantId>> {
sub_match
.value_of("tenant-id")
.map(ZTenantId::from_str)
.map(TenantId::from_str)
.transpose()
.context("Failed to parse tenant id from the argument string")
}
fn parse_timeline_id(sub_match: &ArgMatches) -> anyhow::Result<Option<ZTimelineId>> {
fn parse_timeline_id(sub_match: &ArgMatches) -> anyhow::Result<Option<TimelineId>> {
sub_match
.value_of("timeline-id")
.map(ZTimelineId::from_str)
.map(TimelineId::from_str)
.transpose()
.context("Failed to parse timeline id from the argument string")
}
@@ -504,9 +504,9 @@ fn handle_init(init_match: &ArgMatches) -> anyhow::Result<LocalEnv> {
let mut env =
LocalEnv::parse_config(&toml_file).context("Failed to create neon configuration")?;
env.init().context("Failed to initialize neon repository")?;
// default_tenantid was generated by the `env.init()` call above
let initial_tenant_id = env.default_tenant_id.unwrap();
let initial_tenant_id = env
.default_tenant_id
.expect("default_tenant_id should be generated by the `env.init()` call above");
// Initialize pageserver, create initial tenant and timeline.
let pageserver = PageServerNode::from_env(&env);
@@ -759,7 +759,7 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
};
let branch_name = timeline_name_mappings
.get(&ZTenantTimelineId::new(tenant_id, node.timeline_id))
.get(&TenantTimelineId::new(tenant_id, node.timeline_id))
.map(|name| name.as_str())
.unwrap_or("?");
@@ -810,7 +810,7 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
let node = cplane.nodes.get(&(tenant_id, node_name.to_owned()));
let auth_token = if matches!(env.pageserver.auth_type, AuthType::ZenithJWT) {
let auth_token = if matches!(env.pageserver.auth_type, AuthType::NeonJWT) {
let claims = Claims::new(Some(tenant_id), Scope::Tenant);
Some(env.generate_auth_token(&claims)?)


@@ -13,9 +13,9 @@ use std::time::Duration;
use anyhow::{Context, Result};
use utils::{
connstring::connection_host_port,
id::{TenantId, TimelineId},
lsn::Lsn,
postgres_backend::AuthType,
zid::{ZTenantId, ZTimelineId},
};
use crate::local_env::LocalEnv;
@@ -28,7 +28,7 @@ use crate::storage::PageServerNode;
pub struct ComputeControlPlane {
base_port: u16,
pageserver: Arc<PageServerNode>,
pub nodes: BTreeMap<(ZTenantId, String), Arc<PostgresNode>>,
pub nodes: BTreeMap<(TenantId, String), Arc<PostgresNode>>,
env: LocalEnv,
}
@@ -76,9 +76,9 @@ impl ComputeControlPlane {
pub fn new_node(
&mut self,
tenant_id: ZTenantId,
tenant_id: TenantId,
name: &str,
timeline_id: ZTimelineId,
timeline_id: TimelineId,
lsn: Option<Lsn>,
port: Option<u16>,
) -> Result<Arc<PostgresNode>> {
@@ -114,9 +114,9 @@ pub struct PostgresNode {
pub env: LocalEnv,
pageserver: Arc<PageServerNode>,
is_test: bool,
pub timeline_id: ZTimelineId,
pub timeline_id: TimelineId,
pub lsn: Option<Lsn>, // if it's a read-only node. None for primary
pub tenant_id: ZTenantId,
pub tenant_id: TenantId,
uses_wal_proposer: bool,
}
@@ -148,8 +148,8 @@ impl PostgresNode {
// Read a few options from the config file
let context = format!("in config file {}", cfg_path_str);
let port: u16 = conf.parse_field("port", &context)?;
let timeline_id: ZTimelineId = conf.parse_field("neon.timeline_id", &context)?;
let tenant_id: ZTenantId = conf.parse_field("neon.tenant_id", &context)?;
let timeline_id: TimelineId = conf.parse_field("neon.timeline_id", &context)?;
let tenant_id: TenantId = conf.parse_field("neon.tenant_id", &context)?;
let uses_wal_proposer = conf.get("neon.safekeepers").is_some();
// parse recovery_target_lsn, if any
@@ -292,7 +292,7 @@ impl PostgresNode {
// variable during compute pg startup. It is done this way because
// otherwise the user would be able to retrieve the value using SHOW
// command or pg_settings
let password = if let AuthType::ZenithJWT = auth_type {
let password = if let AuthType::NeonJWT = auth_type {
"$ZENITH_AUTH_TOKEN"
} else {
""
@@ -301,7 +301,7 @@ impl PostgresNode {
// Also note that not all parameters are supported here, because in compute we substitute $ZENITH_AUTH_TOKEN.
// We parse this string and rebuild it with the token from the env var; for simplicity the rebuild
// uses only the needed variables, namely host, port, user, password.
format!("postgresql://no_user:{}@{}:{}", password, host, port)
format!("postgresql://no_user:{password}@{host}:{port}")
};
conf.append("shared_preload_libraries", "neon");
conf.append_line("");


@@ -14,8 +14,8 @@ use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use utils::{
auth::{encode_from_key_file, Claims, Scope},
id::{NodeId, TenantId, TenantTimelineId, TimelineId},
postgres_backend::AuthType,
zid::{NodeId, ZTenantId, ZTenantTimelineId, ZTimelineId},
};
use crate::safekeeper::SafekeeperNode;
@@ -48,13 +48,13 @@ pub struct LocalEnv {
// Path to pageserver binary.
#[serde(default)]
pub zenith_distrib_dir: PathBuf,
pub neon_distrib_dir: PathBuf,
// Default tenant ID to use with the 'zenith' command line utility, when
// --tenantid is not explicitly specified.
// Default tenant ID to use with the 'neon_local' command line utility, when
// --tenant_id is not explicitly specified.
#[serde(default)]
#[serde_as(as = "Option<DisplayFromStr>")]
pub default_tenant_id: Option<ZTenantId>,
pub default_tenant_id: Option<TenantId>,
// used to issue tokens during e.g pg start
#[serde(default)]
@@ -69,11 +69,11 @@ pub struct LocalEnv {
/// Keep human-readable aliases in memory (and persist them to config), to hide ZId hex strings from the user.
#[serde(default)]
// A `HashMap<String, HashMap<ZTenantId, ZTimelineId>>` would be more appropriate here,
// A `HashMap<String, HashMap<TenantId, TimelineId>>` would be more appropriate here,
// but deserialization into a generic toml object as `toml::Value::try_from` fails with an error.
// https://toml.io/en/v1.0.0 does not contain a concept of "a table inside another table".
#[serde_as(as = "HashMap<_, Vec<(DisplayFromStr, DisplayFromStr)>>")]
branch_name_mappings: HashMap<String, Vec<(ZTenantId, ZTimelineId)>>,
branch_name_mappings: HashMap<String, Vec<(TenantId, TimelineId)>>,
}
/// Etcd broker config for cluster internal communication.
@@ -204,20 +204,20 @@ impl LocalEnv {
}
pub fn pageserver_bin(&self) -> anyhow::Result<PathBuf> {
Ok(self.zenith_distrib_dir.join("pageserver"))
Ok(self.neon_distrib_dir.join("pageserver"))
}
pub fn safekeeper_bin(&self) -> anyhow::Result<PathBuf> {
Ok(self.zenith_distrib_dir.join("safekeeper"))
Ok(self.neon_distrib_dir.join("safekeeper"))
}
pub fn pg_data_dirs_path(&self) -> PathBuf {
self.base_data_dir.join("pgdatadirs").join("tenants")
}
pub fn pg_data_dir(&self, tenantid: &ZTenantId, branch_name: &str) -> PathBuf {
pub fn pg_data_dir(&self, tenant_id: &TenantId, branch_name: &str) -> PathBuf {
self.pg_data_dirs_path()
.join(tenantid.to_string())
.join(tenant_id.to_string())
.join(branch_name)
}
@@ -233,8 +233,8 @@ impl LocalEnv {
pub fn register_branch_mapping(
&mut self,
branch_name: String,
tenant_id: ZTenantId,
timeline_id: ZTimelineId,
tenant_id: TenantId,
timeline_id: TimelineId,
) -> anyhow::Result<()> {
let existing_values = self
.branch_name_mappings
@@ -260,22 +260,22 @@ impl LocalEnv {
pub fn get_branch_timeline_id(
&self,
branch_name: &str,
tenant_id: ZTenantId,
) -> Option<ZTimelineId> {
tenant_id: TenantId,
) -> Option<TimelineId> {
self.branch_name_mappings
.get(branch_name)?
.iter()
.find(|(mapped_tenant_id, _)| mapped_tenant_id == &tenant_id)
.map(|&(_, timeline_id)| timeline_id)
.map(ZTimelineId::from)
.map(TimelineId::from)
}
pub fn timeline_name_mappings(&self) -> HashMap<ZTenantTimelineId, String> {
pub fn timeline_name_mappings(&self) -> HashMap<TenantTimelineId, String> {
self.branch_name_mappings
.iter()
.flat_map(|(name, tenant_timelines)| {
tenant_timelines.iter().map(|&(tenant_id, timeline_id)| {
(ZTenantTimelineId::new(tenant_id, timeline_id), name.clone())
(TenantTimelineId::new(tenant_id, timeline_id), name.clone())
})
})
.collect()
@@ -299,14 +299,14 @@ impl LocalEnv {
}
}
// Find zenith binaries.
if env.zenith_distrib_dir == Path::new("") {
env.zenith_distrib_dir = env::current_exe()?.parent().unwrap().to_owned();
// Find neon binaries.
if env.neon_distrib_dir == Path::new("") {
env.neon_distrib_dir = env::current_exe()?.parent().unwrap().to_owned();
}
// If no initial tenant ID was given, generate it.
if env.default_tenant_id.is_none() {
env.default_tenant_id = Some(ZTenantId::generate());
env.default_tenant_id = Some(TenantId::generate());
}
env.base_data_dir = base_path();
@@ -320,12 +320,12 @@ impl LocalEnv {
if !repopath.exists() {
bail!(
"Zenith config is not found in {}. You need to run 'neon_local init' first",
"Neon config is not found in {}. You need to run 'neon_local init' first",
repopath.to_str().unwrap()
);
}
// TODO: check that it looks like a zenith repository
// TODO: check that it looks like a neon repository
// load and parse file
let config = fs::read_to_string(repopath.join("config"))?;
@@ -404,10 +404,10 @@ impl LocalEnv {
);
}
for binary in ["pageserver", "safekeeper"] {
if !self.zenith_distrib_dir.join(binary).exists() {
if !self.neon_distrib_dir.join(binary).exists() {
bail!(
"Can't find binary '{binary}' in zenith distrib dir '{}'",
self.zenith_distrib_dir.display()
"Can't find binary '{binary}' in neon distrib dir '{}'",
self.neon_distrib_dir.display()
);
}
}


@@ -2,7 +2,7 @@
/// Module for parsing postgresql.conf file.
///
/// NOTE: This doesn't implement the full, correct postgresql.conf syntax. Just
/// enough to extract a few settings we need in Zenith, assuming you don't do
/// enough to extract a few settings we need in Neon, assuming you don't do
/// funny stuff like include-directives or funny escaping.
use anyhow::{bail, Context, Result};
use once_cell::sync::Lazy;


@@ -17,7 +17,7 @@ use thiserror::Error;
use utils::{
connstring::connection_address,
http::error::HttpErrorBody,
zid::{NodeId, ZTenantId, ZTimelineId},
id::{NodeId, TenantId, TimelineId},
};
use crate::local_env::{LocalEnv, SafekeeperConf};
@@ -269,7 +269,7 @@ impl SafekeeperNode {
fn http_request<U: IntoUrl>(&self, method: Method, url: U) -> RequestBuilder {
// TODO: authentication
//if self.env.auth_type == AuthType::ZenithJWT {
//if self.env.auth_type == AuthType::NeonJWT {
// builder = builder.bearer_auth(&self.env.safekeeper_auth_token)
//}
self.http_client.request(method, url)
@@ -284,8 +284,8 @@ impl SafekeeperNode {
pub fn timeline_create(
&self,
tenant_id: ZTenantId,
timeline_id: ZTimelineId,
tenant_id: TenantId,
timeline_id: TimelineId,
peer_ids: Vec<NodeId>,
) -> Result<()> {
Ok(self


@@ -21,9 +21,9 @@ use thiserror::Error;
use utils::{
connstring::connection_address,
http::error::HttpErrorBody,
id::{TenantId, TimelineId},
lsn::Lsn,
postgres_backend::AuthType,
zid::{ZTenantId, ZTimelineId},
};
use crate::local_env::LocalEnv;
@@ -83,7 +83,7 @@ pub struct PageServerNode {
impl PageServerNode {
pub fn from_env(env: &LocalEnv) -> PageServerNode {
let password = if env.pageserver.auth_type == AuthType::ZenithJWT {
let password = if env.pageserver.auth_type == AuthType::NeonJWT {
&env.pageserver.auth_token
} else {
""
@@ -109,10 +109,10 @@ impl PageServerNode {
pub fn initialize(
&self,
create_tenant: Option<ZTenantId>,
initial_timeline_id: Option<ZTimelineId>,
create_tenant: Option<TenantId>,
initial_timeline_id: Option<TimelineId>,
config_overrides: &[&str],
) -> anyhow::Result<ZTimelineId> {
) -> anyhow::Result<TimelineId> {
let id = format!("id={}", self.env.pageserver.id);
// FIXME: the paths should be shell-escaped to handle paths with spaces, quotes etc.
let pg_distrib_dir_param =
@@ -173,9 +173,9 @@ impl PageServerNode {
fn try_init_timeline(
&self,
new_tenant_id: Option<ZTenantId>,
new_timeline_id: Option<ZTimelineId>,
) -> anyhow::Result<ZTimelineId> {
new_tenant_id: Option<TenantId>,
new_timeline_id: Option<TimelineId>,
) -> anyhow::Result<TimelineId> {
let initial_tenant_id = self.tenant_create(new_tenant_id, HashMap::new())?;
let initial_timeline_info =
self.timeline_create(initial_tenant_id, new_timeline_id, None, None)?;
@@ -345,7 +345,7 @@ impl PageServerNode {
fn http_request<U: IntoUrl>(&self, method: Method, url: U) -> RequestBuilder {
let mut builder = self.http_client.request(method, url);
if self.env.pageserver.auth_type == AuthType::ZenithJWT {
if self.env.pageserver.auth_type == AuthType::NeonJWT {
builder = builder.bearer_auth(&self.env.pageserver.auth_token)
}
builder
@@ -368,9 +368,9 @@ impl PageServerNode {
pub fn tenant_create(
&self,
new_tenant_id: Option<ZTenantId>,
new_tenant_id: Option<TenantId>,
settings: HashMap<&str, &str>,
) -> anyhow::Result<ZTenantId> {
) -> anyhow::Result<TenantId> {
self.http_request(Method::POST, format!("{}/tenant", self.http_base_url))
.json(&TenantCreateRequest {
new_tenant_id,
@@ -422,7 +422,7 @@ impl PageServerNode {
})
}
pub fn tenant_config(&self, tenant_id: ZTenantId, settings: HashMap<&str, &str>) -> Result<()> {
pub fn tenant_config(&self, tenant_id: TenantId, settings: HashMap<&str, &str>) -> Result<()> {
self.http_request(Method::PUT, format!("{}/tenant/config", self.http_base_url))
.json(&TenantConfigRequest {
tenant_id,
@@ -471,7 +471,7 @@ impl PageServerNode {
Ok(())
}
pub fn timeline_list(&self, tenant_id: &ZTenantId) -> anyhow::Result<Vec<TimelineInfo>> {
pub fn timeline_list(&self, tenant_id: &TenantId) -> anyhow::Result<Vec<TimelineInfo>> {
let timeline_infos: Vec<TimelineInfo> = self
.http_request(
Method::GET,
@@ -486,10 +486,10 @@ impl PageServerNode {
pub fn timeline_create(
&self,
tenant_id: ZTenantId,
new_timeline_id: Option<ZTimelineId>,
tenant_id: TenantId,
new_timeline_id: Option<TimelineId>,
ancestor_start_lsn: Option<Lsn>,
ancestor_timeline_id: Option<ZTimelineId>,
ancestor_timeline_id: Option<TimelineId>,
) -> anyhow::Result<TimelineInfo> {
self.http_request(
Method::POST,
@@ -524,8 +524,8 @@ impl PageServerNode {
/// * `pg_wal` - if there's any wal to import: (end lsn, path to `pg_wal.tar`)
pub fn timeline_import(
&self,
tenant_id: ZTenantId,
timeline_id: ZTimelineId,
tenant_id: TenantId,
timeline_id: TimelineId,
base: (Lsn, PathBuf),
pg_wal: Option<(Lsn, PathBuf)>,
) -> anyhow::Result<()> {


@@ -2,14 +2,14 @@
### Overview
Current state of authentication includes usage of JWT tokens in communication between compute and pageserver and between CLI and pageserver. JWT token is signed using RSA keys. CLI generates a key pair during call to `zenith init`. Using following openssl commands:
The current state of authentication includes the use of JWT tokens in communication between compute and pageserver, and between the CLI and pageserver. The JWT token is signed using RSA keys. The CLI generates a key pair during `neon_local init`, using the following openssl commands:
```bash
openssl genrsa -out private_key.pem 2048
openssl rsa -in private_key.pem -pubout -outform PEM -out public_key.pem
```
CLI also generates signed token and saves it in the config for later access to pageserver. Now authentication is optional. Pageserver has two variables in config: `auth_validation_public_key_path` and `auth_type`, so when auth type present and set to `ZenithJWT` pageserver will require authentication for connections. Actual JWT is passed in password field of connection string. There is a caveat for psql, it silently truncates passwords to 100 symbols, so to correctly pass JWT via psql you have to either use PGPASSWORD environment variable, or store password in psql config file.
The CLI also generates a signed token and saves it in the config for later access to the pageserver. Authentication is currently optional. The pageserver has two config variables, `auth_validation_public_key_path` and `auth_type`: when the auth type is present and set to `NeonJWT`, the pageserver requires authentication for connections. The actual JWT is passed in the password field of the connection string. There is a caveat for psql: it silently truncates passwords to 100 characters, so to correctly pass a JWT via psql you have to either use the PGPASSWORD environment variable or store the password in the psql config file.
Currently there is no authentication between compute and safekeepers, because this communication layer is under heavy refactoring; support for authentication will be added there once the refactoring is done. For now, the safekeeper supports a "hardcoded" token passed via an environment variable, so that the callmemaybe command in the pageserver can be used.
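For example, assuming a locally running pageserver on the port from the sample config (the host, port, and token value here are illustrative placeholders, not prescriptive):

```bash
# psql truncates long passwords, so pass the JWT via PGPASSWORD instead.
# Take the actual address and token from your local neon_local config.
export PGPASSWORD="<jwt-generated-by-neon_local-init>"
psql -h 127.0.0.1 -p 64000
```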


@@ -2,26 +2,26 @@
### Overview
Zenith supports multitenancy. One pageserver can serve multiple tenants at once. Tenants can be managed via zenith CLI. During page server setup tenant can be created using ```zenith init --create-tenant``` Also tenants can be added into the system on the fly without pageserver restart. This can be done using the following cli command: ```zenith tenant create``` Tenants use random identifiers which can be represented as a 32 symbols hexadecimal string. So zenith tenant create accepts desired tenant id as an optional argument. The concept of timelines/branches is working independently per tenant.
Neon supports multitenancy: one pageserver can serve multiple tenants at once, and tenants can be managed via the neon_local CLI. During pageserver setup, a tenant can be created using ```neon_local init --create-tenant```. Tenants can also be added to the system on the fly, without a pageserver restart, using the following CLI command: ```neon_local tenant create```. Tenants use random identifiers, represented as 32-character hexadecimal strings, and `neon_local tenant create` accepts the desired tenant id as an optional argument. The concept of timelines/branches works independently per tenant.
### Tenants in other commands
By default during `zenith init` new tenant is created on the pageserver. Newly created tenant's id is saved to cli config, so other commands can use it automatically if no direct argument `--tenantid=<tenantid>` is provided. So generally tenantid more frequently appears in internal pageserver interface. Its commands take tenantid argument to distinguish to which tenant operation should be applied. CLI support creation of new tenants.
By default, `neon_local init` creates a new tenant on the pageserver. The newly created tenant's id is saved to the CLI config, so other commands can use it automatically if no explicit `--tenant_id=<tenant_id>` argument is provided. The tenant_id therefore appears more frequently in the internal pageserver interface, whose commands take a tenant_id argument to determine which tenant an operation should be applied to. The CLI supports creation of new tenants.
Examples for cli:
```sh
zenith tenant list
neon_local tenant list
zenith tenant create // generates new id
neon_local tenant create // generates new id
zenith tenant create ee6016ec31116c1b7c33dfdfca38892f
neon_local tenant create ee6016ec31116c1b7c33dfdfca38892f
zenith pg create main // default tenant from zenith init
neon_local pg create main // default tenant from neon_local init
zenith pg create main --tenantid=ee6016ec31116c1b7c33dfdfca38892f
neon_local pg create main --tenant_id=ee6016ec31116c1b7c33dfdfca38892f
zenith branch --tenantid=ee6016ec31116c1b7c33dfdfca38892f
neon_local branch --tenant_id=ee6016ec31116c1b7c33dfdfca38892f
```
### Data layout
@@ -56,4 +56,4 @@ Tenant id is passed to postgres via GUC the same way as the timeline. Tenant id
### Safety
For now particular tenant can only appear on a particular pageserver. Set of safekeepers are also pinned to particular (tenantid, timeline) pair so there can only be one writer for particular (tenantid, timeline).
For now, a particular tenant can only appear on a particular pageserver. The set of safekeepers is also pinned to a particular (tenant_id, timeline_id) pair, so there can only be one writer for a given (tenant_id, timeline_id).


@@ -109,7 +109,7 @@ Repository
The repository stores all the page versions, or WAL records needed to
reconstruct them. Each tenant has a separate Repository, which is
stored in the .neon/tenants/<tenantid> directory.
stored in the .neon/tenants/<tenant_id> directory.
Repository is an abstract trait, defined in `repository.rs`. It is
implemented by the LayeredRepository object in


@@ -123,7 +123,7 @@ The files are called "layer files". Each layer file covers a range of keys, and
a range of LSNs (or a single LSN, in case of image layers). You can think of it
as a rectangle in the two-dimensional key-LSN space. The layer files for each
timeline are stored in the timeline's subdirectory under
`.neon/tenants/<tenantid>/timelines`.
`.neon/tenants/<tenant_id>/timelines`.
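As a loose illustration of the "rectangle in the two-dimensional key-LSN space" idea (these are not the pageserver's actual types, just a sketch):

```rust
use std::ops::Range;

/// Illustrative only: a layer file's coverage modeled as a rectangle
/// in the two-dimensional (key, LSN) space described above.
struct LayerCoverage {
    key_range: Range<u64>, // the range of keys this layer covers
    lsn_range: Range<u64>, // the range of LSNs; effectively one LSN for image layers
}

impl LayerCoverage {
    /// Does this layer hold data for `key` at `lsn`?
    fn covers(&self, key: u64, lsn: u64) -> bool {
        self.key_range.contains(&key) && self.lsn_range.contains(&lsn)
    }
}
```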
There are two kinds of layer files: images and delta layers. An image file
contains a snapshot of all keys at a particular LSN, whereas a delta file
@@ -351,7 +351,7 @@ branch.
Note: It doesn't make any difference if the child branch is created
when the end of the main branch was at LSN 250, or later when the tip of
the main branch had already moved on. The latter case, creating a
branch at a historic LSN, is how we support PITR in Zenith.
branch at a historic LSN, is how we support PITR in Neon.
# Garbage collection


@@ -9,7 +9,7 @@ This feature allows to migrate a timeline from one pageserver to another by util
Pageserver implements two new http handlers: timeline attach and timeline detach.
Timeline migration is performed in the following way:
1. Timeline attach is called on the target pageserver. This asks the pageserver to download the latest checkpoint uploaded to S3.
2. For now it is necessary to manually initialize replication stream via callmemaybe call so target pageserver initializes replication from safekeeper (it is desired to avoid this and initialize replication directly in attach handler, but this requires some refactoring (probably [#997](https://github.com/zenithdb/zenith/issues/997)/[#1049](https://github.com/zenithdb/zenith/issues/1049))
2. For now it is necessary to manually initialize the replication stream via a callmemaybe call, so that the target pageserver initializes replication from the safekeeper. (It is desirable to avoid this and initialize replication directly in the attach handler, but that requires some refactoring; probably [#997](https://github.com/neondatabase/neon/issues/997)/[#1049](https://github.com/neondatabase/neon/issues/1049).)
3. Replication state can be tracked via the timeline detail pageserver call.
4. The compute node should be restarted with the new pageserver connection string. The issue of multiple compute nodes for one timeline is handled at the safekeeper consensus level, so it is not a problem here. Currently, responsibility for rescheduling the compute with the updated config lies with an external coordinator (the console).
5. The timeline is detached from the old pageserver, and its on-disk data is removed.
@@ -18,5 +18,5 @@ Timeline migration is performed in a following way:
### Implementation details
Now the safekeeper needs to track which pageserver it is replicating to. This introduces complications into the replication code:
* We need to distinguish different pageservers (now this is done by connection string which is imperfect and is covered here: https://github.com/zenithdb/zenith/issues/1105). Callmemaybe subscription management also needs to track that (this is already implemented).
* We need to distinguish different pageservers (currently this is done by connection string, which is imperfect; see https://github.com/neondatabase/neon/issues/1105). Callmemaybe subscription management also needs to track that (this is already implemented).
* We need to track which pageserver is the primary, to avoid reconnecting to non-primary pageservers when they decide to stop their walreceiver. E.g. this can happen when there is load on the compute and we are trying to detach the timeline from the old pageserver: in this case callmemaybe will try to reconnect to it, because the replication termination condition is not met (a pageserver with an active compute may never catch up to the latest lsn, so there is always some WAL tail).


@@ -70,7 +70,7 @@ two options.
...start sending WAL conservatively since the horizon (1.1), and truncate
obsolete part of WAL only when recovery is finished, i.e. epochStartLsn (4) is
reached, i.e. 2.3 transferred -- that's what https://github.com/zenithdb/zenith/pull/505 proposes.
reached, i.e. 2.3 transferred -- that's what https://github.com/neondatabase/neon/pull/505 proposes.
Then the following is possible:


@@ -15,7 +15,7 @@ The stateless compute node that performs validation is separate from the storage
Limit the maximum size of a PostgreSQL instance to constrain free-tier users (and other tiers in the future).
First of all, this is needed to control our free tier production costs.
Another reason to limit resources is risk management — we haven't (fully) tested and optimized zenith for big clusters,
Another reason to limit resources is risk management — we haven't (fully) tested and optimized neon for big clusters,
so we don't want to give users access to functionality that we don't think is ready.
## Components
@@ -43,20 +43,20 @@ Then this size should be reported to compute node.
The `current_timeline_size` value is included in the walreceiver's custom feedback message, `ReplicationFeedback`.
(PR about protocol changes https://github.com/zenithdb/zenith/pull/1037).
(PR about protocol changes https://github.com/neondatabase/neon/pull/1037).
This message is received by the safekeeper and propagated to the compute node as part of `AppendResponse`.
Finally, when the compute node receives the `current_timeline_size` from the safekeeper (or from the pageserver directly), it updates the global variable.
And then every zenith_extend() operation checks if limit is reached `(current_timeline_size > neon.max_cluster_size)` and throws `ERRCODE_DISK_FULL` error if so.
Every neon_extend() operation then checks whether the limit is reached (`current_timeline_size > neon.max_cluster_size`) and throws an `ERRCODE_DISK_FULL` error if so.
(see Postgres error codes [https://www.postgresql.org/docs/devel/errcodes-appendix.html](https://www.postgresql.org/docs/devel/errcodes-appendix.html))
TODO:
We can allow autovacuum processes to bypass this check, simply checking `IsAutoVacuumWorkerProcess()`.
It would be nice to allow manual VACUUM and VACUUM FULL to bypass the check, but it's not easy to distinguish these operations at the low level.
See issues https://github.com/neondatabase/neon/issues/1245
https://github.com/zenithdb/zenith/issues/1445
https://github.com/neondatabase/neon/issues/1445
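Putting the check and the proposed autovacuum bypass together, a conceptual sketch (the real check lives in the compute's C extension; all names below are illustrative):

```rust
/// Illustrative sketch only; the actual check is implemented in C
/// inside the compute's neon extension.
fn check_cluster_size(
    current_timeline_size: u64, // propagated from the pageserver via safekeeper feedback
    max_cluster_size: u64,      // the neon.max_cluster_size setting
    is_autovacuum_worker: bool, // autovacuum would be allowed to bypass the check
) -> Result<(), &'static str> {
    if !is_autovacuum_worker && current_timeline_size > max_cluster_size {
        // In Postgres this raises an error with ERRCODE_DISK_FULL.
        return Err("could not extend file: cluster size limit reached");
    }
    Ok(())
}
```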
TODO:
We should warn users if the limit is soon to be reached.


@@ -10,7 +10,7 @@ Intended to be used in integration tests and in CLI tools for local installation
`/docs`:
Documentation of the Zenith features and concepts.
Documentation of the Neon features and concepts.
Now it is mostly dev documentation.
`/monitoring`:
@@ -19,7 +19,7 @@ TODO
`/pageserver`:
Zenith storage service.
Neon storage service.
The pageserver has a few different duties:
- Store and manage the data.
@@ -54,7 +54,7 @@ PostgreSQL extension that contains functions needed for testing and debugging.
`/safekeeper`:
The zenith WAL service that receives WAL from a primary compute nodes and streams it to the pageserver.
The neon WAL service that receives WAL from primary compute nodes and streams it to the pageserver.
It acts as a holding area and redistribution center for recently generated WAL.
For more detailed info, see [walservice.md](./walservice.md)
@@ -64,11 +64,6 @@ The workspace_hack crate exists only to pin down some dependencies.
We use [cargo-hakari](https://crates.io/crates/cargo-hakari) for automation.
`/zenith`
Main entry point for the 'zenith' CLI utility.
TODO: Doesn't it belong to control_plane?
`/libs`:
Unites granular neon helper crates under the hood.


@@ -11,7 +11,7 @@ use std::{fmt::Display, str::FromStr};
use once_cell::sync::Lazy;
use regex::{Captures, Regex};
use utils::zid::{NodeId, ZTenantId, ZTenantTimelineId};
use utils::id::{NodeId, TenantId, TenantTimelineId};
/// The subscription kind to the timeline updates from safekeeper.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
@@ -30,13 +30,13 @@ pub enum SubscriptionKind {
/// Get every update in etcd.
All,
/// Get etcd updates for any timeline of a certain tenant, affected by any operation from any node kind.
TenantTimelines(ZTenantId),
TenantTimelines(TenantId),
/// Get etcd updates for a certain timeline of a tenant, affected by any operation from any node kind.
Timeline(ZTenantTimelineId),
Timeline(TenantTimelineId),
/// Get etcd timeline updates, specific to a certain node kind.
Node(ZTenantTimelineId, NodeKind),
Node(TenantTimelineId, NodeKind),
/// Get etcd timeline updates for a certain operation on specific nodes.
Operation(ZTenantTimelineId, NodeKind, OperationKind),
Operation(TenantTimelineId, NodeKind, OperationKind),
}
/// All kinds of nodes, able to write into etcd.
@@ -67,7 +67,7 @@ static SUBSCRIPTION_FULL_KEY_REGEX: Lazy<Regex> = Lazy::new(|| {
/// No other etcd keys are considered during system's work.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct SubscriptionFullKey {
pub id: ZTenantTimelineId,
pub id: TenantTimelineId,
pub node_kind: NodeKind,
pub operation: OperationKind,
pub node_id: NodeId,
@@ -83,7 +83,7 @@ impl SubscriptionKey {
}
/// Subscribes to a given timeline info updates from safekeepers.
pub fn sk_timeline_info(cluster_prefix: String, timeline: ZTenantTimelineId) -> Self {
pub fn sk_timeline_info(cluster_prefix: String, timeline: TenantTimelineId) -> Self {
Self {
cluster_prefix,
kind: SubscriptionKind::Operation(
@@ -97,7 +97,7 @@ impl SubscriptionKey {
/// Subscribes to all timeline updates during specific operations, running on the corresponding nodes.
pub fn operation(
cluster_prefix: String,
timeline: ZTenantTimelineId,
timeline: TenantTimelineId,
node_kind: NodeKind,
operation: OperationKind,
) -> Self {
@@ -175,7 +175,7 @@ impl FromStr for SubscriptionFullKey {
};
Ok(Self {
id: ZTenantTimelineId::new(
id: TenantTimelineId::new(
parse_capture(&key_captures, 1)?,
parse_capture(&key_captures, 2)?,
),
@@ -247,7 +247,7 @@ impl FromStr for SkOperationKind {
#[cfg(test)]
mod tests {
use utils::zid::ZTimelineId;
use utils::id::TimelineId;
use super::*;
@@ -256,9 +256,9 @@ mod tests {
let prefix = "neon";
let node_kind = NodeKind::Safekeeper;
let operation_kind = OperationKind::Safekeeper(SkOperationKind::WalBackup);
let tenant_id = ZTenantId::generate();
let timeline_id = ZTimelineId::generate();
let id = ZTenantTimelineId::new(tenant_id, timeline_id);
let tenant_id = TenantId::generate();
let timeline_id = TimelineId::generate();
let id = TenantTimelineId::new(tenant_id, timeline_id);
let node_id = NodeId(1);
let timeline_subscription_keys = [


@@ -21,7 +21,7 @@ workspace_hack = { version = "0.1", path = "../../workspace_hack" }
[dev-dependencies]
env_logger = "0.9"
postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
wal_craft = { path = "wal_craft" }
[build-dependencies]


@@ -11,6 +11,6 @@ clap = "3.0"
env_logger = "0.9"
log = "0.4"
once_cell = "1.13.0"
postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
postgres_ffi = { path = "../" }
tempfile = "3.2"


@@ -10,8 +10,8 @@ bincode = "1.3"
bytes = "1.0.1"
hyper = { version = "0.14.7", features = ["full"] }
pin-project-lite = "0.2.7"
postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
postgres-protocol = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
routerify = "3"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1"


@@ -1,11 +1,11 @@
#![allow(unused)]
use criterion::{criterion_group, criterion_main, Criterion};
use utils::zid;
use utils::id;
pub fn bench_zid_stringify(c: &mut Criterion) {
// Can only use public methods.
let ztl = zid::ZTenantTimelineId::generate();
let ztl = id::TenantTimelineId::generate();
c.bench_function("zid.to_string", |b| {
b.iter(|| {


@@ -14,7 +14,7 @@ use jsonwebtoken::{
use serde::{Deserialize, Serialize};
use serde_with::{serde_as, DisplayFromStr};
use crate::zid::ZTenantId;
use crate::id::TenantId;
const JWT_ALGORITHM: Algorithm = Algorithm::RS256;
@@ -30,23 +30,23 @@ pub enum Scope {
pub struct Claims {
#[serde(default)]
#[serde_as(as = "Option<DisplayFromStr>")]
pub tenant_id: Option<ZTenantId>,
pub tenant_id: Option<TenantId>,
pub scope: Scope,
}
impl Claims {
pub fn new(tenant_id: Option<ZTenantId>, scope: Scope) -> Self {
pub fn new(tenant_id: Option<TenantId>, scope: Scope) -> Self {
Self { tenant_id, scope }
}
}
pub fn check_permission(claims: &Claims, tenantid: Option<ZTenantId>) -> Result<()> {
match (&claims.scope, tenantid) {
pub fn check_permission(claims: &Claims, tenant_id: Option<TenantId>) -> Result<()> {
match (&claims.scope, tenant_id) {
(Scope::Tenant, None) => {
bail!("Attempt to access management api with tenant scope. Permission denied")
}
(Scope::Tenant, Some(tenantid)) => {
if claims.tenant_id.unwrap() != tenantid {
(Scope::Tenant, Some(tenant_id)) => {
if claims.tenant_id.unwrap() != tenant_id {
bail!("Tenant id mismatch. Permission denied")
}
Ok(())
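A sketch of how these pieces fit together, based on the signatures above (module paths per the `utils` crate layout shown elsewhere in this commit):

```rust
use utils::auth::{check_permission, Claims, Scope};
use utils::id::TenantId;

fn authorize(request_tenant: TenantId, claims: &Claims) -> anyhow::Result<()> {
    // For a tenant-scoped token this succeeds only when the ids match;
    // accessing the management api (no tenant id) with tenant scope is denied.
    check_permission(claims, Some(request_tenant))
}

fn example() -> anyhow::Result<()> {
    let tenant_id = TenantId::generate();
    let claims = Claims::new(Some(tenant_id), Scope::Tenant);
    authorize(tenant_id, &claims) // Ok: scope and tenant id match
}
```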


@@ -1,6 +1,6 @@
use crate::auth::{self, Claims, JwtAuth};
use crate::http::error;
use crate::zid::ZTenantId;
use crate::id::TenantId;
use anyhow::anyhow;
use hyper::header::AUTHORIZATION;
use hyper::{header::CONTENT_TYPE, Body, Request, Response, Server};
@@ -137,9 +137,9 @@ pub fn auth_middleware<B: hyper::body::HttpBody + Send + Sync + 'static>(
})
}
pub fn check_permission(req: &Request<Body>, tenantid: Option<ZTenantId>) -> Result<(), ApiError> {
pub fn check_permission(req: &Request<Body>, tenant_id: Option<TenantId>) -> Result<(), ApiError> {
match req.context::<Claims>() {
Some(claims) => Ok(auth::check_permission(&claims, tenantid)
Some(claims) => Ok(auth::check_permission(&claims, tenant_id)
.map_err(|err| ApiError::Forbidden(err.to_string()))?),
None => Ok(()), // claims is None because auth is disabled
}


@@ -3,6 +3,6 @@ pub mod error;
pub mod json;
pub mod request;
/// Current fast way to apply simple http routing in various Zenith binaries.
/// Current fast way to apply simple http routing in various Neon binaries.
/// Re-exported for sake of uniform approach, that could be later replaced with better alternatives, if needed.
pub use routerify::{ext::RequestExt, RouterBuilder, RouterService};


@@ -4,7 +4,7 @@ use hex::FromHex;
use rand::Rng;
use serde::{Deserialize, Serialize};
/// Zenith ID is a 128-bit random ID.
/// Neon ID is a 128-bit random ID.
/// Used to represent various identifiers. Provides handy utility methods and impls.
///
/// NOTE: It (de)serializes as an array of hex bytes, so the string representation would look
@@ -13,13 +13,13 @@ use serde::{Deserialize, Serialize};
/// Use `#[serde_as(as = "DisplayFromStr")]` to (de)serialize it as hex string instead: `ad50847381e248feaac9876cc71ae418`.
/// Check the `serde_with::serde_as` documentation for options for more complex types.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord)]
struct ZId([u8; 16]);
struct Id([u8; 16]);
impl ZId {
pub fn get_from_buf(buf: &mut dyn bytes::Buf) -> ZId {
impl Id {
pub fn get_from_buf(buf: &mut dyn bytes::Buf) -> Id {
let mut arr = [0u8; 16];
buf.copy_to_slice(&mut arr);
ZId::from(arr)
Id::from(arr)
}
pub fn as_arr(&self) -> [u8; 16] {
@@ -29,7 +29,7 @@ impl ZId {
pub fn generate() -> Self {
let mut tli_buf = [0u8; 16];
rand::thread_rng().fill(&mut tli_buf);
ZId::from(tli_buf)
Id::from(tli_buf)
}
fn hex_encode(&self) -> String {
@@ -44,54 +44,54 @@ impl ZId {
}
}
impl FromStr for ZId {
impl FromStr for Id {
type Err = hex::FromHexError;
fn from_str(s: &str) -> Result<ZId, Self::Err> {
fn from_str(s: &str) -> Result<Id, Self::Err> {
Self::from_hex(s)
}
}
// this is needed for pretty serialization and deserialization of ZId's using serde integration with hex crate
impl FromHex for ZId {
// this is needed for pretty serialization and deserialization of Id's using serde integration with hex crate
impl FromHex for Id {
type Error = hex::FromHexError;
fn from_hex<T: AsRef<[u8]>>(hex: T) -> Result<Self, Self::Error> {
let mut buf: [u8; 16] = [0u8; 16];
hex::decode_to_slice(hex, &mut buf)?;
Ok(ZId(buf))
Ok(Id(buf))
}
}
impl AsRef<[u8]> for ZId {
impl AsRef<[u8]> for Id {
fn as_ref(&self) -> &[u8] {
&self.0
}
}
impl From<[u8; 16]> for ZId {
impl From<[u8; 16]> for Id {
fn from(b: [u8; 16]) -> Self {
ZId(b)
Id(b)
}
}
impl fmt::Display for ZId {
impl fmt::Display for Id {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(&self.hex_encode())
}
}
impl fmt::Debug for ZId {
impl fmt::Debug for Id {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(&self.hex_encode())
}
}
macro_rules! zid_newtype {
macro_rules! id_newtype {
($t:ident) => {
impl $t {
pub fn get_from_buf(buf: &mut dyn bytes::Buf) -> $t {
$t(ZId::get_from_buf(buf))
$t(Id::get_from_buf(buf))
}
pub fn as_arr(&self) -> [u8; 16] {
@@ -99,11 +99,11 @@ macro_rules! zid_newtype {
}
pub fn generate() -> Self {
$t(ZId::generate())
$t(Id::generate())
}
pub const fn from_array(b: [u8; 16]) -> Self {
$t(ZId(b))
$t(Id(b))
}
}
@@ -111,14 +111,14 @@ macro_rules! zid_newtype {
type Err = hex::FromHexError;
fn from_str(s: &str) -> Result<$t, Self::Err> {
let value = ZId::from_str(s)?;
let value = Id::from_str(s)?;
Ok($t(value))
}
}
impl From<[u8; 16]> for $t {
fn from(b: [u8; 16]) -> Self {
$t(ZId::from(b))
$t(Id::from(b))
}
}
@@ -126,7 +126,7 @@ macro_rules! zid_newtype {
type Error = hex::FromHexError;
fn from_hex<T: AsRef<[u8]>>(hex: T) -> Result<Self, Self::Error> {
Ok($t(ZId::from_hex(hex)?))
Ok($t(Id::from_hex(hex)?))
}
}
@@ -150,7 +150,7 @@ macro_rules! zid_newtype {
};
}
/// Zenith timeline IDs are different from PostgreSQL timeline
/// Neon timeline IDs are different from PostgreSQL timeline
/// IDs. They serve a similar purpose though: they differentiate
/// between different "histories" of the same cluster. However,
/// PostgreSQL timeline IDs are a bit cumbersome, because they are only
@@ -158,7 +158,7 @@ macro_rules! zid_newtype {
/// timeline history. Those limitations mean that we cannot generate a
/// new PostgreSQL timeline ID by just generating a random number. And
/// that in turn is problematic for the "pull/push" workflow, where you
/// have a local copy of a zenith repository, and you periodically sync
/// have a local copy of a Neon repository, and you periodically sync
/// the local changes with a remote server. When you work "detached"
/// from the remote server, you cannot create a PostgreSQL timeline ID
/// that's guaranteed to be different from all existing timelines in
@@ -168,55 +168,55 @@ macro_rules! zid_newtype {
/// branches? If they pick the same one, and later try to push the
/// branches to the same remote server, they will get mixed up.
///
/// To avoid those issues, Zenith has its own concept of timelines that
/// To avoid those issues, Neon has its own concept of timelines that
/// is separate from PostgreSQL timelines, and doesn't have those
/// limitations. A zenith timeline is identified by a 128-bit ID, which
/// limitations. A Neon timeline is identified by a 128-bit ID, which
/// is usually printed out as a hex string.
///
/// NOTE: It (de)serializes as an array of hex bytes, so the string representation would look
/// like `[173,80,132,115,129,226,72,254,170,201,135,108,199,26,228,24]`.
/// See [`ZId`] for alternative ways to serialize it.
/// See [`Id`] for alternative ways to serialize it.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd, Serialize, Deserialize)]
pub struct ZTimelineId(ZId);
pub struct TimelineId(Id);
zid_newtype!(ZTimelineId);
id_newtype!(TimelineId);
/// Zenith Tenant Id represents identifiar of a particular tenant.
/// Neon Tenant Id represents the identifier of a particular tenant.
/// It is used for distinguishing requests and data belonging to different users.
///
/// NOTE: It (de)serializes as an array of hex bytes, so the string representation would look
/// like `[173,80,132,115,129,226,72,254,170,201,135,108,199,26,228,24]`.
/// See [`ZId`] for alternative ways to serialize it.
/// See [`Id`] for alternative ways to serialize it.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord)]
pub struct ZTenantId(ZId);
pub struct TenantId(Id);
zid_newtype!(ZTenantId);
id_newtype!(TenantId);
// A pair uniquely identifying Zenith instance.
// A pair uniquely identifying a Neon instance.
#[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct ZTenantTimelineId {
pub tenant_id: ZTenantId,
pub timeline_id: ZTimelineId,
pub struct TenantTimelineId {
pub tenant_id: TenantId,
pub timeline_id: TimelineId,
}
impl ZTenantTimelineId {
pub fn new(tenant_id: ZTenantId, timeline_id: ZTimelineId) -> Self {
ZTenantTimelineId {
impl TenantTimelineId {
pub fn new(tenant_id: TenantId, timeline_id: TimelineId) -> Self {
TenantTimelineId {
tenant_id,
timeline_id,
}
}
pub fn generate() -> Self {
Self::new(ZTenantId::generate(), ZTimelineId::generate())
Self::new(TenantId::generate(), TimelineId::generate())
}
pub fn empty() -> Self {
Self::new(ZTenantId::from([0u8; 16]), ZTimelineId::from([0u8; 16]))
Self::new(TenantId::from([0u8; 16]), TimelineId::from([0u8; 16]))
}
}
impl fmt::Display for ZTenantTimelineId {
impl fmt::Display for TenantTimelineId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}/{}", self.tenant_id, self.timeline_id)
}
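A quick usage sketch of the renamed id types, based on the impls above:

```rust
use std::str::FromStr;
use utils::id::{TenantId, TenantTimelineId, TimelineId};

fn example() -> anyhow::Result<()> {
    // Ids are random 128-bit values that print as 32-character hex strings.
    let tenant_id = TenantId::generate();
    let timeline_id = TimelineId::generate();

    // FromStr/FromHex parse the hex representation back.
    let _parsed = TenantId::from_str("ee6016ec31116c1b7c33dfdfca38892f")?;

    // Display for the pair prints "<tenant_id>/<timeline_id>".
    println!("{}", TenantTimelineId::new(tenant_id, timeline_id));
    Ok(())
}
```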


@@ -29,7 +29,7 @@ pub mod crashsafe_dir;
pub mod auth;
// utility functions and helper traits for unified unique id generation/serialization etc.
pub mod zid;
pub mod id;
// http endpoint utils
pub mod http;


@@ -63,7 +63,7 @@ pub enum AuthType {
Trust,
MD5,
// This mimics postgres's AuthenticationCleartextPassword but instead of password expects JWT
ZenithJWT,
NeonJWT,
}
impl FromStr for AuthType {
@@ -73,8 +73,8 @@ impl FromStr for AuthType {
match s {
"Trust" => Ok(Self::Trust),
"MD5" => Ok(Self::MD5),
"ZenithJWT" => Ok(Self::ZenithJWT),
_ => bail!("invalid value \"{}\" for auth type", s),
"NeonJWT" => Ok(Self::NeonJWT),
_ => bail!("invalid value \"{s}\" for auth type"),
}
}
}
@@ -84,7 +84,7 @@ impl fmt::Display for AuthType {
f.write_str(match self {
AuthType::Trust => "Trust",
AuthType::MD5 => "MD5",
AuthType::ZenithJWT => "ZenithJWT",
AuthType::NeonJWT => "NeonJWT",
})
}
}
@@ -376,7 +376,7 @@ impl PostgresBackend {
))?;
self.state = ProtoState::Authentication;
}
AuthType::ZenithJWT => {
AuthType::NeonJWT => {
self.write_message(&BeMessage::AuthenticationCleartextPassword)?;
self.state = ProtoState::Authentication;
}
@@ -403,7 +403,7 @@ impl PostgresBackend {
bail!("auth failed: {}", e);
}
}
AuthType::ZenithJWT => {
AuthType::NeonJWT => {
let (_, jwt_response) = m.split_last().context("protocol violation")?;
if let Err(e) = handler.check_auth_jwt(self, jwt_response) {


@@ -346,7 +346,7 @@ impl PostgresBackend {
))?;
self.state = ProtoState::Authentication;
}
AuthType::ZenithJWT => {
AuthType::NeonJWT => {
self.write_message(&BeMessage::AuthenticationCleartextPassword)?;
self.state = ProtoState::Authentication;
}
@@ -374,7 +374,7 @@ impl PostgresBackend {
bail!("auth failed: {}", e);
}
}
AuthType::ZenithJWT => {
AuthType::NeonJWT => {
let (_, jwt_response) = m.split_last().context("protocol violation")?;
if let Err(e) = handler.check_auth_jwt(self, jwt_response) {


@@ -27,10 +27,10 @@ clap = "3.0"
daemonize = "0.4.1"
tokio = { version = "1.17", features = ["process", "sync", "macros", "fs", "rt", "io-util", "time"] }
tokio-util = { version = "0.7.3", features = ["io", "io-util"] }
postgres-types = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
postgres-protocol = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
tokio-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
postgres-types = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
anyhow = { version = "1.0", features = ["backtrace"] }
crc32c = "0.6.0"
thiserror = "1.0"


@@ -12,7 +12,7 @@ use utils::project_git_version;
project_git_version!(GIT_VERSION);
fn main() -> Result<()> {
let arg_matches = App::new("Zenith dump_layerfile utility")
let arg_matches = App::new("Neon dump_layerfile utility")
.about("Dump contents of one layer file, for debugging")
.version(GIT_VERSION)
.arg(


@@ -40,7 +40,7 @@ fn version() -> String {
}
fn main() -> anyhow::Result<()> {
let arg_matches = App::new("Zenith page server")
let arg_matches = App::new("Neon page server")
.about("Materializes WAL stream to pages and serves them to the postgres")
.version(&*version())
.arg(
@@ -293,7 +293,7 @@ fn start_pageserver(conf: &'static PageServerConf, daemonize: bool) -> Result<()
// initialize authentication for incoming connections
let auth = match &conf.auth_type {
AuthType::Trust | AuthType::MD5 => None,
AuthType::ZenithJWT => {
AuthType::NeonJWT => {
// unwrap is ok because check is performed when creating config, so path is set and file exists
let key_path = conf.auth_validation_public_key_path.as_ref().unwrap();
Some(JwtAuth::from_key_path(key_path)?.into())


@@ -11,7 +11,7 @@ use utils::{lsn::Lsn, project_git_version};
project_git_version!(GIT_VERSION);
fn main() -> Result<()> {
let arg_matches = App::new("Zenith update metadata utility")
let arg_matches = App::new("Neon update metadata utility")
.about("Dump or update metadata file")
.version(GIT_VERSION)
.arg(


@@ -15,8 +15,8 @@ use toml_edit;
use toml_edit::{Document, Item};
use url::Url;
use utils::{
id::{NodeId, TenantId, TimelineId},
postgres_backend::AuthType,
zid::{NodeId, ZTenantId, ZTimelineId},
};
use crate::tenant::TIMELINES_SEGMENT_NAME;
@@ -342,16 +342,16 @@ impl PageServerConf {
self.workdir.join("tenants")
}
pub fn tenant_path(&self, tenantid: &ZTenantId) -> PathBuf {
self.tenants_path().join(tenantid.to_string())
pub fn tenant_path(&self, tenant_id: &TenantId) -> PathBuf {
self.tenants_path().join(tenant_id.to_string())
}
pub fn timelines_path(&self, tenantid: &ZTenantId) -> PathBuf {
self.tenant_path(tenantid).join(TIMELINES_SEGMENT_NAME)
pub fn timelines_path(&self, tenant_id: &TenantId) -> PathBuf {
self.tenant_path(tenant_id).join(TIMELINES_SEGMENT_NAME)
}
pub fn timeline_path(&self, timelineid: &ZTimelineId, tenantid: &ZTenantId) -> PathBuf {
self.timelines_path(tenantid).join(timelineid.to_string())
pub fn timeline_path(&self, timeline_id: &TimelineId, tenant_id: &TenantId) -> PathBuf {
self.timelines_path(tenant_id).join(timeline_id.to_string())
}
//
@@ -419,7 +419,7 @@ impl PageServerConf {
let mut conf = builder.build().context("invalid config")?;
if conf.auth_type == AuthType::ZenithJWT {
if conf.auth_type == AuthType::NeonJWT {
let auth_validation_public_key_path = conf
.auth_validation_public_key_path
.get_or_insert_with(|| workdir.join("auth_public_key.pem"));


@@ -3,8 +3,8 @@ use std::num::NonZeroU64;
use serde::{Deserialize, Serialize};
use serde_with::{serde_as, DisplayFromStr};
use utils::{
id::{NodeId, TenantId, TimelineId},
lsn::Lsn,
zid::{NodeId, ZTenantId, ZTimelineId},
};
use crate::tenant::TenantState;
@@ -14,10 +14,10 @@ use crate::tenant::TenantState;
pub struct TimelineCreateRequest {
#[serde(default)]
#[serde_as(as = "Option<DisplayFromStr>")]
pub new_timeline_id: Option<ZTimelineId>,
pub new_timeline_id: Option<TimelineId>,
#[serde(default)]
#[serde_as(as = "Option<DisplayFromStr>")]
pub ancestor_timeline_id: Option<ZTimelineId>,
pub ancestor_timeline_id: Option<TimelineId>,
#[serde(default)]
#[serde_as(as = "Option<DisplayFromStr>")]
pub ancestor_start_lsn: Option<Lsn>,
@@ -28,7 +28,7 @@ pub struct TimelineCreateRequest {
pub struct TenantCreateRequest {
#[serde(default)]
#[serde_as(as = "Option<DisplayFromStr>")]
pub new_tenant_id: Option<ZTenantId>,
pub new_tenant_id: Option<TenantId>,
pub checkpoint_distance: Option<u64>,
pub checkpoint_timeout: Option<String>,
pub compaction_target_size: Option<u64>,
@@ -46,7 +46,7 @@ pub struct TenantCreateRequest {
#[serde_as]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
pub struct TenantCreateResponse(#[serde_as(as = "DisplayFromStr")] pub ZTenantId);
pub struct TenantCreateResponse(#[serde_as(as = "DisplayFromStr")] pub TenantId);
#[derive(Serialize)]
pub struct StatusResponse {
@@ -54,7 +54,7 @@ pub struct StatusResponse {
}
impl TenantCreateRequest {
pub fn new(new_tenant_id: Option<ZTenantId>) -> TenantCreateRequest {
pub fn new(new_tenant_id: Option<TenantId>) -> TenantCreateRequest {
TenantCreateRequest {
new_tenant_id,
..Default::default()
@@ -65,7 +65,7 @@ impl TenantCreateRequest {
#[serde_as]
#[derive(Serialize, Deserialize)]
pub struct TenantConfigRequest {
pub tenant_id: ZTenantId,
pub tenant_id: TenantId,
#[serde(default)]
#[serde_as(as = "Option<DisplayFromStr>")]
pub checkpoint_distance: Option<u64>,
@@ -83,7 +83,7 @@ pub struct TenantConfigRequest {
}
impl TenantConfigRequest {
pub fn new(tenant_id: ZTenantId) -> TenantConfigRequest {
pub fn new(tenant_id: TenantId) -> TenantConfigRequest {
TenantConfigRequest {
tenant_id,
checkpoint_distance: None,
@@ -106,7 +106,7 @@ impl TenantConfigRequest {
#[derive(Serialize, Deserialize, Clone)]
pub struct TenantInfo {
#[serde_as(as = "DisplayFromStr")]
pub id: ZTenantId,
pub id: TenantId,
pub state: TenantState,
pub current_physical_size: Option<u64>, // physical size is only included in `tenant_status` endpoint
pub has_in_progress_downloads: Option<bool>,
@@ -116,7 +116,7 @@ pub struct TenantInfo {
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct LocalTimelineInfo {
#[serde_as(as = "Option<DisplayFromStr>")]
pub ancestor_timeline_id: Option<ZTimelineId>,
pub ancestor_timeline_id: Option<TimelineId>,
#[serde_as(as = "Option<DisplayFromStr>")]
pub ancestor_lsn: Option<Lsn>,
#[serde_as(as = "DisplayFromStr")]
@@ -154,9 +154,9 @@ pub struct RemoteTimelineInfo {
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct TimelineInfo {
#[serde_as(as = "DisplayFromStr")]
pub tenant_id: ZTenantId,
pub tenant_id: TenantId,
#[serde_as(as = "DisplayFromStr")]
pub timeline_id: ZTimelineId,
pub timeline_id: TimelineId,
pub local: Option<LocalTimelineInfo>,
pub remote: Option<RemoteTimelineInfo>,
}
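
For illustration only, a self-contained sketch (assuming serde, serde_with and serde_json as dependencies) of the `DisplayFromStr` pattern these request types use, showing how an optional id arrives as a plain JSON string; `u128` is a hypothetical stand-in for the TenantId/TimelineId types:

use serde::{Deserialize, Serialize};
use serde_with::{serde_as, DisplayFromStr};

#[serde_as]
#[derive(Serialize, Deserialize)]
struct CreateRequest {
    #[serde(default)]
    #[serde_as(as = "Option<DisplayFromStr>")]
    new_tenant_id: Option<u128>, // stand-in for Option<TenantId>
}

fn main() {
    // The id crosses the HTTP boundary as a string and is parsed via FromStr.
    let req: CreateRequest = serde_json::from_str(r#"{"new_tenant_id": "42"}"#).unwrap();
    assert_eq!(req.new_tenant_id, Some(42));
    // A missing field falls back to None thanks to #[serde(default)].
    let empty: CreateRequest = serde_json::from_str("{}").unwrap();
    assert!(empty.new_tenant_id.is_none());
}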


@@ -25,8 +25,8 @@ use utils::{
request::parse_request_param,
RequestExt, RouterBuilder,
},
id::{TenantId, TenantTimelineId, TimelineId},
lsn::Lsn,
zid::{ZTenantId, ZTenantTimelineId, ZTimelineId},
};
struct State {
@@ -128,10 +128,10 @@ fn local_timeline_info_from_timeline(
}
fn list_local_timelines(
tenant_id: ZTenantId,
tenant_id: TenantId,
include_non_incremental_logical_size: bool,
include_non_incremental_physical_size: bool,
) -> Result<Vec<(ZTimelineId, LocalTimelineInfo)>> {
) -> Result<Vec<(TimelineId, LocalTimelineInfo)>> {
let tenant = tenant_mgr::get_tenant(tenant_id, true)?;
let timelines = tenant.list_timelines();
@@ -156,7 +156,7 @@ async fn status_handler(request: Request<Body>) -> Result<Response<Body>, ApiErr
}
async fn timeline_create_handler(mut request: Request<Body>) -> Result<Response<Body>, ApiError> {
let tenant_id: ZTenantId = parse_request_param(&request, "tenant_id")?;
let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
let request_data: TimelineCreateRequest = json_request(&mut request).await?;
check_permission(&request, Some(tenant_id))?;
@@ -164,8 +164,8 @@ async fn timeline_create_handler(mut request: Request<Body>) -> Result<Response<
match timelines::create_timeline(
get_config(&request),
tenant_id,
request_data.new_timeline_id.map(ZTimelineId::from),
request_data.ancestor_timeline_id.map(ZTimelineId::from),
request_data.new_timeline_id.map(TimelineId::from),
request_data.ancestor_timeline_id.map(TimelineId::from),
request_data.ancestor_start_lsn,
).await {
Ok(Some(new_timeline)) => {
@@ -193,7 +193,7 @@ async fn timeline_create_handler(mut request: Request<Body>) -> Result<Response<
}
async fn timeline_list_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
let tenant_id: ZTenantId = parse_request_param(&request, "tenant_id")?;
let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
let include_non_incremental_logical_size =
query_param_present(&request, "include-non-incremental-logical-size");
let include_non_incremental_physical_size =
@@ -229,7 +229,7 @@ async fn timeline_list_handler(request: Request<Body>) -> Result<Response<Body>,
.remote_index
.read()
.await
.timeline_entry(&ZTenantTimelineId {
.timeline_entry(&TenantTimelineId {
tenant_id,
timeline_id,
})
@@ -257,8 +257,8 @@ fn query_param_present(request: &Request<Body>, param: &str) -> bool {
}
async fn timeline_detail_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
let tenant_id: ZTenantId = parse_request_param(&request, "tenant_id")?;
let timeline_id: ZTimelineId = parse_request_param(&request, "timeline_id")?;
let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
let include_non_incremental_logical_size =
query_param_present(&request, "include-non-incremental-logical-size");
let include_non_incremental_physical_size =
@@ -289,7 +289,7 @@ async fn timeline_detail_handler(request: Request<Body>) -> Result<Response<Body
let remote_timeline_info = {
let remote_index_read = get_state(&request).remote_index.read().await;
remote_index_read
.timeline_entry(&ZTenantTimelineId {
.timeline_entry(&TenantTimelineId {
tenant_id,
timeline_id,
})
@@ -322,7 +322,7 @@ async fn timeline_detail_handler(request: Request<Body>) -> Result<Response<Body
// TODO it makes sense to provide the tenant config right away, the same way it is handled in tenant_create
async fn tenant_attach_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
let tenant_id: ZTenantId = parse_request_param(&request, "tenant_id")?;
let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
check_permission(&request, Some(tenant_id))?;
info!("Handling tenant attach {tenant_id}");
@@ -402,8 +402,8 @@ async fn tenant_attach_handler(request: Request<Body>) -> Result<Response<Body>,
/// for details see comment to `storage_sync::gather_tenant_timelines_index_parts`
async fn gather_tenant_timelines_index_parts(
state: &State,
tenant_id: ZTenantId,
) -> anyhow::Result<Option<Vec<(ZTimelineId, RemoteTimeline)>>> {
tenant_id: TenantId,
) -> anyhow::Result<Option<Vec<(TimelineId, RemoteTimeline)>>> {
let index_parts = match state.remote_storage.as_ref() {
Some(storage) => {
storage_sync::gather_tenant_timelines_index_parts(state.conf, storage, tenant_id).await
@@ -425,8 +425,8 @@ async fn gather_tenant_timelines_index_parts(
}
async fn timeline_delete_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
let tenant_id: ZTenantId = parse_request_param(&request, "tenant_id")?;
let timeline_id: ZTimelineId = parse_request_param(&request, "timeline_id")?;
let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
check_permission(&request, Some(tenant_id))?;
let state = get_state(&request);
@@ -436,7 +436,7 @@ async fn timeline_delete_handler(request: Request<Body>) -> Result<Response<Body
.map_err(ApiError::from_err)?;
let mut remote_index = state.remote_index.write().await;
remote_index.remove_timeline_entry(ZTenantTimelineId {
remote_index.remove_timeline_entry(TenantTimelineId {
tenant_id,
timeline_id,
});
@@ -445,7 +445,7 @@ async fn timeline_delete_handler(request: Request<Body>) -> Result<Response<Body
}
async fn tenant_detach_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
let tenant_id: ZTenantId = parse_request_param(&request, "tenant_id")?;
let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
check_permission(&request, Some(tenant_id))?;
let state = get_state(&request);
@@ -479,7 +479,7 @@ async fn tenant_list_handler(request: Request<Body>) -> Result<Response<Body>, A
}
async fn tenant_status(request: Request<Body>) -> Result<Response<Body>, ApiError> {
let tenant_id: ZTenantId = parse_request_param(&request, "tenant_id")?;
let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
check_permission(&request, Some(tenant_id))?;
// if the tenant is in the process of downloading, it can be absent from the global tenant map
@@ -588,8 +588,8 @@ async fn tenant_create_handler(mut request: Request<Body>) -> Result<Response<Bo
let target_tenant_id = request_data
.new_tenant_id
.map(ZTenantId::from)
.unwrap_or_else(ZTenantId::generate);
.map(TenantId::from)
.unwrap_or_else(TenantId::generate);
let new_tenant_id = tokio::task::spawn_blocking(move || {
let _enter = info_span!("tenant_create", tenant = ?target_tenant_id).entered();


@@ -1,6 +1,6 @@
//!
//! Import data and WAL from a PostgreSQL data directory and WAL segments into
//! a zenith Timeline.
//! a neon Timeline.
//!
use std::fs::File;
use std::io::{Read, Seek, SeekFrom};


@@ -26,7 +26,7 @@ pub mod walredo;
use std::collections::HashMap;
use tracing::info;
use utils::zid::{ZTenantId, ZTimelineId};
use utils::id::{TenantId, TimelineId};
use crate::task_mgr::TaskKind;
@@ -105,12 +105,12 @@ fn exponential_backoff_duration_seconds(n: u32, base_increment: f64, max_seconds
}
/// A newtype to store arbitrary data grouped by tenant and timeline ids.
/// One could use [`utils::zid::ZTenantTimelineId`] for grouping, but that would
/// One could use [`utils::id::TenantTimelineId`] for grouping, but that would
/// not include the cases where a certain tenant has zero timelines.
/// This is sometimes important: a tenant could be registered during initial load from FS,
/// even if it has no timelines on disk.
#[derive(Debug)]
pub struct TenantTimelineValues<T>(HashMap<ZTenantId, HashMap<ZTimelineId, T>>);
pub struct TenantTimelineValues<T>(HashMap<TenantId, HashMap<TimelineId, T>>);
impl<T> TenantTimelineValues<T> {
fn new() -> Self {
@@ -187,8 +187,8 @@ mod tests {
#[test]
fn tenant_timeline_value_mapping() {
let first_tenant = ZTenantId::generate();
let second_tenant = ZTenantId::generate();
let first_tenant = TenantId::generate();
let second_tenant = TenantId::generate();
assert_ne!(first_tenant, second_tenant);
let mut initial = TenantTimelineValues::new();
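
A hedged, dependency-free sketch of the grouping that TenantTimelineValues models, as described in the doc comment above: the outer map is keyed by tenant, the inner map by timeline, so a tenant with zero timelines is still representable as an empty inner map (plain strings stand in for the id types):

use std::collections::HashMap;

fn main() {
    let mut values: HashMap<String, HashMap<String, u32>> = HashMap::new();

    // Register a tenant discovered during initial load, with no timelines yet.
    values.entry("first_tenant".to_string()).or_default();
    assert!(values["first_tenant"].is_empty());

    // Later, attach a timeline carrying some payload.
    values
        .entry("first_tenant".to_string())
        .or_default()
        .insert("timeline_1".to_string(), 42);
    assert_eq!(values["first_tenant"].len(), 1);
}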


@@ -5,7 +5,7 @@ use metrics::{
IntCounter, IntCounterVec, IntGauge, IntGaugeVec, UIntGauge, UIntGaugeVec,
};
use once_cell::sync::Lazy;
use utils::zid::{ZTenantId, ZTimelineId};
use utils::id::{TenantId, TimelineId};
/// Prometheus histogram buckets (in seconds) that capture the majority of
/// latencies in the microsecond range but also extend far enough up to distinguish
@@ -327,7 +327,7 @@ pub struct TimelineMetrics {
}
impl TimelineMetrics {
pub fn new(tenant_id: &ZTenantId, timeline_id: &ZTimelineId) -> Self {
pub fn new(tenant_id: &TenantId, timeline_id: &TimelineId) -> Self {
let tenant_id = tenant_id.to_string();
let timeline_id = timeline_id.to_string();
let reconstruct_time_histo = RECONSTRUCT_TIME
@@ -414,6 +414,6 @@ impl Drop for TimelineMetrics {
}
}
pub fn remove_tenant_metrics(tenant_id: &ZTenantId) {
pub fn remove_tenant_metrics(tenant_id: &TenantId) {
let _ = STORAGE_TIME.remove_label_values(&["gc", &tenant_id.to_string(), "-"]);
}


@@ -49,8 +49,8 @@ use anyhow::Context;
use once_cell::sync::OnceCell;
use tracing::error;
use utils::{
id::{TenantId, TimelineId},
lsn::Lsn,
zid::{ZTenantId, ZTimelineId},
};
use crate::repository::Key;
@@ -109,8 +109,8 @@ enum CacheKey {
#[derive(Debug, PartialEq, Eq, Hash, Clone)]
struct MaterializedPageHashKey {
tenant_id: ZTenantId,
timeline_id: ZTimelineId,
tenant_id: TenantId,
timeline_id: TimelineId,
key: Key,
}
@@ -308,8 +308,8 @@ impl PageCache {
/// returned page.
pub fn lookup_materialized_page(
&self,
tenant_id: ZTenantId,
timeline_id: ZTimelineId,
tenant_id: TenantId,
timeline_id: TimelineId,
key: &Key,
lsn: Lsn,
) -> Option<(Lsn, PageReadGuard)> {
@@ -338,8 +338,8 @@ impl PageCache {
///
pub fn memorize_materialized_page(
&self,
tenant_id: ZTenantId,
timeline_id: ZTimelineId,
tenant_id: TenantId,
timeline_id: TimelineId,
key: Key,
lsn: Lsn,
img: &[u8],


@@ -23,12 +23,12 @@ use tokio_util::io::SyncIoBridge;
use tracing::*;
use utils::{
auth::{self, Claims, JwtAuth, Scope},
id::{TenantId, TimelineId},
lsn::Lsn,
postgres_backend::AuthType,
postgres_backend_async::{self, PostgresBackend},
pq_proto::{BeMessage, FeMessage, RowDescriptor, SINGLE_COL_ROWDESC},
simple_rcu::RcuReadGuard,
zid::{ZTenantId, ZTimelineId},
};
use crate::basebackup;
@@ -123,7 +123,7 @@ impl PagestreamFeMessage {
fn parse(mut body: Bytes) -> anyhow::Result<PagestreamFeMessage> {
// TODO these gets can fail
// these correspond to the ZenithMessageTag enum in pagestore_client.h
// these correspond to the NeonMessageTag enum in pagestore_client.h
//
// TODO: consider using protobuf or serde bincode for less error prone
// serialization.
@@ -370,7 +370,7 @@ struct PageRequestMetrics {
}
impl PageRequestMetrics {
fn new(tenant_id: &ZTenantId, timeline_id: &ZTimelineId) -> Self {
fn new(tenant_id: &TenantId, timeline_id: &TimelineId) -> Self {
let tenant_id = tenant_id.to_string();
let timeline_id = timeline_id.to_string();
@@ -415,8 +415,8 @@ impl PageServerHandler {
async fn handle_pagerequests(
&self,
pgb: &mut PostgresBackend,
tenant_id: ZTenantId,
timeline_id: ZTimelineId,
tenant_id: TenantId,
timeline_id: TimelineId,
) -> anyhow::Result<()> {
// NOTE: pagerequests handler exits when connection is closed,
// so there is no need to reset the association
@@ -452,11 +452,11 @@ impl PageServerHandler {
None => break, // client disconnected
};
trace!("query: {:?}", copy_data_bytes);
trace!("query: {copy_data_bytes:?}");
let zenith_fe_msg = PagestreamFeMessage::parse(copy_data_bytes)?;
let neon_fe_msg = PagestreamFeMessage::parse(copy_data_bytes)?;
let response = match zenith_fe_msg {
let response = match neon_fe_msg {
PagestreamFeMessage::Exists(req) => {
let _timer = metrics.get_rel_exists.start_timer();
self.handle_get_rel_exists_request(&timeline, &req).await
@@ -494,8 +494,8 @@ impl PageServerHandler {
async fn handle_import_basebackup(
&self,
pgb: &mut PostgresBackend,
tenant_id: ZTenantId,
timeline_id: ZTimelineId,
tenant_id: TenantId,
timeline_id: TimelineId,
base_lsn: Lsn,
_end_lsn: Lsn,
) -> anyhow::Result<()> {
@@ -557,8 +557,8 @@ impl PageServerHandler {
async fn handle_import_wal(
&self,
pgb: &mut PostgresBackend,
tenant_id: ZTenantId,
timeline_id: ZTimelineId,
tenant_id: TenantId,
timeline_id: TimelineId,
start_lsn: Lsn,
end_lsn: Lsn,
) -> anyhow::Result<()> {
@@ -750,8 +750,8 @@ impl PageServerHandler {
async fn handle_basebackup_request(
&self,
pgb: &mut PostgresBackend,
tenant_id: ZTenantId,
timeline_id: ZTimelineId,
tenant_id: TenantId,
timeline_id: TimelineId,
lsn: Option<Lsn>,
prev_lsn: Option<Lsn>,
full_backup: bool,
@@ -792,7 +792,7 @@ impl PageServerHandler {
// when accessing the management api, supply None as an argument
// when using it to authorize a tenant, pass the corresponding tenant id
fn check_permission(&self, tenant_id: Option<ZTenantId>) -> Result<()> {
fn check_permission(&self, tenant_id: Option<TenantId>) -> Result<()> {
if self.auth.is_none() {
// auth is set to Trust, nothing to check so just return ok
return Ok(());
@@ -815,7 +815,7 @@ impl postgres_backend_async::Handler for PageServerHandler {
_pgb: &mut PostgresBackend,
jwt_response: &[u8],
) -> anyhow::Result<()> {
// this unwrap is never triggered, because check_auth_jwt is only called when auth_type is ZenithJWT
// this unwrap is never triggered, because check_auth_jwt is only called when auth_type is NeonJWT
// which requires auth to be present
let data = self
.auth
@@ -853,8 +853,8 @@ impl postgres_backend_async::Handler for PageServerHandler {
params.len() == 2,
"invalid param number for pagestream command"
);
let tenant_id = ZTenantId::from_str(params[0])?;
let timeline_id = ZTimelineId::from_str(params[1])?;
let tenant_id = TenantId::from_str(params[0])?;
let timeline_id = TimelineId::from_str(params[1])?;
self.check_permission(Some(tenant_id))?;
@@ -869,8 +869,8 @@ impl postgres_backend_async::Handler for PageServerHandler {
"invalid param number for basebackup command"
);
let tenant_id = ZTenantId::from_str(params[0])?;
let timeline_id = ZTimelineId::from_str(params[1])?;
let tenant_id = TenantId::from_str(params[0])?;
let timeline_id = TimelineId::from_str(params[1])?;
self.check_permission(Some(tenant_id))?;
@@ -895,8 +895,8 @@ impl postgres_backend_async::Handler for PageServerHandler {
"invalid param number for get_last_record_rlsn command"
);
let tenant_id = ZTenantId::from_str(params[0])?;
let timeline_id = ZTimelineId::from_str(params[1])?;
let tenant_id = TenantId::from_str(params[0])?;
let timeline_id = TimelineId::from_str(params[1])?;
self.check_permission(Some(tenant_id))?;
let timeline = get_local_timeline(tenant_id, timeline_id)?;
@@ -923,8 +923,8 @@ impl postgres_backend_async::Handler for PageServerHandler {
"invalid param number for fullbackup command"
);
let tenant_id = ZTenantId::from_str(params[0])?;
let timeline_id = ZTimelineId::from_str(params[1])?;
let tenant_id = TenantId::from_str(params[0])?;
let timeline_id = TimelineId::from_str(params[1])?;
// The caller is responsible for providing correct lsn and prev_lsn.
let lsn = if params.len() > 2 {
@@ -959,8 +959,8 @@ impl postgres_backend_async::Handler for PageServerHandler {
let (_, params_raw) = query_string.split_at("import basebackup ".len());
let params = params_raw.split_whitespace().collect::<Vec<_>>();
ensure!(params.len() == 4);
let tenant_id = ZTenantId::from_str(params[0])?;
let timeline_id = ZTimelineId::from_str(params[1])?;
let tenant_id = TenantId::from_str(params[0])?;
let timeline_id = TimelineId::from_str(params[1])?;
let base_lsn = Lsn::from_str(params[2])?;
let end_lsn = Lsn::from_str(params[3])?;
@@ -984,8 +984,8 @@ impl postgres_backend_async::Handler for PageServerHandler {
let (_, params_raw) = query_string.split_at("import wal ".len());
let params = params_raw.split_whitespace().collect::<Vec<_>>();
ensure!(params.len() == 4);
let tenant_id = ZTenantId::from_str(params[0])?;
let timeline_id = ZTimelineId::from_str(params[1])?;
let tenant_id = TenantId::from_str(params[0])?;
let timeline_id = TimelineId::from_str(params[1])?;
let start_lsn = Lsn::from_str(params[2])?;
let end_lsn = Lsn::from_str(params[3])?;
@@ -1035,7 +1035,7 @@ impl postgres_backend_async::Handler for PageServerHandler {
let (_, params_raw) = query_string.split_at("show ".len());
let params = params_raw.split(' ').collect::<Vec<_>>();
ensure!(params.len() == 1, "invalid param number for config command");
let tenant_id = ZTenantId::from_str(params[0])?;
let tenant_id = TenantId::from_str(params[0])?;
let tenant = tenant_mgr::get_tenant(tenant_id, true)?;
pgb.write_message(&BeMessage::RowDescription(&[
RowDescriptor::int8_col(b"checkpoint_distance"),
@@ -1087,8 +1087,8 @@ impl postgres_backend_async::Handler for PageServerHandler {
.captures(query_string)
.with_context(|| format!("invalid do_gc: '{}'", query_string))?;
let tenant_id = ZTenantId::from_str(caps.get(1).unwrap().as_str())?;
let timeline_id = ZTimelineId::from_str(caps.get(2).unwrap().as_str())?;
let tenant_id = TenantId::from_str(caps.get(1).unwrap().as_str())?;
let timeline_id = TimelineId::from_str(caps.get(2).unwrap().as_str())?;
let tenant = tenant_mgr::get_tenant(tenant_id, true)?;
@@ -1131,8 +1131,8 @@ impl postgres_backend_async::Handler for PageServerHandler {
.captures(query_string)
.with_context(|| format!("Invalid compact: '{}'", query_string))?;
let tenant_id = ZTenantId::from_str(caps.get(1).unwrap().as_str())?;
let timeline_id = ZTimelineId::from_str(caps.get(2).unwrap().as_str())?;
let tenant_id = TenantId::from_str(caps.get(1).unwrap().as_str())?;
let timeline_id = TimelineId::from_str(caps.get(2).unwrap().as_str())?;
let timeline = get_local_timeline(tenant_id, timeline_id)?;
timeline.compact()?;
@@ -1148,8 +1148,8 @@ impl postgres_backend_async::Handler for PageServerHandler {
.captures(query_string)
.with_context(|| format!("invalid checkpoint command: '{}'", query_string))?;
let tenant_id = ZTenantId::from_str(caps.get(1).unwrap().as_str())?;
let timeline_id = ZTimelineId::from_str(caps.get(2).unwrap().as_str())?;
let tenant_id = TenantId::from_str(caps.get(1).unwrap().as_str())?;
let timeline_id = TimelineId::from_str(caps.get(2).unwrap().as_str())?;
let timeline = get_local_timeline(tenant_id, timeline_id)?;
// Checkpoint the timeline and also compact it (due to `CheckpointConfig::Forced`).
@@ -1166,8 +1166,8 @@ impl postgres_backend_async::Handler for PageServerHandler {
.captures(query_string)
.with_context(|| format!("invalid get_lsn_by_timestamp: '{}'", query_string))?;
let tenant_id = ZTenantId::from_str(caps.get(1).unwrap().as_str())?;
let timeline_id = ZTimelineId::from_str(caps.get(2).unwrap().as_str())?;
let tenant_id = TenantId::from_str(caps.get(1).unwrap().as_str())?;
let timeline_id = TimelineId::from_str(caps.get(2).unwrap().as_str())?;
let timeline = get_local_timeline(tenant_id, timeline_id)?;
let timestamp = humantime::parse_rfc3339(caps.get(3).unwrap().as_str())?;
@@ -1192,7 +1192,7 @@ impl postgres_backend_async::Handler for PageServerHandler {
}
}
fn get_local_timeline(tenant_id: ZTenantId, timeline_id: ZTimelineId) -> Result<Arc<Timeline>> {
fn get_local_timeline(tenant_id: TenantId, timeline_id: TimelineId) -> Result<Arc<Timeline>> {
tenant_mgr::get_tenant(tenant_id, true).and_then(|tenant| tenant.get_timeline(timeline_id))
}
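
The query handlers above share one parsing pattern: strip the command prefix, split the rest on whitespace, and parse positional ids. A simplified, runnable sketch of that pattern (u128 hex values stand in for TenantId/TimelineId, and the prefix handling is condensed):

fn parse_pagestream(query_string: &str) -> Result<(u128, u128), String> {
    // Real code splits at the known prefix length; trim_start_matches is a
    // simplification for this sketch.
    let params: Vec<&str> = query_string
        .trim_start_matches("pagestream ")
        .split_whitespace()
        .collect();
    if params.len() != 2 {
        return Err("invalid param number for pagestream command".to_string());
    }
    let tenant_id = u128::from_str_radix(params[0], 16).map_err(|e| e.to_string())?;
    let timeline_id = u128::from_str_radix(params[1], 16).map_err(|e| e.to_string())?;
    Ok((tenant_id, timeline_id))
}

fn main() {
    let (t, tl) = parse_pagestream(
        "pagestream 11223344556677881122334455667788 aa223344556677881122334455667788",
    )
    .unwrap();
    assert!(t != tl);
}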


@@ -10,7 +10,7 @@ use crate::keyspace::{KeySpace, KeySpaceAccum};
use crate::reltag::{RelTag, SlruKind};
use crate::repository::*;
use crate::tenant::Timeline;
use crate::walrecord::ZenithWalRecord;
use crate::walrecord::NeonWalRecord;
use anyhow::{bail, ensure, Result};
use bytes::{Buf, Bytes};
use postgres_ffi::v14::pg_constants;
@@ -570,7 +570,7 @@ impl<'a> DatadirModification<'a> {
&mut self,
rel: RelTag,
blknum: BlockNumber,
rec: ZenithWalRecord,
rec: NeonWalRecord,
) -> Result<()> {
ensure!(rel.relnode != 0, "invalid relnode");
self.put(rel_block_to_key(rel, blknum), Value::WalRecord(rec));
@@ -583,7 +583,7 @@ impl<'a> DatadirModification<'a> {
kind: SlruKind,
segno: u32,
blknum: BlockNumber,
rec: ZenithWalRecord,
rec: NeonWalRecord,
) -> Result<()> {
self.put(
slru_block_to_key(kind, segno, blknum),
@@ -1401,7 +1401,7 @@ fn is_slru_block_key(key: Key) -> bool {
#[cfg(test)]
pub fn create_test_timeline(
tenant: &crate::tenant::Tenant,
timeline_id: utils::zid::ZTimelineId,
timeline_id: utils::id::TimelineId,
) -> Result<std::sync::Arc<Timeline>> {
let tline = tenant.create_empty_timeline(timeline_id, Lsn(8))?;
let mut m = tline.begin_modification(Lsn(8));


@@ -1,4 +1,4 @@
use crate::walrecord::ZenithWalRecord;
use crate::walrecord::NeonWalRecord;
use anyhow::{bail, Result};
use byteorder::{ByteOrder, BE};
use bytes::Bytes;
@@ -157,7 +157,7 @@ pub enum Value {
/// replayed get the full value. Replaying the WAL record
/// might need a previous version of the value (if will_init()
/// returns false), or it may be replayed stand-alone (true).
WalRecord(ZenithWalRecord),
WalRecord(NeonWalRecord),
}
impl Value {


@@ -68,7 +68,7 @@
//! Pageserver maintains a remote file structure similar to the local one: all layer files are uploaded with the same names under the same directory structure.
//! Yet instead of keeping the `metadata` file remotely, we wrap it with more data in [`IndexPart`], containing the list of remote files.
//! This file gets read to populate the cache if the remote timeline data is missing from it, and gets updated after every successful download.
//! This way, we optimize S3 storage access by not running the `S3 list` command that could be expensive and slow: knowing both [`ZTenantId`] and [`ZTimelineId`],
//! This way, we optimize S3 storage access by not running the `S3 list` command that could be expensive and slow: knowing both [`TenantId`] and [`TimelineId`],
//! we can always reconstruct the path to the timeline, use this to get the same path on the remote storage and retrieve its shard contents, if needed, same as any layer files.
//!
//! By default, pageserver reads the remote storage index data only for timelines located locally, to synchronize those, if needed.
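
A small illustration of the point above (not from the diff): because the remote layout mirrors the local one, the key for a timeline's index file can be rebuilt from the two ids alone, with no listing round-trip. The "index_part.json" name is an assumption matching IndexPart::FILE_NAME as used later in this diff:

fn remote_index_part_key(tenant_id: &str, timeline_id: &str) -> String {
    // tenants/<tenant_id>/timelines/<timeline_id>/index_part.json
    format!("tenants/{tenant_id}/timelines/{timeline_id}/index_part.json")
}

fn main() {
    let key = remote_index_part_key("1122", "aa22");
    assert_eq!(key, "tenants/1122/timelines/aa22/index_part.json");
}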
@@ -183,7 +183,7 @@ use crate::{
TenantTimelineValues,
};
use utils::zid::{ZTenantId, ZTenantTimelineId, ZTimelineId};
use utils::id::{TenantId, TenantTimelineId, TimelineId};
use self::download::download_index_parts;
pub use self::download::gather_tenant_timelines_index_parts;
@@ -227,7 +227,7 @@ pub struct SyncStartupData {
struct SyncQueue {
max_timelines_per_batch: NonZeroUsize,
queue: Mutex<VecDeque<(ZTenantTimelineId, SyncTask)>>,
queue: Mutex<VecDeque<(TenantTimelineId, SyncTask)>>,
condvar: Condvar,
}
@@ -241,7 +241,7 @@ impl SyncQueue {
}
/// Queue a new task
fn push(&self, sync_id: ZTenantTimelineId, new_task: SyncTask) {
fn push(&self, sync_id: TenantTimelineId, new_task: SyncTask) {
let mut q = self.queue.lock().unwrap();
q.push_back((sync_id, new_task));
@@ -254,7 +254,7 @@ impl SyncQueue {
/// A timeline has to take care not to delete certain layers from the remote storage before the corresponding uploads happen.
/// Other than that, due to the "immutable" nature of the layers, the order of their deletion/uploading/downloading does not matter.
/// Hence, we merge the layers together into a single task per timeline and run those concurrently (with the deletion happening only after successful uploading).
fn next_task_batch(&self) -> (HashMap<ZTenantTimelineId, SyncTaskBatch>, usize) {
fn next_task_batch(&self) -> (HashMap<TenantTimelineId, SyncTaskBatch>, usize) {
// Wait for the first task in blocking fashion
let mut q = self.queue.lock().unwrap();
while q.is_empty() {
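
The fields shown above (a Mutex-protected VecDeque plus a Condvar) are the classic blocking-queue shape. A minimal, self-contained sketch of that mechanism, with a plain i32 standing in for the (TenantTimelineId, SyncTask) pairs:

use std::collections::VecDeque;
use std::sync::{Condvar, Mutex};

struct BlockingQueue {
    queue: Mutex<VecDeque<i32>>,
    condvar: Condvar,
}

impl BlockingQueue {
    fn push(&self, task: i32) {
        let mut q = self.queue.lock().unwrap();
        q.push_back(task);
        // Wake one worker that may be blocked waiting for a task.
        self.condvar.notify_one();
    }

    fn pop_blocking(&self) -> i32 {
        let mut q = self.queue.lock().unwrap();
        // Sleep until a task arrives; wait() releases the lock while parked.
        while q.is_empty() {
            q = self.condvar.wait(q).unwrap();
        }
        q.pop_front().unwrap()
    }
}

fn main() {
    let q = BlockingQueue {
        queue: Mutex::new(VecDeque::new()),
        condvar: Condvar::new(),
    };
    q.push(1);
    assert_eq!(q.pop_blocking(), 1);
}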
@@ -488,8 +488,8 @@ struct LayersDeletion {
///
/// Ensure that the loop is started, otherwise the task is never processed.
pub fn schedule_layer_upload(
tenant_id: ZTenantId,
timeline_id: ZTimelineId,
tenant_id: TenantId,
timeline_id: TimelineId,
layers_to_upload: HashSet<PathBuf>,
metadata: Option<TimelineMetadata>,
) {
@@ -501,7 +501,7 @@ pub fn schedule_layer_upload(
}
};
sync_queue.push(
ZTenantTimelineId {
TenantTimelineId {
tenant_id,
timeline_id,
},
@@ -519,8 +519,8 @@ pub fn schedule_layer_upload(
///
/// Ensure that the loop is started, otherwise the task is never processed.
pub fn schedule_layer_delete(
tenant_id: ZTenantId,
timeline_id: ZTimelineId,
tenant_id: TenantId,
timeline_id: TimelineId,
layers_to_delete: HashSet<PathBuf>,
) {
let sync_queue = match SYNC_QUEUE.get() {
@@ -531,7 +531,7 @@ pub fn schedule_layer_delete(
}
};
sync_queue.push(
ZTenantTimelineId {
TenantTimelineId {
tenant_id,
timeline_id,
},
@@ -551,7 +551,7 @@ pub fn schedule_layer_delete(
/// On any failure, the task gets retried, omitting already downloaded layers.
///
/// Ensure that the loop is started, otherwise the task is never processed.
pub fn schedule_layer_download(tenant_id: ZTenantId, timeline_id: ZTimelineId) {
pub fn schedule_layer_download(tenant_id: TenantId, timeline_id: TimelineId) {
debug!("Scheduling layer download for tenant {tenant_id}, timeline {timeline_id}");
let sync_queue = match SYNC_QUEUE.get() {
Some(queue) => queue,
@@ -561,7 +561,7 @@ pub fn schedule_layer_download(tenant_id: ZTenantId, timeline_id: ZTimelineId) {
}
};
sync_queue.push(
ZTenantTimelineId {
TenantTimelineId {
tenant_id,
timeline_id,
},
@@ -604,7 +604,7 @@ pub fn spawn_storage_sync_task(
let _ = empty_tenants.0.entry(tenant_id).or_default();
} else {
for (timeline_id, timeline_data) in timeline_data {
let id = ZTenantTimelineId::new(tenant_id, timeline_id);
let id = TenantTimelineId::new(tenant_id, timeline_id);
keys_for_index_part_downloads.insert(id);
timelines_to_sync.insert(id, timeline_data);
}
@@ -766,9 +766,9 @@ async fn process_batches(
max_sync_errors: NonZeroU32,
storage: GenericRemoteStorage,
index: &RemoteIndex,
batched_tasks: HashMap<ZTenantTimelineId, SyncTaskBatch>,
batched_tasks: HashMap<TenantTimelineId, SyncTaskBatch>,
sync_queue: &SyncQueue,
) -> HashSet<ZTenantId> {
) -> HashSet<TenantId> {
let mut sync_results = batched_tasks
.into_iter()
.map(|(sync_id, batch)| {
@@ -808,7 +808,7 @@ async fn process_sync_task_batch(
conf: &'static PageServerConf,
(storage, index, sync_queue): (GenericRemoteStorage, RemoteIndex, &SyncQueue),
max_sync_errors: NonZeroU32,
sync_id: ZTenantTimelineId,
sync_id: TenantTimelineId,
batch: SyncTaskBatch,
) -> DownloadStatus {
let sync_start = Instant::now();
@@ -949,7 +949,7 @@ async fn download_timeline_data(
conf: &'static PageServerConf,
(storage, index, sync_queue): (&GenericRemoteStorage, &RemoteIndex, &SyncQueue),
current_remote_timeline: Option<&RemoteTimeline>,
sync_id: ZTenantTimelineId,
sync_id: TenantTimelineId,
new_download_data: SyncData<LayersDownload>,
sync_start: Instant,
task_name: &str,
@@ -999,7 +999,7 @@ async fn download_timeline_data(
async fn update_local_metadata(
conf: &'static PageServerConf,
sync_id: ZTenantTimelineId,
sync_id: TenantTimelineId,
remote_timeline: Option<&RemoteTimeline>,
) -> anyhow::Result<()> {
let remote_metadata = match remote_timeline {
@@ -1031,7 +1031,7 @@ async fn update_local_metadata(
info!("Updating local timeline metadata from remote timeline: local disk_consistent_lsn={local_lsn:?}, remote disk_consistent_lsn={remote_lsn}");
// clone because spawn_blocking requires static lifetime
let cloned_metadata = remote_metadata.to_owned();
let ZTenantTimelineId {
let TenantTimelineId {
tenant_id,
timeline_id,
} = sync_id;
@@ -1061,7 +1061,7 @@ async fn update_local_metadata(
async fn delete_timeline_data(
conf: &'static PageServerConf,
(storage, index, sync_queue): (&GenericRemoteStorage, &RemoteIndex, &SyncQueue),
sync_id: ZTenantTimelineId,
sync_id: TenantTimelineId,
mut new_delete_data: SyncData<LayersDeletion>,
sync_start: Instant,
task_name: &str,
@@ -1104,7 +1104,7 @@ async fn upload_timeline_data(
conf: &'static PageServerConf,
(storage, index, sync_queue): (&GenericRemoteStorage, &RemoteIndex, &SyncQueue),
current_remote_timeline: Option<&RemoteTimeline>,
sync_id: ZTenantTimelineId,
sync_id: TenantTimelineId,
new_upload_data: SyncData<LayersUpload>,
sync_start: Instant,
task_name: &str,
@@ -1163,7 +1163,7 @@ async fn update_remote_data(
conf: &'static PageServerConf,
storage: &GenericRemoteStorage,
index: &RemoteIndex,
sync_id: ZTenantTimelineId,
sync_id: TenantTimelineId,
update: RemoteDataUpdate<'_>,
) -> anyhow::Result<()> {
let updated_remote_timeline = {
@@ -1261,7 +1261,7 @@ async fn validate_task_retries(
fn schedule_first_sync_tasks(
index: &mut RemoteTimelineIndex,
sync_queue: &SyncQueue,
local_timeline_files: HashMap<ZTenantTimelineId, (TimelineMetadata, HashSet<PathBuf>)>,
local_timeline_files: HashMap<TenantTimelineId, (TimelineMetadata, HashSet<PathBuf>)>,
) -> TenantTimelineValues<LocalTimelineInitStatus> {
let mut local_timeline_init_statuses = TenantTimelineValues::new();
@@ -1331,8 +1331,8 @@ fn schedule_first_sync_tasks(
/// bool in return value stands for awaits_download
fn compare_local_and_remote_timeline(
new_sync_tasks: &mut VecDeque<(ZTenantTimelineId, SyncTask)>,
sync_id: ZTenantTimelineId,
new_sync_tasks: &mut VecDeque<(TenantTimelineId, SyncTask)>,
sync_id: TenantTimelineId,
local_metadata: TimelineMetadata,
local_files: HashSet<PathBuf>,
remote_entry: &RemoteTimeline,
@@ -1377,7 +1377,7 @@ fn compare_local_and_remote_timeline(
}
fn register_sync_status(
sync_id: ZTenantTimelineId,
sync_id: TenantTimelineId,
sync_start: Instant,
sync_name: &str,
sync_status: Option<bool>,
@@ -1409,7 +1409,7 @@ mod test_utils {
pub(super) async fn create_local_timeline(
harness: &TenantHarness<'_>,
timeline_id: ZTimelineId,
timeline_id: TimelineId,
filenames: &[&str],
metadata: TimelineMetadata,
) -> anyhow::Result<LayersUpload> {
@@ -1454,8 +1454,8 @@ mod tests {
use super::*;
const TEST_SYNC_ID: ZTenantTimelineId = ZTenantTimelineId {
tenant_id: ZTenantId::from_array(hex!("11223344556677881122334455667788")),
const TEST_SYNC_ID: TenantTimelineId = TenantTimelineId {
tenant_id: TenantId::from_array(hex!("11223344556677881122334455667788")),
timeline_id: TIMELINE_ID,
};
@@ -1464,12 +1464,12 @@ mod tests {
let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap());
assert_eq!(sync_queue.len(), 0);
let sync_id_2 = ZTenantTimelineId {
tenant_id: ZTenantId::from_array(hex!("22223344556677881122334455667788")),
let sync_id_2 = TenantTimelineId {
tenant_id: TenantId::from_array(hex!("22223344556677881122334455667788")),
timeline_id: TIMELINE_ID,
};
let sync_id_3 = ZTenantTimelineId {
tenant_id: ZTenantId::from_array(hex!("33223344556677881122334455667788")),
let sync_id_3 = TenantTimelineId {
tenant_id: TenantId::from_array(hex!("33223344556677881122334455667788")),
timeline_id: TIMELINE_ID,
};
assert!(sync_id_2 != TEST_SYNC_ID);
@@ -1591,8 +1591,8 @@ mod tests {
layers_to_skip: HashSet::from([PathBuf::from("sk4")]),
};
let sync_id_2 = ZTenantTimelineId {
tenant_id: ZTenantId::from_array(hex!("22223344556677881122334455667788")),
let sync_id_2 = TenantTimelineId {
tenant_id: TenantId::from_array(hex!("22223344556677881122334455667788")),
timeline_id: TIMELINE_ID,
};
assert!(sync_id_2 != TEST_SYNC_ID);


@@ -8,7 +8,7 @@ use tracing::{debug, error, info};
use crate::storage_sync::{SyncQueue, SyncTask};
use remote_storage::GenericRemoteStorage;
use utils::zid::ZTenantTimelineId;
use utils::id::TenantTimelineId;
use super::{LayersDeletion, SyncData};
@@ -17,7 +17,7 @@ use super::{LayersDeletion, SyncData};
pub(super) async fn delete_timeline_layers(
storage: &GenericRemoteStorage,
sync_queue: &SyncQueue,
sync_id: ZTenantTimelineId,
sync_id: TenantTimelineId,
mut delete_data: SyncData<LayersDeletion>,
) -> bool {
if !delete_data.data.deletion_registered {
@@ -123,7 +123,7 @@ mod tests {
async fn delete_timeline_negative() -> anyhow::Result<()> {
let harness = TenantHarness::create("delete_timeline_negative")?;
let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap());
let sync_id = ZTenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
let storage = GenericRemoteStorage::new(LocalFs::new(
tempdir()?.path().to_path_buf(),
harness.conf.workdir.clone(),
@@ -157,7 +157,7 @@ mod tests {
let harness = TenantHarness::create("delete_timeline")?;
let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap());
let sync_id = ZTenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
let layer_files = ["a", "b", "c", "d"];
let storage = GenericRemoteStorage::new(LocalFs::new(
tempdir()?.path().to_path_buf(),


@@ -20,7 +20,7 @@ use crate::{
config::PageServerConf, storage_sync::SyncTask, tenant::metadata::metadata_path,
TEMP_FILE_SUFFIX,
};
use utils::zid::{ZTenantId, ZTenantTimelineId, ZTimelineId};
use utils::id::{TenantId, TenantTimelineId, TimelineId};
use super::{
index::{IndexPart, RemoteTimeline},
@@ -33,14 +33,14 @@ use super::{
// When data is received successfully without errors, the Present variant is used.
pub enum TenantIndexParts {
Poisoned {
present: HashMap<ZTimelineId, IndexPart>,
missing: HashSet<ZTimelineId>,
present: HashMap<TimelineId, IndexPart>,
missing: HashSet<TimelineId>,
},
Present(HashMap<ZTimelineId, IndexPart>),
Present(HashMap<TimelineId, IndexPart>),
}
impl TenantIndexParts {
fn add_poisoned(&mut self, timeline_id: ZTimelineId) {
fn add_poisoned(&mut self, timeline_id: TimelineId) {
match self {
TenantIndexParts::Poisoned { missing, .. } => {
missing.insert(timeline_id);
@@ -64,9 +64,9 @@ impl Default for TenantIndexParts {
pub async fn download_index_parts(
conf: &'static PageServerConf,
storage: &GenericRemoteStorage,
keys: HashSet<ZTenantTimelineId>,
) -> HashMap<ZTenantId, TenantIndexParts> {
let mut index_parts: HashMap<ZTenantId, TenantIndexParts> = HashMap::new();
keys: HashSet<TenantTimelineId>,
) -> HashMap<TenantId, TenantIndexParts> {
let mut index_parts: HashMap<TenantId, TenantIndexParts> = HashMap::new();
let mut part_downloads = keys
.into_iter()
@@ -112,8 +112,8 @@ pub async fn download_index_parts(
pub async fn gather_tenant_timelines_index_parts(
conf: &'static PageServerConf,
storage: &GenericRemoteStorage,
tenant_id: ZTenantId,
) -> anyhow::Result<HashMap<ZTimelineId, IndexPart>> {
tenant_id: TenantId,
) -> anyhow::Result<HashMap<TimelineId, IndexPart>> {
let tenant_path = conf.timelines_path(&tenant_id);
let timeline_sync_ids = get_timeline_sync_ids(storage, &tenant_path, tenant_id)
.await
@@ -135,7 +135,7 @@ pub async fn gather_tenant_timelines_index_parts(
async fn download_index_part(
conf: &'static PageServerConf,
storage: &GenericRemoteStorage,
sync_id: ZTenantTimelineId,
sync_id: TenantTimelineId,
) -> Result<IndexPart, DownloadError> {
let index_part_path = metadata_path(conf, sync_id.timeline_id, sync_id.tenant_id)
.with_file_name(IndexPart::FILE_NAME);
@@ -197,7 +197,7 @@ pub(super) async fn download_timeline_layers<'a>(
storage: &'a GenericRemoteStorage,
sync_queue: &'a SyncQueue,
remote_timeline: Option<&'a RemoteTimeline>,
sync_id: ZTenantTimelineId,
sync_id: TenantTimelineId,
mut download_data: SyncData<LayersDownload>,
) -> DownloadedTimeline {
let remote_timeline = match remote_timeline {
@@ -335,7 +335,7 @@ pub(super) async fn download_timeline_layers<'a>(
}
// fsync timeline directory which is a parent directory for downloaded files
let ZTenantTimelineId {
let TenantTimelineId {
tenant_id,
timeline_id,
} = &sync_id;
@@ -366,8 +366,8 @@ pub(super) async fn download_timeline_layers<'a>(
async fn get_timeline_sync_ids(
storage: &GenericRemoteStorage,
tenant_path: &Path,
tenant_id: ZTenantId,
) -> anyhow::Result<HashSet<ZTenantTimelineId>> {
tenant_id: TenantId,
) -> anyhow::Result<HashSet<TenantTimelineId>> {
let tenant_storage_path = storage.remote_object_id(tenant_path).with_context(|| {
format!(
"Failed to get tenant storage path for local path '{}'",
@@ -395,11 +395,11 @@ async fn get_timeline_sync_ids(
anyhow::anyhow!("failed to get timeline id for remote tenant {tenant_id}")
})?;
let timeline_id: ZTimelineId = object_name.parse().with_context(|| {
let timeline_id: TimelineId = object_name.parse().with_context(|| {
format!("failed to parse object name into timeline id '{object_name}'")
})?;
sync_ids.insert(ZTenantTimelineId {
sync_ids.insert(TenantTimelineId {
tenant_id,
timeline_id,
});
@@ -439,7 +439,7 @@ mod tests {
let harness = TenantHarness::create("download_timeline")?;
let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap());
let sync_id = ZTenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
let layer_files = ["a", "b", "layer_to_skip", "layer_to_keep_locally"];
let storage = GenericRemoteStorage::new(LocalFs::new(
tempdir()?.path().to_owned(),
@@ -539,7 +539,7 @@ mod tests {
async fn download_timeline_negatives() -> anyhow::Result<()> {
let harness = TenantHarness::create("download_timeline_negatives")?;
let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap());
let sync_id = ZTenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
let storage = GenericRemoteStorage::new(LocalFs::new(
tempdir()?.path().to_owned(),
harness.conf.workdir.clone(),
@@ -597,7 +597,7 @@ mod tests {
#[tokio::test]
async fn test_download_index_part() -> anyhow::Result<()> {
let harness = TenantHarness::create("test_download_index_part")?;
let sync_id = ZTenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
let storage = GenericRemoteStorage::new(LocalFs::new(
tempdir()?.path().to_owned(),


@@ -17,8 +17,8 @@ use tracing::log::warn;
use crate::{config::PageServerConf, tenant::metadata::TimelineMetadata};
use utils::{
id::{TenantId, TenantTimelineId, TimelineId},
lsn::Lsn,
zid::{ZTenantId, ZTenantTimelineId, ZTimelineId},
};
use super::download::TenantIndexParts;
@@ -49,7 +49,7 @@ impl RelativePath {
}
#[derive(Debug, Clone, Default)]
pub struct TenantEntry(HashMap<ZTimelineId, RemoteTimeline>);
pub struct TenantEntry(HashMap<TimelineId, RemoteTimeline>);
impl TenantEntry {
pub fn has_in_progress_downloads(&self) -> bool {
@@ -59,7 +59,7 @@ impl TenantEntry {
}
impl Deref for TenantEntry {
type Target = HashMap<ZTimelineId, RemoteTimeline>;
type Target = HashMap<TimelineId, RemoteTimeline>;
fn deref(&self) -> &Self::Target {
&self.0
@@ -72,8 +72,8 @@ impl DerefMut for TenantEntry {
}
}
impl From<HashMap<ZTimelineId, RemoteTimeline>> for TenantEntry {
fn from(inner: HashMap<ZTimelineId, RemoteTimeline>) -> Self {
impl From<HashMap<TimelineId, RemoteTimeline>> for TenantEntry {
fn from(inner: HashMap<TimelineId, RemoteTimeline>) -> Self {
Self(inner)
}
}
@@ -81,7 +81,7 @@ impl From<HashMap<ZTimelineId, RemoteTimeline>> for TenantEntry {
/// An index to track tenant files that exist on the remote storage.
#[derive(Debug, Clone, Default)]
pub struct RemoteTimelineIndex {
entries: HashMap<ZTenantId, TenantEntry>,
entries: HashMap<TenantId, TenantEntry>,
}
/// A wrapper to synchronize the access to the index, should be created and used before dealing with any [`RemoteTimelineIndex`].
@@ -91,9 +91,9 @@ pub struct RemoteIndex(Arc<RwLock<RemoteTimelineIndex>>);
impl RemoteIndex {
pub fn from_parts(
conf: &'static PageServerConf,
index_parts: HashMap<ZTenantId, TenantIndexParts>,
index_parts: HashMap<TenantId, TenantIndexParts>,
) -> anyhow::Result<Self> {
let mut entries: HashMap<ZTenantId, TenantEntry> = HashMap::new();
let mut entries: HashMap<TenantId, TenantEntry> = HashMap::new();
for (tenant_id, index_parts) in index_parts {
match index_parts {
@@ -136,30 +136,30 @@ impl Clone for RemoteIndex {
impl RemoteTimelineIndex {
pub fn timeline_entry(
&self,
ZTenantTimelineId {
TenantTimelineId {
tenant_id,
timeline_id,
}: &ZTenantTimelineId,
}: &TenantTimelineId,
) -> Option<&RemoteTimeline> {
self.entries.get(tenant_id)?.get(timeline_id)
}
pub fn timeline_entry_mut(
&mut self,
ZTenantTimelineId {
TenantTimelineId {
tenant_id,
timeline_id,
}: &ZTenantTimelineId,
}: &TenantTimelineId,
) -> Option<&mut RemoteTimeline> {
self.entries.get_mut(tenant_id)?.get_mut(timeline_id)
}
pub fn add_timeline_entry(
&mut self,
ZTenantTimelineId {
TenantTimelineId {
tenant_id,
timeline_id,
}: ZTenantTimelineId,
}: TenantTimelineId,
entry: RemoteTimeline,
) {
self.entries
@@ -170,10 +170,10 @@ impl RemoteTimelineIndex {
pub fn remove_timeline_entry(
&mut self,
ZTenantTimelineId {
TenantTimelineId {
tenant_id,
timeline_id,
}: ZTenantTimelineId,
}: TenantTimelineId,
) -> Option<RemoteTimeline> {
self.entries
.entry(tenant_id)
@@ -181,25 +181,25 @@ impl RemoteTimelineIndex {
.remove(&timeline_id)
}
pub fn tenant_entry(&self, tenant_id: &ZTenantId) -> Option<&TenantEntry> {
pub fn tenant_entry(&self, tenant_id: &TenantId) -> Option<&TenantEntry> {
self.entries.get(tenant_id)
}
pub fn tenant_entry_mut(&mut self, tenant_id: &ZTenantId) -> Option<&mut TenantEntry> {
pub fn tenant_entry_mut(&mut self, tenant_id: &TenantId) -> Option<&mut TenantEntry> {
self.entries.get_mut(tenant_id)
}
pub fn add_tenant_entry(&mut self, tenant_id: ZTenantId) -> &mut TenantEntry {
pub fn add_tenant_entry(&mut self, tenant_id: TenantId) -> &mut TenantEntry {
self.entries.entry(tenant_id).or_default()
}
pub fn remove_tenant_entry(&mut self, tenant_id: &ZTenantId) -> Option<TenantEntry> {
pub fn remove_tenant_entry(&mut self, tenant_id: &TenantId) -> Option<TenantEntry> {
self.entries.remove(tenant_id)
}
pub fn set_awaits_download(
&mut self,
id: &ZTenantTimelineId,
id: &TenantTimelineId,
awaits_download: bool,
) -> anyhow::Result<()> {
self.timeline_entry_mut(id)
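
To make the sharing model above concrete, a hedged standalone sketch of the same two-level index behind an Arc<RwLock<..>>, with strings standing in for the id types and remote state:

use std::collections::HashMap;
use std::sync::{Arc, RwLock};

// tenant_id -> (timeline_id -> remote state)
#[derive(Default)]
struct Index {
    entries: HashMap<String, HashMap<String, String>>,
}

fn main() {
    let index = Arc::new(RwLock::new(Index::default()));

    // A sync worker takes the write lock to record an upload.
    index
        .write()
        .unwrap()
        .entries
        .entry("tenant_a".to_string())
        .or_default()
        .insert("timeline_1".to_string(), "uploaded".to_string());

    // An HTTP handler only needs the read lock to answer a status query.
    let read = index.read().unwrap();
    assert!(read.entries["tenant_a"].contains_key("timeline_1"));
}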


@@ -8,7 +8,7 @@ use remote_storage::GenericRemoteStorage;
use tokio::fs;
use tracing::{debug, error, info, warn};
use utils::zid::ZTenantTimelineId;
use utils::id::TenantTimelineId;
use super::{
index::{IndexPart, RemoteTimeline},
@@ -21,7 +21,7 @@ use crate::{config::PageServerConf, storage_sync::SyncTask, tenant::metadata::me
pub(super) async fn upload_index_part(
conf: &'static PageServerConf,
storage: &GenericRemoteStorage,
sync_id: ZTenantTimelineId,
sync_id: TenantTimelineId,
index_part: IndexPart,
) -> anyhow::Result<()> {
let index_part_bytes = serde_json::to_vec(&index_part)
@@ -58,7 +58,7 @@ pub(super) async fn upload_timeline_layers<'a>(
storage: &'a GenericRemoteStorage,
sync_queue: &SyncQueue,
remote_timeline: Option<&'a RemoteTimeline>,
sync_id: ZTenantTimelineId,
sync_id: TenantTimelineId,
mut upload_data: SyncData<LayersUpload>,
) -> UploadedTimeline {
let upload = &mut upload_data.data;
@@ -213,7 +213,7 @@ mod tests {
async fn regular_layer_upload() -> anyhow::Result<()> {
let harness = TenantHarness::create("regular_layer_upload")?;
let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap());
let sync_id = ZTenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
let layer_files = ["a", "b"];
let storage = GenericRemoteStorage::new(LocalFs::new(
@@ -301,7 +301,7 @@ mod tests {
async fn layer_upload_after_local_fs_update() -> anyhow::Result<()> {
let harness = TenantHarness::create("layer_upload_after_local_fs_update")?;
let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap());
let sync_id = ZTenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
let layer_files = ["a1", "b1"];
let storage = GenericRemoteStorage::new(LocalFs::new(
@@ -395,7 +395,7 @@ mod tests {
#[tokio::test]
async fn test_upload_index_part() -> anyhow::Result<()> {
let harness = TenantHarness::create("test_upload_index_part")?;
let sync_id = ZTenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
let sync_id = TenantTimelineId::new(harness.tenant_id, TIMELINE_ID);
let storage = GenericRemoteStorage::new(LocalFs::new(
tempdir()?.path().to_owned(),


@@ -51,7 +51,7 @@ use tracing::{debug, error, info, warn};
use once_cell::sync::Lazy;
use utils::zid::{ZTenantId, ZTimelineId};
use utils::id::{TenantId, TimelineId};
use crate::shutdown_pageserver;
@@ -210,8 +210,8 @@ pub enum TaskKind {
#[derive(Default)]
struct MutableTaskState {
/// Tenant and timeline that this task is associated with.
tenant_id: Option<ZTenantId>,
timeline_id: Option<ZTimelineId>,
tenant_id: Option<TenantId>,
timeline_id: Option<TimelineId>,
/// Handle for waiting for the task to exit. It can be None, if the
/// the task has already exited.
@@ -238,8 +238,8 @@ struct PageServerTask {
pub fn spawn<F>(
runtime: &tokio::runtime::Handle,
kind: TaskKind,
tenant_id: Option<ZTenantId>,
timeline_id: Option<ZTimelineId>,
tenant_id: Option<TenantId>,
timeline_id: Option<TimelineId>,
name: &str,
shutdown_process_on_error: bool,
future: F,
@@ -371,7 +371,7 @@ async fn task_finish(
}
// expected to be called from the task of the given id.
pub fn associate_with(tenant_id: Option<ZTenantId>, timeline_id: Option<ZTimelineId>) {
pub fn associate_with(tenant_id: Option<TenantId>, timeline_id: Option<TimelineId>) {
CURRENT_TASK.with(|ct| {
let mut task_mut = ct.mutable.lock().unwrap();
task_mut.tenant_id = tenant_id;
@@ -391,12 +391,12 @@ pub fn associate_with(tenant_id: Option<ZTenantId>, timeline_id: Option<ZTimelin
///
/// Or to shut down all tasks for given timeline:
///
/// shutdown_tasks(None, Some(tenantid), Some(timelineid))
/// shutdown_tasks(None, Some(tenant_id), Some(timeline_id))
///
pub async fn shutdown_tasks(
kind: Option<TaskKind>,
tenant_id: Option<ZTenantId>,
timeline_id: Option<ZTimelineId>,
tenant_id: Option<TenantId>,
timeline_id: Option<TimelineId>,
) {
let mut victim_tasks = Vec::new();
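
A tiny sketch of the optional-filter matching this function implies: a None criterion matches every task, and a Some criterion must match exactly (a hypothetical helper, not from the source):

fn filter_matches<T: PartialEq>(filter: Option<&T>, value: Option<&T>) -> bool {
    match filter {
        None => true, // no criterion: matches all tasks
        Some(wanted) => value == Some(wanted),
    }
}

fn main() {
    // shutdown_tasks(None, Some(tenant_id), Some(timeline_id)) style checks:
    assert!(filter_matches(None::<&u32>, Some(&7)));
    assert!(filter_matches(Some(&7), Some(&7)));
    assert!(!filter_matches(Some(&7), Some(&8)));
}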


@@ -4,7 +4,7 @@
//! The functions here are responsible for locating the correct layer for the
//! get/put call, walking back the timeline branching history as needed.
//!
//! The files are stored in the .neon/tenants/<tenantid>/timelines/<timelineid>
//! The files are stored in the .neon/tenants/<tenant_id>/timelines/<timeline_id>
//! directory. See docs/pageserver-storage.md for how the files are managed.
//! In addition to the layer files, there is a metadata file in the same
//! directory that contains information about the timeline, in particular its
@@ -48,8 +48,8 @@ use crate::CheckpointConfig;
use toml_edit;
use utils::{
crashsafe_dir,
id::{TenantId, TimelineId},
lsn::{Lsn, RecordLsn},
zid::{ZTenantId, ZTimelineId},
};
mod blob_io;
@@ -80,7 +80,7 @@ pub use crate::tenant::metadata::save_metadata;
// re-export for use in walreceiver
pub use crate::tenant::timeline::WalReceiverInfo;
/// Parts of the `.neon/tenants/<tenantid>/timelines/<timelineid>` directory prefix.
/// Parts of the `.neon/tenants/<tenant_id>/timelines/<timeline_id>` directory prefix.
pub const TIMELINES_SEGMENT_NAME: &str = "timelines";
///
@@ -98,8 +98,8 @@ pub struct Tenant {
// This is necessary to allow global config updates.
tenant_conf: Arc<RwLock<TenantConfOpt>>,
tenant_id: ZTenantId,
timelines: Mutex<HashMap<ZTimelineId, Arc<Timeline>>>,
tenant_id: TenantId,
timelines: Mutex<HashMap<TimelineId, Arc<Timeline>>>,
// This mutex prevents creation of new timelines during GC.
// Adding yet another mutex (in addition to `timelines`) is needed because holding
// `timelines` mutex during all GC iteration (especially with enforced checkpoint)
@@ -134,7 +134,7 @@ pub enum TenantState {
impl Tenant {
/// Get Timeline handle for given Neon timeline ID.
/// This function is idempotent. It doesn't change internal state in any way.
pub fn get_timeline(&self, timeline_id: ZTimelineId) -> anyhow::Result<Arc<Timeline>> {
pub fn get_timeline(&self, timeline_id: TimelineId) -> anyhow::Result<Arc<Timeline>> {
self.timelines
.lock()
.unwrap()
@@ -151,7 +151,7 @@ impl Tenant {
/// Lists timelines the tenant contains.
/// It is up to the tenant's implementation to omit certain timelines that are not considered ready for use.
pub fn list_timelines(&self) -> Vec<(ZTimelineId, Arc<Timeline>)> {
pub fn list_timelines(&self) -> Vec<(TimelineId, Arc<Timeline>)> {
self.timelines
.lock()
.unwrap()
@@ -164,7 +164,7 @@ impl Tenant {
/// The initdb LSN is provided so that the timeline implementation can perform checks for some operations against it.
pub fn create_empty_timeline(
&self,
new_timeline_id: ZTimelineId,
new_timeline_id: TimelineId,
initdb_lsn: Lsn,
) -> Result<Arc<Timeline>> {
// XXX: keep the lock to avoid races during timeline creation
@@ -207,8 +207,8 @@ impl Tenant {
/// Branch a timeline
pub fn branch_timeline(
&self,
src: ZTimelineId,
dst: ZTimelineId,
src: TimelineId,
dst: TimelineId,
start_lsn: Option<Lsn>,
) -> Result<Arc<Timeline>> {
// We need to hold this lock to prevent GC from starting at the same time. GC scans the directory to learn
@@ -302,14 +302,14 @@ impl Tenant {
/// This function is periodically called by the GC task.
/// It can also be explicitly requested through the page server API 'do_gc' command.
///
/// 'timelineid' specifies the timeline to GC, or None for all.
/// 'target_timeline_id' specifies the timeline to GC, or None for all.
/// `horizon` specifies the delta from the last LSN to preserve all object versions (pitr interval).
/// The `checkpoint_before_gc` parameter is used to force compaction of storage before GC
/// to make tests more deterministic.
/// TODO Do we still need it, or can we call checkpoint explicitly in tests where needed?
pub fn gc_iteration(
&self,
target_timeline_id: Option<ZTimelineId>,
target_timeline_id: Option<TimelineId>,
horizon: u64,
pitr: Duration,
checkpoint_before_gc: bool,
@@ -337,13 +337,13 @@ impl Tenant {
let timelines = self.timelines.lock().unwrap();
let timelines_to_compact = timelines
.iter()
.map(|(timelineid, timeline)| (*timelineid, timeline.clone()))
.map(|(timeline_id, timeline)| (*timeline_id, timeline.clone()))
.collect::<Vec<_>>();
drop(timelines);
for (timelineid, timeline) in &timelines_to_compact {
for (timeline_id, timeline) in &timelines_to_compact {
let _entered =
info_span!("compact", timeline = %timelineid, tenant = %self.tenant_id).entered();
info_span!("compact", timeline = %timeline_id, tenant = %self.tenant_id).entered();
timeline.compact()?;
}
@@ -362,13 +362,13 @@ impl Tenant {
let timelines = self.timelines.lock().unwrap();
let timelines_to_compact = timelines
.iter()
.map(|(timelineid, timeline)| (*timelineid, Arc::clone(timeline)))
.map(|(timeline_id, timeline)| (*timeline_id, Arc::clone(timeline)))
.collect::<Vec<_>>();
drop(timelines);
for (timelineid, timeline) in &timelines_to_compact {
for (timeline_id, timeline) in &timelines_to_compact {
let _entered =
info_span!("checkpoint", timeline = %timelineid, tenant = %self.tenant_id)
info_span!("checkpoint", timeline = %timeline_id, tenant = %self.tenant_id)
.entered();
timeline.checkpoint(CheckpointConfig::Flush)?;
}
@@ -377,7 +377,7 @@ impl Tenant {
}
/// Removes timeline-related in-memory data
pub fn delete_timeline(&self, timeline_id: ZTimelineId) -> anyhow::Result<()> {
pub fn delete_timeline(&self, timeline_id: TimelineId) -> anyhow::Result<()> {
// in order to be retriable, detach needs to be idempotent
// (or at least to the point that each time detach is called it can make progress)
let mut timelines = self.timelines.lock().unwrap();
@@ -416,7 +416,7 @@ impl Tenant {
pub fn init_attach_timelines(
&self,
timelines: HashMap<ZTimelineId, TimelineMetadata>,
timelines: HashMap<TimelineId, TimelineMetadata>,
) -> anyhow::Result<()> {
let sorted_timelines = if timelines.len() == 1 {
timelines.into_iter().collect()
@@ -505,13 +505,13 @@ impl Tenant {
/// perform a topological sort, so that the parent of each timeline comes
/// before the children.
fn tree_sort_timelines(
timelines: HashMap<ZTimelineId, TimelineMetadata>,
) -> Result<Vec<(ZTimelineId, TimelineMetadata)>> {
timelines: HashMap<TimelineId, TimelineMetadata>,
) -> Result<Vec<(TimelineId, TimelineMetadata)>> {
let mut result = Vec::with_capacity(timelines.len());
let mut now = Vec::with_capacity(timelines.len());
// (ancestor, children)
let mut later: HashMap<ZTimelineId, Vec<(ZTimelineId, TimelineMetadata)>> =
let mut later: HashMap<TimelineId, Vec<(TimelineId, TimelineMetadata)>> =
HashMap::with_capacity(timelines.len());
for (timeline_id, metadata) in timelines {
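
The `result`/`now`/`later` setup above is a topological sort over the ancestry tree. A hedged, self-contained sketch of the same idea (plain strings as ids; real code also has to reject orphaned ancestors, which this sketch skips):

use std::collections::HashMap;

fn tree_sort<'a>(timelines: HashMap<&'a str, Option<&'a str>>) -> Vec<&'a str> {
    let mut result = Vec::with_capacity(timelines.len());
    let mut now = Vec::new(); // timelines ready to emit
    let mut later: HashMap<&str, Vec<&str>> = HashMap::new(); // ancestor -> children

    for (timeline_id, ancestor) in timelines {
        match ancestor {
            None => now.push(timeline_id),
            Some(ancestor_id) => later.entry(ancestor_id).or_default().push(timeline_id),
        }
    }
    while let Some(timeline_id) = now.pop() {
        result.push(timeline_id);
        // Children become ready once their ancestor is placed.
        if let Some(children) = later.remove(timeline_id) {
            now.extend(children);
        }
    }
    result
}

fn main() {
    let mut t = HashMap::new();
    t.insert("root", None);
    t.insert("child", Some("root"));
    t.insert("grandchild", Some("child"));
    assert_eq!(tree_sort(t), vec!["root", "child", "grandchild"]);
}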
@@ -636,9 +636,9 @@ impl Tenant {
fn initialize_new_timeline(
&self,
new_timeline_id: ZTimelineId,
new_timeline_id: TimelineId,
new_metadata: TimelineMetadata,
timelines: &mut MutexGuard<HashMap<ZTimelineId, Arc<Timeline>>>,
timelines: &mut MutexGuard<HashMap<TimelineId, Arc<Timeline>>>,
) -> anyhow::Result<Arc<Timeline>> {
let ancestor = match new_metadata.ancestor_timeline() {
Some(ancestor_timeline_id) => Some(
@@ -680,7 +680,7 @@ impl Tenant {
conf: &'static PageServerConf,
tenant_conf: TenantConfOpt,
walredo_mgr: Arc<dyn WalRedoManager + Send + Sync>,
tenant_id: ZTenantId,
tenant_id: TenantId,
remote_index: RemoteIndex,
upload_layers: bool,
) -> Tenant {
@@ -701,7 +701,7 @@ impl Tenant {
/// Locate and load config
pub fn load_tenant_config(
conf: &'static PageServerConf,
tenant_id: ZTenantId,
tenant_id: TenantId,
) -> anyhow::Result<TenantConfOpt> {
let target_config_path = TenantConf::path(conf, tenant_id);
let target_config_display = target_config_path.display();
@@ -830,7 +830,7 @@ impl Tenant {
// we do.
fn gc_iteration_internal(
&self,
target_timeline_id: Option<ZTimelineId>,
target_timeline_id: Option<TimelineId>,
horizon: u64,
pitr: Duration,
checkpoint_before_gc: bool,
@@ -848,7 +848,7 @@ impl Tenant {
// Scan all timelines. For each timeline, remember the timeline ID and
// the branch point where it was created.
let mut all_branchpoints: BTreeSet<(ZTimelineId, Lsn)> = BTreeSet::new();
let mut all_branchpoints: BTreeSet<(TimelineId, Lsn)> = BTreeSet::new();
let timeline_ids = {
if let Some(target_timeline_id) = target_timeline_id.as_ref() {
if timelines.get(target_timeline_id).is_none() {
@@ -861,11 +861,11 @@ impl Tenant {
.map(|(timeline_id, timeline_entry)| {
// How to do gc in the presence of remote timelines is an unresolved question for now,
// especially when this is combined with branching.
// Somewhat related: https://github.com/zenithdb/zenith/issues/999
// Somewhat related: https://github.com/neondatabase/neon/issues/999
if let Some(ancestor_timeline_id) = &timeline_entry.get_ancestor_timeline_id() {
// If target_timeline is specified, we only need to know branchpoints of its children
if let Some(timelineid) = target_timeline_id {
if ancestor_timeline_id == &timelineid {
if let Some(timeline_id) = target_timeline_id {
if ancestor_timeline_id == &timeline_id {
all_branchpoints.insert((
*ancestor_timeline_id,
timeline_entry.get_ancestor_lsn(),
@@ -895,8 +895,8 @@ impl Tenant {
.with_context(|| format!("Timeline {timeline_id} was not found"))?;
// If target_timeline is specified, ignore all other timelines
if let Some(target_timelineid) = target_timeline_id {
if timeline_id != target_timelineid {
if let Some(target_timeline_id) = target_timeline_id {
if timeline_id != target_timeline_id {
continue;
}
}
@@ -952,7 +952,7 @@ impl Tenant {
Ok(totals)
}
pub fn tenant_id(&self) -> ZTenantId {
pub fn tenant_id(&self) -> TenantId {
self.tenant_id
}
}
@@ -998,7 +998,7 @@ pub mod harness {
config::PageServerConf,
repository::Key,
tenant::Tenant,
walrecord::ZenithWalRecord,
walrecord::NeonWalRecord,
walredo::{WalRedoError, WalRedoManager},
};
@@ -1006,12 +1006,12 @@ pub mod harness {
use super::*;
use crate::tenant_config::{TenantConf, TenantConfOpt};
use hex_literal::hex;
use utils::zid::{ZTenantId, ZTimelineId};
use utils::id::{TenantId, TimelineId};
pub const TIMELINE_ID: ZTimelineId =
ZTimelineId::from_array(hex!("11223344556677881122334455667788"));
pub const NEW_TIMELINE_ID: ZTimelineId =
ZTimelineId::from_array(hex!("AA223344556677881122334455667788"));
pub const TIMELINE_ID: TimelineId =
TimelineId::from_array(hex!("11223344556677881122334455667788"));
pub const NEW_TIMELINE_ID: TimelineId =
TimelineId::from_array(hex!("AA223344556677881122334455667788"));
/// Convenience function to create a page image with the given string as the only content
#[allow(non_snake_case)]
@@ -1047,7 +1047,7 @@ pub mod harness {
pub struct TenantHarness<'a> {
pub conf: &'static PageServerConf,
pub tenant_conf: TenantConf,
pub tenant_id: ZTenantId,
pub tenant_id: TenantId,
pub lock_guard: (
Option<RwLockReadGuard<'a, ()>>,
@@ -1080,7 +1080,7 @@ pub mod harness {
let tenant_conf = TenantConf::dummy_conf();
let tenant_id = ZTenantId::generate();
let tenant_id = TenantId::generate();
fs::create_dir_all(conf.tenant_path(&tenant_id))?;
fs::create_dir_all(conf.timelines_path(&tenant_id))?;
@@ -1113,7 +1113,7 @@ pub mod harness {
.expect("should be able to read timelines dir")
{
let timeline_dir_entry = timeline_dir_entry?;
let timeline_id: ZTimelineId = timeline_dir_entry
let timeline_id: TimelineId = timeline_dir_entry
.path()
.file_name()
.unwrap()
@@ -1128,15 +1128,15 @@ pub mod harness {
Ok(tenant)
}
pub fn timeline_path(&self, timeline_id: &ZTimelineId) -> PathBuf {
pub fn timeline_path(&self, timeline_id: &TimelineId) -> PathBuf {
self.conf.timeline_path(timeline_id, &self.tenant_id)
}
}
fn load_metadata(
conf: &'static PageServerConf,
timeline_id: ZTimelineId,
tenant_id: ZTenantId,
timeline_id: TimelineId,
tenant_id: TenantId,
) -> anyhow::Result<TimelineMetadata> {
let metadata_path = metadata_path(conf, timeline_id, tenant_id);
let metadata_bytes = std::fs::read(&metadata_path).with_context(|| {
@@ -1162,7 +1162,7 @@ pub mod harness {
key: Key,
lsn: Lsn,
base_img: Option<Bytes>,
records: Vec<(Lsn, ZenithWalRecord)>,
records: Vec<(Lsn, NeonWalRecord)>,
) -> Result<Bytes, WalRedoError> {
let s = format!(
"redo for {} to get to {}, with {} and {} records",
@@ -1747,7 +1747,7 @@ mod tests {
let mut tline_id = TIMELINE_ID;
for _ in 0..50 {
let new_tline_id = ZTimelineId::generate();
let new_tline_id = TimelineId::generate();
tenant.branch_timeline(tline_id, new_tline_id, Some(lsn))?;
tline = tenant
.get_timeline(new_tline_id)
@@ -1808,7 +1808,7 @@ mod tests {
#[allow(clippy::needless_range_loop)]
for idx in 0..NUM_TLINES {
let new_tline_id = ZTimelineId::generate();
let new_tline_id = TimelineId::generate();
tenant.branch_timeline(tline_id, new_tline_id, Some(lsn))?;
tline = tenant
.get_timeline(new_tline_id)


@@ -7,7 +7,7 @@
//! must be page images or WAL records with the 'will_init' flag set, so that
//! they can be replayed without referring to an older page version.
//!
//! The delta files are stored in timelines/<timelineid> directory. Currently,
//! The delta files are stored in timelines/<timeline_id> directory. Currently,
//! there are no subdirectories, and each delta file is named like this:
//!
//! <key start>-<key end>__<start LSN>-<end LSN>
@@ -48,8 +48,8 @@ use tracing::*;
use utils::{
bin_ser::BeSer,
id::{TenantId, TimelineId},
lsn::Lsn,
zid::{ZTenantId, ZTimelineId},
};
///
@@ -60,12 +60,12 @@ use utils::{
///
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
struct Summary {
/// Magic value to identify this as a zenith delta file. Always DELTA_FILE_MAGIC.
/// Magic value to identify this as a neon delta file. Always DELTA_FILE_MAGIC.
magic: u16,
format_version: u16,
tenantid: ZTenantId,
timelineid: ZTimelineId,
tenant_id: TenantId,
timeline_id: TimelineId,
key_range: Range<Key>,
lsn_range: Range<Lsn>,
@@ -81,8 +81,8 @@ impl From<&DeltaLayer> for Summary {
magic: DELTA_FILE_MAGIC,
format_version: STORAGE_FORMAT_VERSION,
tenantid: layer.tenantid,
timelineid: layer.timelineid,
tenant_id: layer.tenant_id,
timeline_id: layer.timeline_id,
key_range: layer.key_range.clone(),
lsn_range: layer.lsn_range.clone(),
@@ -173,8 +173,8 @@ impl DeltaKey {
pub struct DeltaLayer {
path_or_conf: PathOrConf,
pub tenantid: ZTenantId,
pub timelineid: ZTimelineId,
pub tenant_id: TenantId,
pub timeline_id: TimelineId,
pub key_range: Range<Key>,
pub lsn_range: Range<Lsn>,
@@ -194,12 +194,12 @@ pub struct DeltaLayerInner {
}
impl Layer for DeltaLayer {
fn get_tenant_id(&self) -> ZTenantId {
self.tenantid
fn get_tenant_id(&self) -> TenantId {
self.tenant_id
}
fn get_timeline_id(&self) -> ZTimelineId {
self.timelineid
fn get_timeline_id(&self) -> TimelineId {
self.timeline_id
}
fn get_key_range(&self) -> Range<Key> {
@@ -344,8 +344,8 @@ impl Layer for DeltaLayer {
fn dump(&self, verbose: bool) -> Result<()> {
println!(
"----- delta layer for ten {} tli {} keys {}-{} lsn {}-{} ----",
self.tenantid,
self.timelineid,
self.tenant_id,
self.timeline_id,
self.key_range.start,
self.key_range.end,
self.lsn_range.start,
@@ -419,22 +419,22 @@ impl Layer for DeltaLayer {
impl DeltaLayer {
fn path_for(
path_or_conf: &PathOrConf,
timelineid: ZTimelineId,
tenantid: ZTenantId,
timeline_id: TimelineId,
tenant_id: TenantId,
fname: &DeltaFileName,
) -> PathBuf {
match path_or_conf {
PathOrConf::Path(path) => path.clone(),
PathOrConf::Conf(conf) => conf
.timeline_path(&timelineid, &tenantid)
.timeline_path(&timeline_id, &tenant_id)
.join(fname.to_string()),
}
}
fn temp_path_for(
conf: &PageServerConf,
timelineid: ZTimelineId,
tenantid: ZTenantId,
timeline_id: TimelineId,
tenant_id: TenantId,
key_start: Key,
lsn_range: &Range<Lsn>,
) -> PathBuf {
@@ -444,7 +444,7 @@ impl DeltaLayer {
.map(char::from)
.collect();
conf.timeline_path(&timelineid, &tenantid).join(format!(
conf.timeline_path(&timeline_id, &tenant_id).join(format!(
"{}-XXX__{:016X}-{:016X}.{}.{}",
key_start,
u64::from(lsn_range.start),
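
To make the format concrete, a throwaway sketch of the resulting temporary file name (all values hypothetical; real keys are rendered by Key's Display impl):

fn example_temp_delta_name() -> String {
    let key_start = "000000000000000000000000000000000000";
    let (lsn_start, lsn_end) = (0x0169_60E8_u64, 0x016B_9388_u64);
    let (rand_string, temp_suffix) = ("a1b2c3d4", "temp");
    // Mirrors the "{}-XXX__{:016X}-{:016X}.{}.{}" template above.
    format!("{key_start}-XXX__{lsn_start:016X}-{lsn_end:016X}.{rand_string}.{temp_suffix}")
}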
@@ -535,14 +535,14 @@ impl DeltaLayer {
/// Create a DeltaLayer struct representing an existing file on disk.
pub fn new(
conf: &'static PageServerConf,
timelineid: ZTimelineId,
tenantid: ZTenantId,
timeline_id: TimelineId,
tenant_id: TenantId,
filename: &DeltaFileName,
) -> DeltaLayer {
DeltaLayer {
path_or_conf: PathOrConf::Conf(conf),
timelineid,
tenantid,
timeline_id,
tenant_id,
key_range: filename.key_range.clone(),
lsn_range: filename.lsn_range.clone(),
inner: RwLock::new(DeltaLayerInner {
@@ -568,8 +568,8 @@ impl DeltaLayer {
Ok(DeltaLayer {
path_or_conf: PathOrConf::Path(path.to_path_buf()),
timelineid: summary.timelineid,
tenantid: summary.tenantid,
timeline_id: summary.timeline_id,
tenant_id: summary.tenant_id,
key_range: summary.key_range,
lsn_range: summary.lsn_range,
inner: RwLock::new(DeltaLayerInner {
@@ -592,8 +592,8 @@ impl DeltaLayer {
pub fn path(&self) -> PathBuf {
Self::path_for(
&self.path_or_conf,
self.timelineid,
self.tenantid,
self.timeline_id,
self.tenant_id,
&self.layer_name(),
)
}
@@ -613,8 +613,8 @@ impl DeltaLayer {
pub struct DeltaLayerWriter {
conf: &'static PageServerConf,
path: PathBuf,
timelineid: ZTimelineId,
tenantid: ZTenantId,
timeline_id: TimelineId,
tenant_id: TenantId,
key_start: Key,
lsn_range: Range<Lsn>,
@@ -630,8 +630,8 @@ impl DeltaLayerWriter {
///
pub fn new(
conf: &'static PageServerConf,
timelineid: ZTimelineId,
tenantid: ZTenantId,
timeline_id: TimelineId,
tenant_id: TenantId,
key_start: Key,
lsn_range: Range<Lsn>,
) -> Result<DeltaLayerWriter> {
@@ -641,7 +641,7 @@ impl DeltaLayerWriter {
//
// Note: This overwrites any existing file. There shouldn't be any.
// FIXME: throw an error instead?
let path = DeltaLayer::temp_path_for(conf, timelineid, tenantid, key_start, &lsn_range);
let path = DeltaLayer::temp_path_for(conf, timeline_id, tenant_id, key_start, &lsn_range);
let mut file = VirtualFile::create(&path)?;
// make room for the header block
@@ -656,8 +656,8 @@ impl DeltaLayerWriter {
Ok(DeltaLayerWriter {
conf,
path,
timelineid,
tenantid,
timeline_id,
tenant_id,
key_start,
lsn_range,
tree: tree_builder,
@@ -718,8 +718,8 @@ impl DeltaLayerWriter {
let summary = Summary {
magic: DELTA_FILE_MAGIC,
format_version: STORAGE_FORMAT_VERSION,
tenantid: self.tenantid,
timelineid: self.timelineid,
tenant_id: self.tenant_id,
timeline_id: self.timeline_id,
key_range: self.key_start..key_end,
lsn_range: self.lsn_range.clone(),
index_start_blk,
@@ -733,8 +733,8 @@ impl DeltaLayerWriter {
// set inner.file here. The first read will have to re-open it.
let layer = DeltaLayer {
path_or_conf: PathOrConf::Conf(self.conf),
tenantid: self.tenantid,
timelineid: self.timelineid,
tenant_id: self.tenant_id,
timeline_id: self.timeline_id,
key_range: self.key_start..key_end,
lsn_range: self.lsn_range.clone(),
inner: RwLock::new(DeltaLayerInner {
@@ -753,8 +753,8 @@ impl DeltaLayerWriter {
// FIXME: throw an error instead?
let final_path = DeltaLayer::path_for(
&PathOrConf::Conf(self.conf),
self.timelineid,
self.tenantid,
self.timeline_id,
self.tenant_id,
&DeltaFileName {
key_range: self.key_start..key_end,
lsn_range: self.lsn_range,


@@ -17,7 +17,7 @@ use std::ops::DerefMut;
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
use tracing::*;
use utils::zid::{ZTenantId, ZTimelineId};
use utils::id::{TenantId, TimelineId};
use std::os::unix::fs::FileExt;
@@ -39,8 +39,8 @@ pub struct EphemeralFiles {
pub struct EphemeralFile {
file_id: u64,
_tenantid: ZTenantId,
_timelineid: ZTimelineId,
_tenant_id: TenantId,
_timeline_id: TimelineId,
file: Arc<VirtualFile>,
pub size: u64,
@@ -49,15 +49,15 @@ pub struct EphemeralFile {
impl EphemeralFile {
pub fn create(
conf: &PageServerConf,
tenantid: ZTenantId,
timelineid: ZTimelineId,
tenant_id: TenantId,
timeline_id: TimelineId,
) -> Result<EphemeralFile, io::Error> {
let mut l = EPHEMERAL_FILES.write().unwrap();
let file_id = l.next_file_id;
l.next_file_id += 1;
let filename = conf
.timeline_path(&timelineid, &tenantid)
.timeline_path(&timeline_id, &tenant_id)
.join(PathBuf::from(format!("ephemeral-{}", file_id)));
let file = VirtualFile::open_with_options(
@@ -69,8 +69,8 @@ impl EphemeralFile {
Ok(EphemeralFile {
file_id,
_tenantid: tenantid,
_timelineid: timelineid,
_tenant_id: tenant_id,
_timeline_id: timeline_id,
file: file_rc,
size: 0,
})
@@ -338,7 +338,7 @@ mod tests {
fn harness(
test_name: &str,
) -> Result<(&'static PageServerConf, ZTenantId, ZTimelineId), io::Error> {
) -> Result<(&'static PageServerConf, TenantId, TimelineId), io::Error> {
let repo_dir = PageServerConf::test_repo_dir(test_name);
let _ = fs::remove_dir_all(&repo_dir);
let conf = PageServerConf::dummy_conf(repo_dir);
@@ -346,11 +346,11 @@ mod tests {
// OK in a test.
let conf: &'static PageServerConf = Box::leak(Box::new(conf));
let tenantid = ZTenantId::from_str("11000000000000000000000000000000").unwrap();
let timelineid = ZTimelineId::from_str("22000000000000000000000000000000").unwrap();
fs::create_dir_all(conf.timeline_path(&timelineid, &tenantid))?;
let tenant_id = TenantId::from_str("11000000000000000000000000000000").unwrap();
let timeline_id = TimelineId::from_str("22000000000000000000000000000000").unwrap();
fs::create_dir_all(conf.timeline_path(&timeline_id, &tenant_id))?;
Ok((conf, tenantid, timelineid))
Ok((conf, tenant_id, timeline_id))
}
// Helper function to slurp contents of a file, starting at the current position,
@@ -368,9 +368,9 @@ mod tests {
#[test]
fn test_ephemeral_files() -> Result<(), io::Error> {
let (conf, tenantid, timelineid) = harness("ephemeral_files")?;
let (conf, tenant_id, timeline_id) = harness("ephemeral_files")?;
let file_a = EphemeralFile::create(conf, tenantid, timelineid)?;
let file_a = EphemeralFile::create(conf, tenant_id, timeline_id)?;
file_a.write_all_at(b"foo", 0)?;
assert_eq!("foo", read_string(&file_a, 0, 20)?);
@@ -381,7 +381,7 @@ mod tests {
// Open a lot of files, enough to cause some page evictions.
let mut efiles = Vec::new();
for fileno in 0..100 {
let efile = EphemeralFile::create(conf, tenantid, timelineid)?;
let efile = EphemeralFile::create(conf, tenant_id, timeline_id)?;
efile.write_all_at(format!("file {}", fileno).as_bytes(), 0)?;
assert_eq!(format!("file {}", fileno), read_string(&efile, 0, 10)?);
efiles.push((fileno, efile));
@@ -399,9 +399,9 @@ mod tests {
#[test]
fn test_ephemeral_blobs() -> Result<(), io::Error> {
let (conf, tenantid, timelineid) = harness("ephemeral_blobs")?;
let (conf, tenant_id, timeline_id) = harness("ephemeral_blobs")?;
let mut file = EphemeralFile::create(conf, tenantid, timelineid)?;
let mut file = EphemeralFile::create(conf, tenant_id, timeline_id)?;
let pos_foo = file.write_blob(b"foo")?;
assert_eq!(b"foo", file.block_cursor().read_blob(pos_foo)?.as_slice());


@@ -4,7 +4,7 @@
//! but does not exist in the layer, does not exist.
//!
//! An image layer is stored in a file on disk. The file is stored in
//! timelines/<timelineid> directory. Currently, there are no
//! timelines/<timeline_id> directory. Currently, there are no
//! subdirectories, and each image layer file is named like this:
//!
//! <key start>-<key end>__<LSN>
@@ -44,8 +44,8 @@ use tracing::*;
use utils::{
bin_ser::BeSer,
id::{TenantId, TimelineId},
lsn::Lsn,
zid::{ZTenantId, ZTimelineId},
};
///
@@ -56,12 +56,12 @@ use utils::{
///
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
struct Summary {
/// Magic value to identify this as a zenith image file. Always IMAGE_FILE_MAGIC.
/// Magic value to identify this as a neon image file. Always IMAGE_FILE_MAGIC.
magic: u16,
format_version: u16,
tenantid: ZTenantId,
timelineid: ZTimelineId,
tenant_id: TenantId,
timeline_id: TimelineId,
key_range: Range<Key>,
lsn: Lsn,
@@ -77,8 +77,8 @@ impl From<&ImageLayer> for Summary {
Self {
magic: IMAGE_FILE_MAGIC,
format_version: STORAGE_FORMAT_VERSION,
tenantid: layer.tenantid,
timelineid: layer.timelineid,
tenant_id: layer.tenant_id,
timeline_id: layer.timeline_id,
key_range: layer.key_range.clone(),
lsn: layer.lsn,
@@ -97,8 +97,8 @@ impl From<&ImageLayer> for Summary {
///
pub struct ImageLayer {
path_or_conf: PathOrConf,
pub tenantid: ZTenantId,
pub timelineid: ZTimelineId,
pub tenant_id: TenantId,
pub timeline_id: TimelineId,
pub key_range: Range<Key>,
// This entry contains an image of all pages as of this LSN
@@ -128,12 +128,12 @@ impl Layer for ImageLayer {
Some(self.path())
}
fn get_tenant_id(&self) -> ZTenantId {
self.tenantid
fn get_tenant_id(&self) -> TenantId {
self.tenant_id
}
fn get_timeline_id(&self) -> ZTimelineId {
self.timelineid
fn get_timeline_id(&self) -> TimelineId {
self.timeline_id
}
fn get_key_range(&self) -> Range<Key> {
@@ -202,7 +202,7 @@ impl Layer for ImageLayer {
fn dump(&self, verbose: bool) -> Result<()> {
println!(
"----- image layer for ten {} tli {} key {}-{} at {} ----",
self.tenantid, self.timelineid, self.key_range.start, self.key_range.end, self.lsn
self.tenant_id, self.timeline_id, self.key_range.start, self.key_range.end, self.lsn
);
if !verbose {
@@ -228,22 +228,22 @@ impl Layer for ImageLayer {
impl ImageLayer {
fn path_for(
path_or_conf: &PathOrConf,
timelineid: ZTimelineId,
tenantid: ZTenantId,
timeline_id: TimelineId,
tenant_id: TenantId,
fname: &ImageFileName,
) -> PathBuf {
match path_or_conf {
PathOrConf::Path(path) => path.to_path_buf(),
PathOrConf::Conf(conf) => conf
.timeline_path(&timelineid, &tenantid)
.timeline_path(&timeline_id, &tenant_id)
.join(fname.to_string()),
}
}
fn temp_path_for(
conf: &PageServerConf,
timelineid: ZTimelineId,
tenantid: ZTenantId,
timeline_id: TimelineId,
tenant_id: TenantId,
fname: &ImageFileName,
) -> PathBuf {
let rand_string: String = rand::thread_rng()
@@ -252,7 +252,7 @@ impl ImageLayer {
.map(char::from)
.collect();
conf.timeline_path(&timelineid, &tenantid)
conf.timeline_path(&timeline_id, &tenant_id)
.join(format!("{fname}.{rand_string}.{TEMP_FILE_SUFFIX}"))
}
@@ -336,14 +336,14 @@ impl ImageLayer {
/// Create an ImageLayer struct representing an existing file on disk
pub fn new(
conf: &'static PageServerConf,
timelineid: ZTimelineId,
tenantid: ZTenantId,
timeline_id: TimelineId,
tenant_id: TenantId,
filename: &ImageFileName,
) -> ImageLayer {
ImageLayer {
path_or_conf: PathOrConf::Conf(conf),
timelineid,
tenantid,
timeline_id,
tenant_id,
key_range: filename.key_range.clone(),
lsn: filename.lsn,
inner: RwLock::new(ImageLayerInner {
@@ -369,8 +369,8 @@ impl ImageLayer {
Ok(ImageLayer {
path_or_conf: PathOrConf::Path(path.to_path_buf()),
timelineid: summary.timelineid,
tenantid: summary.tenantid,
timeline_id: summary.timeline_id,
tenant_id: summary.tenant_id,
key_range: summary.key_range,
lsn: summary.lsn,
inner: RwLock::new(ImageLayerInner {
@@ -393,8 +393,8 @@ impl ImageLayer {
pub fn path(&self) -> PathBuf {
Self::path_for(
&self.path_or_conf,
self.timelineid,
self.tenantid,
self.timeline_id,
self.tenant_id,
&self.layer_name(),
)
}
@@ -414,8 +414,8 @@ impl ImageLayer {
pub struct ImageLayerWriter {
conf: &'static PageServerConf,
path: PathBuf,
timelineid: ZTimelineId,
tenantid: ZTenantId,
timeline_id: TimelineId,
tenant_id: TenantId,
key_range: Range<Key>,
lsn: Lsn,
@@ -426,8 +426,8 @@ pub struct ImageLayerWriter {
impl ImageLayerWriter {
pub fn new(
conf: &'static PageServerConf,
timelineid: ZTimelineId,
tenantid: ZTenantId,
timeline_id: TimelineId,
tenant_id: TenantId,
key_range: &Range<Key>,
lsn: Lsn,
) -> anyhow::Result<ImageLayerWriter> {
@@ -435,8 +435,8 @@ impl ImageLayerWriter {
// We'll atomically rename it to the final name when we're done.
let path = ImageLayer::temp_path_for(
conf,
timelineid,
tenantid,
timeline_id,
tenant_id,
&ImageFileName {
key_range: key_range.clone(),
lsn,
@@ -458,8 +458,8 @@ impl ImageLayerWriter {
let writer = ImageLayerWriter {
conf,
path,
timelineid,
tenantid,
timeline_id,
tenant_id,
key_range: key_range.clone(),
lsn,
tree: tree_builder,
@@ -502,8 +502,8 @@ impl ImageLayerWriter {
let summary = Summary {
magic: IMAGE_FILE_MAGIC,
format_version: STORAGE_FORMAT_VERSION,
tenantid: self.tenantid,
timelineid: self.timelineid,
tenant_id: self.tenant_id,
timeline_id: self.timeline_id,
key_range: self.key_range.clone(),
lsn: self.lsn,
index_start_blk,
@@ -517,8 +517,8 @@ impl ImageLayerWriter {
// set inner.file here. The first read will have to re-open it.
let layer = ImageLayer {
path_or_conf: PathOrConf::Conf(self.conf),
timelineid: self.timelineid,
tenantid: self.tenantid,
timeline_id: self.timeline_id,
tenant_id: self.tenant_id,
key_range: self.key_range.clone(),
lsn: self.lsn,
inner: RwLock::new(ImageLayerInner {
@@ -538,8 +538,8 @@ impl ImageLayerWriter {
// FIXME: throw an error instead?
let final_path = ImageLayer::path_for(
&PathOrConf::Conf(self.conf),
self.timelineid,
self.tenantid,
self.timeline_id,
self.tenant_id,
&ImageFileName {
key_range: self.key_range.clone(),
lsn: self.lsn,


@@ -18,9 +18,9 @@ use std::collections::HashMap;
use tracing::*;
use utils::{
bin_ser::BeSer,
id::{TenantId, TimelineId},
lsn::Lsn,
vec_map::VecMap,
zid::{ZTenantId, ZTimelineId},
};
// avoid binding to Write (conflicts with std::io::Write)
// while being able to use std::fmt::Write's methods
@@ -37,8 +37,8 @@ thread_local! {
pub struct InMemoryLayer {
conf: &'static PageServerConf,
tenantid: ZTenantId,
timelineid: ZTimelineId,
tenant_id: TenantId,
timeline_id: TimelineId,
///
/// This layer contains all the changes from 'start_lsn'. The
@@ -94,12 +94,12 @@ impl Layer for InMemoryLayer {
None
}
fn get_tenant_id(&self) -> ZTenantId {
self.tenantid
fn get_tenant_id(&self) -> TenantId {
self.tenant_id
}
fn get_timeline_id(&self) -> ZTimelineId {
self.timelineid
fn get_timeline_id(&self) -> TimelineId {
self.timeline_id
}
fn get_key_range(&self) -> Range<Key> {
@@ -197,7 +197,7 @@ impl Layer for InMemoryLayer {
println!(
"----- in-memory layer for tli {} LSNs {}-{} ----",
self.timelineid, self.start_lsn, end_str,
self.timeline_id, self.start_lsn, end_str,
);
if !verbose {
@@ -251,22 +251,18 @@ impl InMemoryLayer {
///
pub fn create(
conf: &'static PageServerConf,
timelineid: ZTimelineId,
tenantid: ZTenantId,
timeline_id: TimelineId,
tenant_id: TenantId,
start_lsn: Lsn,
) -> Result<InMemoryLayer> {
trace!(
"initializing new empty InMemoryLayer for writing on timeline {} at {}",
timelineid,
start_lsn
);
trace!("initializing new empty InMemoryLayer for writing on timeline {timeline_id} at {start_lsn}");
let file = EphemeralFile::create(conf, tenantid, timelineid)?;
let file = EphemeralFile::create(conf, tenant_id, timeline_id)?;
Ok(InMemoryLayer {
conf,
timelineid,
tenantid,
timeline_id,
tenant_id,
start_lsn,
inner: RwLock::new(InMemoryLayerInner {
end_lsn: None,
@@ -281,7 +277,7 @@ impl InMemoryLayer {
/// Common subroutine of the public put_wal_record() and put_page_image() functions.
/// Adds the page version to the in-memory tree
pub fn put_value(&self, key: Key, lsn: Lsn, val: &Value) -> Result<()> {
trace!("put_value key {} at {}/{}", key, self.timelineid, lsn);
trace!("put_value key {} at {}/{}", key, self.timeline_id, lsn);
let mut inner = self.inner.write().unwrap();
inner.assert_writeable();
@@ -344,8 +340,8 @@ impl InMemoryLayer {
let mut delta_layer_writer = DeltaLayerWriter::new(
self.conf,
self.timelineid,
self.tenantid,
self.timeline_id,
self.tenant_id,
Key::MIN,
self.start_lsn..inner.end_lsn.unwrap(),
)?;


@@ -2,7 +2,7 @@
//! The layer map tracks what layers exist in a timeline.
//!
//! When the timeline is first accessed, the server lists all the layer files
//! in the timelines/<timelineid> directory, and populates this map with
//! in the timelines/<timeline_id> directory, and populates this map with
//! ImageLayer and DeltaLayer structs corresponding to each file. When the first
//! new WAL record is received, we create an InMemoryLayer to hold the incoming
//! records. Now and then, in the checkpoint() function, the in-memory layer is


@@ -15,8 +15,8 @@ use serde::{Deserialize, Serialize};
use tracing::info_span;
use utils::{
bin_ser::BeSer,
id::{TenantId, TimelineId},
lsn::Lsn,
zid::{ZTenantId, ZTimelineId},
};
use crate::config::PageServerConf;
@@ -63,7 +63,7 @@ struct TimelineMetadataBody {
// doing a clean shutdown, so that there is no more WAL beyond
// 'disk_consistent_lsn'
prev_record_lsn: Option<Lsn>,
ancestor_timeline: Option<ZTimelineId>,
ancestor_timeline: Option<TimelineId>,
ancestor_lsn: Lsn,
latest_gc_cutoff_lsn: Lsn,
initdb_lsn: Lsn,
@@ -73,7 +73,7 @@ impl TimelineMetadata {
pub fn new(
disk_consistent_lsn: Lsn,
prev_record_lsn: Option<Lsn>,
ancestor_timeline: Option<ZTimelineId>,
ancestor_timeline: Option<TimelineId>,
ancestor_lsn: Lsn,
latest_gc_cutoff_lsn: Lsn,
initdb_lsn: Lsn,
@@ -149,7 +149,7 @@ impl TimelineMetadata {
self.body.prev_record_lsn
}
pub fn ancestor_timeline(&self) -> Option<ZTimelineId> {
pub fn ancestor_timeline(&self) -> Option<TimelineId> {
self.body.ancestor_timeline
}
@@ -170,23 +170,23 @@ impl TimelineMetadata {
/// where a certain timeline's metadata file should be located.
pub fn metadata_path(
conf: &'static PageServerConf,
timelineid: ZTimelineId,
tenantid: ZTenantId,
timeline_id: TimelineId,
tenant_id: TenantId,
) -> PathBuf {
conf.timeline_path(&timelineid, &tenantid)
conf.timeline_path(&timeline_id, &tenant_id)
.join(METADATA_FILE_NAME)
}
/// Save timeline metadata to file
pub fn save_metadata(
conf: &'static PageServerConf,
timelineid: ZTimelineId,
tenantid: ZTenantId,
timeline_id: TimelineId,
tenant_id: TenantId,
data: &TimelineMetadata,
first_save: bool,
) -> anyhow::Result<()> {
let _enter = info_span!("saving metadata").entered();
let path = metadata_path(conf, timelineid, tenantid);
let path = metadata_path(conf, timeline_id, tenant_id);
// use OpenOptions to ensure file presence is consistent with first_save
let mut file = VirtualFile::open_with_options(
&path,
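
The first_save flag maps naturally onto OpenOptions::create_new; a minimal sketch of the idea using std::fs directly (the real code goes through VirtualFile):

use std::fs::{File, OpenOptions};
use std::io;
use std::path::Path;

fn open_metadata(path: &Path, first_save: bool) -> io::Result<File> {
    // On the first save the file must not exist yet (create_new);
    // on later saves it must already exist, so a missing file errors.
    OpenOptions::new()
        .write(true)
        .create_new(first_save)
        .open(path)
}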


@@ -3,15 +3,15 @@
//!
use crate::repository::{Key, Value};
use crate::walrecord::ZenithWalRecord;
use crate::walrecord::NeonWalRecord;
use anyhow::Result;
use bytes::Bytes;
use std::ops::Range;
use std::path::PathBuf;
use utils::{
id::{TenantId, TimelineId},
lsn::Lsn,
zid::{ZTenantId, ZTimelineId},
};
pub fn range_overlaps<T>(a: &Range<T>, b: &Range<T>) -> bool
@@ -50,7 +50,7 @@ where
///
#[derive(Debug)]
pub struct ValueReconstructState {
pub records: Vec<(Lsn, ZenithWalRecord)>,
pub records: Vec<(Lsn, NeonWalRecord)>,
pub img: Option<(Lsn, Bytes)>,
}
@@ -84,10 +84,10 @@ pub enum ValueReconstructResult {
/// LSN
///
pub trait Layer: Send + Sync {
fn get_tenant_id(&self) -> ZTenantId;
fn get_tenant_id(&self) -> TenantId;
/// Identify the timeline this layer belongs to
fn get_timeline_id(&self) -> ZTimelineId;
fn get_timeline_id(&self) -> TimelineId;
/// Range of keys that this layer covers
fn get_key_range(&self) -> Range<Key>;
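
Because the trait is object-safe, callers can hold a mix of in-memory, delta, and image layers behind Arc<dyn Layer>. A minimal sketch with stand-in key and layer types (u64 keys and StubLayer are assumptions, not the real types):

use std::ops::Range;
use std::sync::Arc;

trait Layer: Send + Sync {
    fn get_key_range(&self) -> Range<u64>;
}

struct StubLayer(Range<u64>);

impl Layer for StubLayer {
    fn get_key_range(&self) -> Range<u64> {
        self.0.clone()
    }
}

// Collect every layer whose key range covers `key`.
fn covering(layers: &[Arc<dyn Layer>], key: u64) -> Vec<Arc<dyn Layer>> {
    layers
        .iter()
        .filter(|l| l.get_key_range().contains(&key))
        .cloned()
        .collect()
}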


@@ -39,10 +39,10 @@ use crate::tenant_config::TenantConfOpt;
use postgres_ffi::v14::xlog_utils::to_pg_timestamp;
use utils::{
id::{TenantId, TimelineId},
lsn::{AtomicLsn, Lsn, RecordLsn},
seqwait::SeqWait,
simple_rcu::{Rcu, RcuReadGuard},
zid::{ZTenantId, ZTimelineId},
};
use crate::repository::GcResult;
@@ -58,8 +58,8 @@ pub struct Timeline {
conf: &'static PageServerConf,
tenant_conf: Arc<RwLock<TenantConfOpt>>,
pub tenant_id: ZTenantId,
pub timeline_id: ZTimelineId,
pub tenant_id: TenantId,
pub timeline_id: TimelineId,
pub layers: RwLock<LayerMap>,
@@ -312,7 +312,7 @@ impl Timeline {
}
/// Get the ancestor's timeline id
pub fn get_ancestor_timeline_id(&self) -> Option<ZTimelineId> {
pub fn get_ancestor_timeline_id(&self) -> Option<TimelineId> {
self.ancestor_timeline
.as_ref()
.map(|ancestor| ancestor.timeline_id)
@@ -531,8 +531,8 @@ impl Timeline {
tenant_conf: Arc<RwLock<TenantConfOpt>>,
metadata: TimelineMetadata,
ancestor: Option<Arc<Timeline>>,
timeline_id: ZTimelineId,
tenant_id: ZTenantId,
timeline_id: TimelineId,
tenant_id: TenantId,
walredo_mgr: Arc<dyn WalRedoManager + Send + Sync>,
upload_layers: bool,
) -> Timeline {
@@ -1250,7 +1250,7 @@ impl Timeline {
None
};
let ancestor_timelineid = self
let ancestor_timeline_id = self
.ancestor_timeline
.as_ref()
.map(|ancestor| ancestor.timeline_id);
@@ -1258,7 +1258,7 @@ impl Timeline {
let metadata = TimelineMetadata::new(
disk_consistent_lsn,
ondisk_prev_record_lsn,
ancestor_timelineid,
ancestor_timeline_id,
self.ancestor_lsn,
*self.latest_gc_cutoff_lsn.read(),
self.initdb_lsn,


@@ -13,7 +13,7 @@ use serde::{Deserialize, Serialize};
use std::num::NonZeroU64;
use std::path::PathBuf;
use std::time::Duration;
use utils::zid::ZTenantId;
use utils::id::TenantId;
pub const TENANT_CONFIG_NAME: &str = "config";
@@ -217,8 +217,8 @@ impl TenantConf {
/// Points to a place in pageserver's local directory,
/// where a certain tenant's tenantconf file should be located.
pub fn path(conf: &'static PageServerConf, tenantid: ZTenantId) -> PathBuf {
conf.tenant_path(&tenantid).join(TENANT_CONFIG_NAME)
pub fn path(conf: &'static PageServerConf, tenant_id: TenantId) -> PathBuf {
conf.tenant_path(&tenant_id).join(TENANT_CONFIG_NAME)
}
#[cfg(test)]


@@ -27,7 +27,7 @@ use crate::walredo::PostgresRedoManager;
use crate::{TenantTimelineValues, TEMP_FILE_SUFFIX};
use utils::crashsafe_dir;
use utils::zid::{ZTenantId, ZTimelineId};
use utils::id::{TenantId, TimelineId};
mod tenants_state {
use once_cell::sync::Lazy;
@@ -35,20 +35,20 @@ mod tenants_state {
collections::HashMap,
sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard},
};
use utils::zid::ZTenantId;
use utils::id::TenantId;
use crate::tenant::Tenant;
static TENANTS: Lazy<RwLock<HashMap<ZTenantId, Arc<Tenant>>>> =
static TENANTS: Lazy<RwLock<HashMap<TenantId, Arc<Tenant>>>> =
Lazy::new(|| RwLock::new(HashMap::new()));
pub(super) fn read_tenants() -> RwLockReadGuard<'static, HashMap<ZTenantId, Arc<Tenant>>> {
pub(super) fn read_tenants() -> RwLockReadGuard<'static, HashMap<TenantId, Arc<Tenant>>> {
TENANTS
.read()
.expect("Failed to read() tenants lock, it got poisoned")
}
pub(super) fn write_tenants() -> RwLockWriteGuard<'static, HashMap<ZTenantId, Arc<Tenant>>> {
pub(super) fn write_tenants() -> RwLockWriteGuard<'static, HashMap<TenantId, Arc<Tenant>>> {
TENANTS
.write()
.expect("Failed to write() tenants lock, it got poisoned")
@@ -159,7 +159,7 @@ pub fn attach_local_tenants(
fn load_local_tenant(
conf: &'static PageServerConf,
tenant_id: ZTenantId,
tenant_id: TenantId,
remote_index: &RemoteIndex,
) -> Arc<Tenant> {
let tenant = Arc::new(Tenant::new(
@@ -225,7 +225,7 @@ pub async fn shutdown_all_tenants() {
fn create_tenant_files(
conf: &'static PageServerConf,
tenant_conf: TenantConfOpt,
tenant_id: ZTenantId,
tenant_id: TenantId,
) -> anyhow::Result<()> {
let target_tenant_directory = conf.tenant_path(&tenant_id);
anyhow::ensure!(
@@ -310,9 +310,9 @@ fn rebase_directory(original_path: &Path, base: &Path, new_base: &Path) -> anyho
pub fn create_tenant(
conf: &'static PageServerConf,
tenant_conf: TenantConfOpt,
tenant_id: ZTenantId,
tenant_id: TenantId,
remote_index: RemoteIndex,
) -> anyhow::Result<Option<ZTenantId>> {
) -> anyhow::Result<Option<TenantId>> {
match tenants_state::write_tenants().entry(tenant_id) {
hash_map::Entry::Occupied(_) => {
debug!("tenant {tenant_id} already exists");
@@ -339,7 +339,7 @@ pub fn create_tenant(
pub fn update_tenant_config(
conf: &'static PageServerConf,
tenant_conf: TenantConfOpt,
tenant_id: ZTenantId,
tenant_id: TenantId,
) -> anyhow::Result<()> {
info!("configuring tenant {tenant_id}");
get_tenant(tenant_id, true)?.update_tenant_config(tenant_conf);
@@ -349,7 +349,7 @@ pub fn update_tenant_config(
/// Gets the tenant from the in-memory data, erroring if it's absent or does not fit the query.
/// `active_only = true` allows querying only tenants that are ready for operations, erroring on other kinds of tenants.
pub fn get_tenant(tenant_id: ZTenantId, active_only: bool) -> anyhow::Result<Arc<Tenant>> {
pub fn get_tenant(tenant_id: TenantId, active_only: bool) -> anyhow::Result<Arc<Tenant>> {
let m = tenants_state::read_tenants();
let tenant = m
.get(&tenant_id)
@@ -361,7 +361,7 @@ pub fn get_tenant(tenant_id: ZTenantId, active_only: bool) -> anyhow::Result<Arc
}
}
pub async fn delete_timeline(tenant_id: ZTenantId, timeline_id: ZTimelineId) -> anyhow::Result<()> {
pub async fn delete_timeline(tenant_id: TenantId, timeline_id: TimelineId) -> anyhow::Result<()> {
// Start with the shutdown of timeline tasks (this shuts down the walreceiver)
// It is important that we do not take locks here, and do not check whether the timeline exists
// because if we hold tenants_state::write_tenants() while waiting for the tasks to join
@@ -398,7 +398,7 @@ pub async fn delete_timeline(tenant_id: ZTenantId, timeline_id: ZTimelineId) ->
pub async fn detach_tenant(
conf: &'static PageServerConf,
tenant_id: ZTenantId,
tenant_id: TenantId,
) -> anyhow::Result<()> {
let tenant = match {
let mut tenants_accessor = tenants_state::write_tenants();
@@ -565,14 +565,14 @@ fn collect_timelines_for_tenant(
config: &'static PageServerConf,
tenant_path: &Path,
) -> anyhow::Result<(
ZTenantId,
HashMap<ZTimelineId, (TimelineMetadata, HashSet<PathBuf>)>,
TenantId,
HashMap<TimelineId, (TimelineMetadata, HashSet<PathBuf>)>,
)> {
let tenant_id = tenant_path
.file_name()
.and_then(OsStr::to_str)
.unwrap_or_default()
.parse::<ZTenantId>()
.parse::<TenantId>()
.context("Could not parse tenant id out of the tenant dir name")?;
let timelines_dir = config.timelines_path(&tenant_id);
@@ -644,7 +644,7 @@ fn collect_timelines_for_tenant(
// NOTE: ephemeral files are excluded from the list
fn collect_timeline_files(
timeline_dir: &Path,
) -> anyhow::Result<(ZTimelineId, TimelineMetadata, HashSet<PathBuf>)> {
) -> anyhow::Result<(TimelineId, TimelineMetadata, HashSet<PathBuf>)> {
let mut timeline_files = HashSet::new();
let mut timeline_metadata_path = None;
@@ -652,7 +652,7 @@ fn collect_timeline_files(
.file_name()
.and_then(OsStr::to_str)
.unwrap_or_default()
.parse::<ZTimelineId>()
.parse::<TimelineId>()
.context("Could not parse timeline id out of the timeline dir name")?;
let timeline_dir_entries =
fs::read_dir(&timeline_dir).context("Failed to list timeline dir contents")?;


@@ -10,9 +10,9 @@ use crate::task_mgr::{self, TaskKind, BACKGROUND_RUNTIME};
use crate::tenant::{Tenant, TenantState};
use crate::tenant_mgr;
use tracing::*;
use utils::zid::ZTenantId;
use utils::id::TenantId;
pub fn start_background_loops(tenant_id: ZTenantId) {
pub fn start_background_loops(tenant_id: TenantId) {
task_mgr::spawn(
BACKGROUND_RUNTIME.handle(),
TaskKind::Compaction,
@@ -42,9 +42,8 @@ pub fn start_background_loops(tenant_id: ZTenantId) {
///
/// Compaction task's main loop
///
async fn compaction_loop(tenant_id: ZTenantId) {
async fn compaction_loop(tenant_id: TenantId) {
let wait_duration = Duration::from_secs(2);
info!("starting compaction loop for {tenant_id}");
TENANT_TASK_EVENTS.with_label_values(&["start"]).inc();
async {
@@ -90,9 +89,8 @@ async fn compaction_loop(tenant_id: ZTenantId) {
///
/// GC task's main loop
///
async fn gc_loop(tenant_id: ZTenantId) {
async fn gc_loop(tenant_id: TenantId) {
let wait_duration = Duration::from_secs(2);
info!("starting gc loop for {tenant_id}");
TENANT_TASK_EVENTS.with_label_values(&["start"]).inc();
async {
@@ -138,7 +136,7 @@ async fn gc_loop(tenant_id: ZTenantId) {
}
async fn wait_for_active_tenant(
tenant_id: ZTenantId,
tenant_id: TenantId,
wait: Duration,
) -> ControlFlow<(), Arc<Tenant>> {
let tenant = loop {


@@ -14,8 +14,8 @@ use tracing::*;
use remote_storage::path_with_suffix_extension;
use utils::{
id::{TenantId, TimelineId},
lsn::Lsn,
zid::{ZTenantId, ZTimelineId},
};
use crate::config::PageServerConf;
@@ -61,8 +61,8 @@ fn run_initdb(conf: &'static PageServerConf, initdbpath: &Path) -> Result<()> {
//
fn bootstrap_timeline(
conf: &'static PageServerConf,
tenant_id: ZTenantId,
timeline_id: ZTimelineId,
tenant_id: TenantId,
timeline_id: TimelineId,
tenant: &Tenant,
) -> Result<Arc<Timeline>> {
// create a `tenant/{tenant_id}/timelines/basebackup-{timeline_id}.{TEMP_FILE_SUFFIX}/`
@@ -115,12 +115,12 @@ fn bootstrap_timeline(
///
pub(crate) async fn create_timeline(
conf: &'static PageServerConf,
tenant_id: ZTenantId,
new_timeline_id: Option<ZTimelineId>,
ancestor_timeline_id: Option<ZTimelineId>,
tenant_id: TenantId,
new_timeline_id: Option<TimelineId>,
ancestor_timeline_id: Option<TimelineId>,
mut ancestor_start_lsn: Option<Lsn>,
) -> Result<Option<Arc<Timeline>>> {
let new_timeline_id = new_timeline_id.unwrap_or_else(ZTimelineId::generate);
let new_timeline_id = new_timeline_id.unwrap_or_else(TimelineId::generate);
let tenant = tenant_mgr::get_tenant(tenant_id, true)?;
if conf.timeline_path(&new_timeline_id, &tenant_id).exists() {

View File

@@ -53,8 +53,8 @@ pub struct VirtualFile {
pub path: PathBuf,
open_options: OpenOptions,
tenantid: String,
timelineid: String,
tenant_id: String,
timeline_id: String,
}
#[derive(Debug, PartialEq, Clone, Copy)]
@@ -149,7 +149,7 @@ impl OpenFiles {
// old file.
//
if let Some(old_file) = slot_guard.file.take() {
// We do not have information about tenantid/timelineid of evicted file.
// We do not have information about tenant_id/timeline_id of evicted file.
// It is possible to store the path together with the file, or to use the filepath crate,
// but since close() is not expected to be fast, it is not critical to gather
// precise per-tenant statistics here.
@@ -197,18 +197,18 @@ impl VirtualFile {
) -> Result<VirtualFile, std::io::Error> {
let path_str = path.to_string_lossy();
let parts = path_str.split('/').collect::<Vec<&str>>();
let tenantid;
let timelineid;
let tenant_id;
let timeline_id;
if parts.len() > 5 && parts[parts.len() - 5] == "tenants" {
tenantid = parts[parts.len() - 4].to_string();
timelineid = parts[parts.len() - 2].to_string();
tenant_id = parts[parts.len() - 4].to_string();
timeline_id = parts[parts.len() - 2].to_string();
} else {
tenantid = "*".to_string();
timelineid = "*".to_string();
tenant_id = "*".to_string();
timeline_id = "*".to_string();
}
let (handle, mut slot_guard) = get_open_files().find_victim_slot();
let file = STORAGE_IO_TIME
.with_label_values(&["open", &tenantid, &timelineid])
.with_label_values(&["open", &tenant_id, &timeline_id])
.observe_closure_duration(|| open_options.open(path))?;
// Strip all options other than read and write.
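
A standalone sketch of the label extraction above, assuming paths shaped like .../tenants/<tenant_id>/timelines/<timeline_id>/<file>:

// E.g. metric_labels("/data/tenants/t1/timelines/tl1/layer") == ("t1", "tl1").
fn metric_labels(path: &str) -> (String, String) {
    let parts: Vec<&str> = path.split('/').collect();
    if parts.len() > 5 && parts[parts.len() - 5] == "tenants" {
        (
            parts[parts.len() - 4].to_string(),
            parts[parts.len() - 2].to_string(),
        )
    } else {
        // Not under a tenants/ tree: fall back to wildcard labels.
        ("*".to_string(), "*".to_string())
    }
}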
@@ -226,8 +226,8 @@ impl VirtualFile {
pos: 0,
path: path.to_path_buf(),
open_options: reopen_options,
tenantid,
timelineid,
tenant_id,
timeline_id,
};
slot_guard.file.replace(file);
@@ -267,7 +267,7 @@ impl VirtualFile {
// Found a cached file descriptor.
slot.recently_used.store(true, Ordering::Relaxed);
return Ok(STORAGE_IO_TIME
.with_label_values(&[op, &self.tenantid, &self.timelineid])
.with_label_values(&[op, &self.tenant_id, &self.timeline_id])
.observe_closure_duration(|| func(file)));
}
}
@@ -294,7 +294,7 @@ impl VirtualFile {
// Open the physical file
let file = STORAGE_IO_TIME
.with_label_values(&["open", &self.tenantid, &self.timelineid])
.with_label_values(&["open", &self.tenant_id, &self.timeline_id])
.observe_closure_duration(|| self.open_options.open(&self.path))?;
// Perform the requested operation on it
@@ -308,7 +308,7 @@ impl VirtualFile {
// may deadlock on subsequent read calls.
// Simply replacing all `RwLock` in the project causes deadlocks, so use it sparingly.
let result = STORAGE_IO_TIME
.with_label_values(&[op, &self.tenantid, &self.timelineid])
.with_label_values(&[op, &self.tenant_id, &self.timeline_id])
.observe_closure_duration(|| func(&file));
// Store the File in the slot and update the handle in the VirtualFile
@@ -333,11 +333,11 @@ impl Drop for VirtualFile {
if slot_guard.tag == handle.tag {
slot.recently_used.store(false, Ordering::Relaxed);
// Unlike files evicted by replacement algorithm, here
// we group close time by tenantid/timelineid.
// we group close time by tenant_id/timeline_id.
// That allows us to compare the number/time of "normal" file closes
// with file evictions.
STORAGE_IO_TIME
.with_label_values(&["close", &self.tenantid, &self.timelineid])
.with_label_values(&["close", &self.tenant_id, &self.timeline_id])
.observe_closure_duration(|| slot_guard.file.take());
}
}
@@ -399,7 +399,7 @@ impl FileExt for VirtualFile {
let result = self.with_file("read", |file| file.read_at(buf, offset))?;
if let Ok(size) = result {
STORAGE_IO_SIZE
.with_label_values(&["read", &self.tenantid, &self.timelineid])
.with_label_values(&["read", &self.tenant_id, &self.timeline_id])
.add(size as i64);
}
result
@@ -409,7 +409,7 @@ impl FileExt for VirtualFile {
let result = self.with_file("write", |file| file.write_at(buf, offset))?;
if let Ok(size) = result {
STORAGE_IO_SIZE
.with_label_values(&["write", &self.tenantid, &self.timelineid])
.with_label_values(&["write", &self.tenant_id, &self.timeline_id])
.add(size as i64);
}
result
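
The STORAGE_IO_TIME/STORAGE_IO_SIZE calls follow the labeled-histogram pattern; a hedged sketch using the prometheus crate directly (the metric name and label values here are made up, and the project routes this through its own metrics module):

use once_cell::sync::Lazy;
use prometheus::{register_histogram_vec, HistogramVec};

static IO_TIME: Lazy<HistogramVec> = Lazy::new(|| {
    register_histogram_vec!(
        "example_io_seconds",
        "Time spent in file operations",
        &["operation", "tenant_id", "timeline_id"]
    )
    .expect("failed to register histogram")
});

fn timed_read(tenant_id: &str, timeline_id: &str) -> usize {
    IO_TIME
        .with_label_values(&["read", tenant_id, timeline_id])
        .observe_closure_duration(|| {
            // The actual read would happen here; the closure's runtime
            // is recorded against the labeled histogram.
            42
        })
}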


@@ -1,5 +1,5 @@
//!
//! Parse PostgreSQL WAL records and store them in a zenith Timeline.
//! Parse PostgreSQL WAL records and store them in a neon Timeline.
//!
//! The pipeline for ingesting WAL looks like this:
//!
@@ -9,7 +9,7 @@
//! and decodes it to individual WAL records. It feeds the WAL records
//! to WalIngest, which parses them and stores them in the Repository.
//!
//! The zenith Repository can store page versions in two formats: as
//! The neon Repository can store page versions in two formats: as
//! page images, or as WAL records. WalIngest::ingest_record() extracts
//! page images out of some WAL records, but stores most of them as WAL
//! records. If a WAL record modifies multiple pages, WalIngest
@@ -315,7 +315,7 @@ impl<'a> WalIngest<'a> {
assert_eq!(image.len(), BLCKSZ as usize);
self.put_rel_page_image(modification, rel, blk.blkno, image.freeze())?;
} else {
let rec = ZenithWalRecord::Postgres {
let rec = NeonWalRecord::Postgres {
will_init: blk.will_init || blk.apply_image,
rec: decoded.record.clone(),
};
@@ -428,7 +428,7 @@ impl<'a> WalIngest<'a> {
modification,
vm_rel,
new_vm_blk.unwrap(),
ZenithWalRecord::ClearVisibilityMapFlags {
NeonWalRecord::ClearVisibilityMapFlags {
new_heap_blkno,
old_heap_blkno,
flags: pg_constants::VISIBILITYMAP_VALID_BITS,
@@ -442,7 +442,7 @@ impl<'a> WalIngest<'a> {
modification,
vm_rel,
new_vm_blk,
ZenithWalRecord::ClearVisibilityMapFlags {
NeonWalRecord::ClearVisibilityMapFlags {
new_heap_blkno,
old_heap_blkno: None,
flags: pg_constants::VISIBILITYMAP_VALID_BITS,
@@ -454,7 +454,7 @@ impl<'a> WalIngest<'a> {
modification,
vm_rel,
old_vm_blk,
ZenithWalRecord::ClearVisibilityMapFlags {
NeonWalRecord::ClearVisibilityMapFlags {
new_heap_blkno: None,
old_heap_blkno,
flags: pg_constants::VISIBILITYMAP_VALID_BITS,
@@ -642,12 +642,12 @@ impl<'a> WalIngest<'a> {
segno,
rpageno,
if is_commit {
ZenithWalRecord::ClogSetCommitted {
NeonWalRecord::ClogSetCommitted {
xids: page_xids,
timestamp: parsed.xact_time,
}
} else {
ZenithWalRecord::ClogSetAborted { xids: page_xids }
NeonWalRecord::ClogSetAborted { xids: page_xids }
},
)?;
page_xids = Vec::new();
@@ -662,12 +662,12 @@ impl<'a> WalIngest<'a> {
segno,
rpageno,
if is_commit {
ZenithWalRecord::ClogSetCommitted {
NeonWalRecord::ClogSetCommitted {
xids: page_xids,
timestamp: parsed.xact_time,
}
} else {
ZenithWalRecord::ClogSetAborted { xids: page_xids }
NeonWalRecord::ClogSetAborted { xids: page_xids }
},
)?;
@@ -760,7 +760,7 @@ impl<'a> WalIngest<'a> {
SlruKind::MultiXactOffsets,
segno,
rpageno,
ZenithWalRecord::MultixactOffsetCreate {
NeonWalRecord::MultixactOffsetCreate {
mid: xlrec.mid,
moff: xlrec.moff,
},
@@ -794,7 +794,7 @@ impl<'a> WalIngest<'a> {
SlruKind::MultiXactMembers,
pageno / pg_constants::SLRU_PAGES_PER_SEGMENT,
pageno % pg_constants::SLRU_PAGES_PER_SEGMENT,
ZenithWalRecord::MultixactMembersCreate {
NeonWalRecord::MultixactMembersCreate {
moff: offset,
members: this_page_members,
},
@@ -901,7 +901,7 @@ impl<'a> WalIngest<'a> {
modification: &mut DatadirModification,
rel: RelTag,
blknum: BlockNumber,
rec: ZenithWalRecord,
rec: NeonWalRecord,
) -> Result<()> {
self.handle_rel_extend(modification, rel, blknum)?;
modification.put_rel_wal_record(rel, blknum, rec)?;


@@ -34,8 +34,8 @@ use crate::{
DEFAULT_MAX_BACKOFF_SECONDS,
};
use utils::{
id::{NodeId, TenantTimelineId},
lsn::Lsn,
zid::{NodeId, ZTenantTimelineId},
};
use super::{walreceiver_connection::WalConnectionStatus, TaskEvent, TaskHandle};
@@ -101,7 +101,7 @@ async fn connection_manager_loop_step(
etcd_client: &mut Client,
walreceiver_state: &mut WalreceiverState,
) {
let id = ZTenantTimelineId {
let id = TenantTimelineId {
tenant_id: walreceiver_state.timeline.tenant_id,
timeline_id: walreceiver_state.timeline.timeline_id,
};
@@ -230,7 +230,7 @@ fn cleanup_broker_connection(
async fn subscribe_for_timeline_updates(
etcd_client: &mut Client,
broker_prefix: &str,
id: ZTenantTimelineId,
id: TenantTimelineId,
) -> BrokerSubscription<SkTimelineInfo> {
let mut attempt = 0;
loop {
@@ -266,7 +266,7 @@ const WALCONNECTION_RETRY_BACKOFF_MULTIPLIER: f64 = 1.5;
/// All data that's needed to run the endless broker loop and keep the WAL streaming connection alive, if possible.
struct WalreceiverState {
id: ZTenantTimelineId,
id: TenantTimelineId,
/// Use pageserver data about the timeline to filter out some of the safekeepers.
timeline: Arc<Timeline>,
@@ -331,7 +331,7 @@ impl WalreceiverState {
lagging_wal_timeout: Duration,
max_lsn_wal_lag: NonZeroU64,
) -> Self {
let id = ZTenantTimelineId {
let id = TenantTimelineId {
tenant_id: timeline.tenant_id,
timeline_id: timeline.timeline_id,
};
@@ -746,10 +746,10 @@ enum ReconnectReason {
}
fn wal_stream_connection_string(
ZTenantTimelineId {
TenantTimelineId {
tenant_id,
timeline_id,
}: ZTenantTimelineId,
}: TenantTimelineId,
listen_pg_addr_str: &str,
) -> anyhow::Result<String> {
let sk_connstr = format!("postgresql://no_user@{listen_pg_addr_str}/no_db");
@@ -760,7 +760,7 @@ fn wal_stream_connection_string(
})?;
let (host, port) = utils::connstring::connection_host_port(&me_conf);
Ok(format!(
"host={host} port={port} options='-c ztimelineid={timeline_id} ztenantid={tenant_id}'"
"host={host} port={port} options='-c timeline_id={timeline_id} tenant_id={tenant_id}'"
))
}
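
For concreteness, a hedged example of the generated string (host, port, and ids are illustrative placeholders):

fn example_connstr() -> String {
    let (host, port) = ("sk-1.local", 5454);
    let tenant_id = "11000000000000000000000000000000";
    let timeline_id = "22000000000000000000000000000000";
    // host=sk-1.local port=5454 options='-c timeline_id=2200... tenant_id=1100...'
    format!(
        "host={host} port={port} options='-c timeline_id={timeline_id} tenant_id={tenant_id}'"
    )
}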
@@ -1355,7 +1355,7 @@ mod tests {
fn dummy_state(harness: &TenantHarness) -> WalreceiverState {
WalreceiverState {
id: ZTenantTimelineId {
id: TenantTimelineId {
tenant_id: harness.tenant_id,
timeline_id: TIMELINE_ID,
},


@@ -30,7 +30,7 @@ use crate::{
walrecord::DecodedWALRecord,
};
use postgres_ffi::v14::waldecoder::WalStreamDecoder;
use utils::zid::ZTenantTimelineId;
use utils::id::TenantTimelineId;
use utils::{lsn::Lsn, pq_proto::ReplicationFeedback};
/// Status of the connection.
@@ -288,7 +288,7 @@ pub async fn handle_walreceiver_connection(
.await
// here we either do not have this timeline in the remote index
// or there were no checkpoints for it yet
.timeline_entry(&ZTenantTimelineId {
.timeline_entry(&TenantTimelineId {
tenant_id,
timeline_id,
})
@@ -316,7 +316,7 @@ pub async fn handle_walreceiver_connection(
};
*timeline.last_received_wal.lock().unwrap() = Some(last_received_wal);
// Send zenith feedback message.
// Send the replication feedback message.
// Regular standby_status_update fields are put into this message.
let status_update = ReplicationFeedback {
current_timeline_size: timeline
@@ -328,7 +328,7 @@ pub async fn handle_walreceiver_connection(
ps_replytime: ts,
};
debug!("zenith_status_update {status_update:?}");
debug!("neon_status_update {status_update:?}");
let mut data = BytesMut::new();
status_update.serialize(&mut data)?;


@@ -13,10 +13,10 @@ use serde::{Deserialize, Serialize};
use tracing::*;
use utils::bin_ser::DeserializeError;
/// Each update to a page is represented by a ZenithWalRecord. It can be a wrapper
/// around a PostgreSQL WAL record, or a custom zenith-specific "record".
/// Each update to a page is represented by a NeonWalRecord. It can be a wrapper
/// around a PostgreSQL WAL record, or a custom neon-specific "record".
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum ZenithWalRecord {
pub enum NeonWalRecord {
/// Native PostgreSQL WAL record
Postgres { will_init: bool, rec: Bytes },
@@ -45,14 +45,14 @@ pub enum ZenithWalRecord {
},
}
impl ZenithWalRecord {
impl NeonWalRecord {
/// Does replaying this WAL record initialize the page from scratch, or does
/// it need to be applied over the previous image of the page?
pub fn will_init(&self) -> bool {
match self {
ZenithWalRecord::Postgres { will_init, rec: _ } => *will_init,
NeonWalRecord::Postgres { will_init, rec: _ } => *will_init,
// None of the special zenith record types currently initialize the page
// None of the special neon record types currently initialize the page
_ => false,
}
}
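
A small usage sketch (assumes NeonWalRecord and the bytes crate are in scope; the payload is a dummy):

use bytes::Bytes;

fn example_will_init() {
    // A wrapped Postgres record flagged will_init replays from scratch,
    // without needing a previous page image.
    let rec = NeonWalRecord::Postgres {
        will_init: true,
        rec: Bytes::from_static(b""),
    };
    assert!(rec.will_init());
}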
@@ -767,9 +767,9 @@ pub fn decode_wal_record(
/// Build a human-readable string to describe a WAL record
///
/// For debugging purposes
pub fn describe_wal_record(rec: &ZenithWalRecord) -> Result<String, DeserializeError> {
pub fn describe_wal_record(rec: &NeonWalRecord) -> Result<String, DeserializeError> {
match rec {
ZenithWalRecord::Postgres { will_init, rec } => Ok(format!(
NeonWalRecord::Postgres { will_init, rec } => Ok(format!(
"will_init: {}, {}",
will_init,
describe_postgres_wal_record(rec)?


@@ -36,7 +36,7 @@ use std::sync::Mutex;
use std::time::Duration;
use std::time::Instant;
use tracing::*;
use utils::{bin_ser::BeSer, lsn::Lsn, nonblock::set_nonblock, zid::ZTenantId};
use utils::{bin_ser::BeSer, id::TenantId, lsn::Lsn, nonblock::set_nonblock};
use crate::metrics::{
WAL_REDO_RECORDS_HISTOGRAM, WAL_REDO_RECORD_COUNTER, WAL_REDO_TIME, WAL_REDO_WAIT_TIME,
@@ -44,7 +44,7 @@ use crate::metrics::{
use crate::pgdatadir_mapping::{key_to_rel_block, key_to_slru_block};
use crate::reltag::{RelTag, SlruKind};
use crate::repository::Key;
use crate::walrecord::ZenithWalRecord;
use crate::walrecord::NeonWalRecord;
use crate::{config::PageServerConf, TEMP_FILE_SUFFIX};
use postgres_ffi::v14::nonrelfile_utils::{
mx_offset_to_flags_bitshift, mx_offset_to_flags_offset, mx_offset_to_member_offset,
@@ -81,7 +81,7 @@ pub trait WalRedoManager: Send + Sync {
key: Key,
lsn: Lsn,
base_img: Option<Bytes>,
records: Vec<(Lsn, ZenithWalRecord)>,
records: Vec<(Lsn, NeonWalRecord)>,
) -> Result<Bytes, WalRedoError>;
}
@@ -93,20 +93,20 @@ pub trait WalRedoManager: Send + Sync {
/// records.
///
pub struct PostgresRedoManager {
tenantid: ZTenantId,
tenant_id: TenantId,
conf: &'static PageServerConf,
process: Mutex<Option<PostgresRedoProcess>>,
}
/// Can this request be served by zenith redo functions
/// Can this request be served by neon redo functions
/// or do we need to pass it to the wal-redo postgres process?
fn can_apply_in_zenith(rec: &ZenithWalRecord) -> bool {
fn can_apply_in_neon(rec: &NeonWalRecord) -> bool {
// Currently, we don't have bespoke Rust code to replay any
// Postgres WAL records. But everything else is handled in zenith.
// Postgres WAL records. But everything else is handled in neon.
#[allow(clippy::match_like_matches_macro)]
match rec {
ZenithWalRecord::Postgres {
NeonWalRecord::Postgres {
will_init: _,
rec: _,
} => false,
@@ -143,7 +143,7 @@ impl WalRedoManager for PostgresRedoManager {
key: Key,
lsn: Lsn,
base_img: Option<Bytes>,
records: Vec<(Lsn, ZenithWalRecord)>,
records: Vec<(Lsn, NeonWalRecord)>,
) -> Result<Bytes, WalRedoError> {
if records.is_empty() {
error!("invalid WAL redo request with no records");
@@ -151,14 +151,14 @@ impl WalRedoManager for PostgresRedoManager {
}
let mut img: Option<Bytes> = base_img;
let mut batch_zenith = can_apply_in_zenith(&records[0].1);
let mut batch_neon = can_apply_in_neon(&records[0].1);
let mut batch_start = 0;
for i in 1..records.len() {
let rec_zenith = can_apply_in_zenith(&records[i].1);
let rec_neon = can_apply_in_neon(&records[i].1);
if rec_zenith != batch_zenith {
let result = if batch_zenith {
self.apply_batch_zenith(key, lsn, img, &records[batch_start..i])
if rec_neon != batch_neon {
let result = if batch_neon {
self.apply_batch_neon(key, lsn, img, &records[batch_start..i])
} else {
self.apply_batch_postgres(
key,
@@ -170,13 +170,13 @@ impl WalRedoManager for PostgresRedoManager {
};
img = Some(result?);
batch_zenith = rec_zenith;
batch_neon = rec_neon;
batch_start = i;
}
}
// last batch
if batch_zenith {
self.apply_batch_zenith(key, lsn, img, &records[batch_start..])
if batch_neon {
self.apply_batch_neon(key, lsn, img, &records[batch_start..])
} else {
self.apply_batch_postgres(
key,
@@ -193,10 +193,10 @@ impl PostgresRedoManager {
///
/// Create a new PostgresRedoManager.
///
pub fn new(conf: &'static PageServerConf, tenantid: ZTenantId) -> PostgresRedoManager {
pub fn new(conf: &'static PageServerConf, tenant_id: TenantId) -> PostgresRedoManager {
// The actual process is launched lazily, on first request.
PostgresRedoManager {
tenantid,
tenant_id,
conf,
process: Mutex::new(None),
}
@@ -210,7 +210,7 @@ impl PostgresRedoManager {
key: Key,
lsn: Lsn,
base_img: Option<Bytes>,
records: &[(Lsn, ZenithWalRecord)],
records: &[(Lsn, NeonWalRecord)],
wal_redo_timeout: Duration,
) -> Result<Bytes, WalRedoError> {
let (rel, blknum) = key_to_rel_block(key).or(Err(WalRedoError::InvalidRecord))?;
@@ -222,7 +222,7 @@ impl PostgresRedoManager {
// launch the WAL redo process on first use
if process_guard.is_none() {
let p = PostgresRedoProcess::launch(self.conf, &self.tenantid)?;
let p = PostgresRedoProcess::launch(self.conf, &self.tenant_id)?;
*process_guard = Some(p);
}
let process = process_guard.as_mut().unwrap();
@@ -263,14 +263,14 @@ impl PostgresRedoManager {
}
///
/// Process a batch of WAL records using bespoke Zenith code.
/// Process a batch of WAL records using bespoke Neon code.
///
fn apply_batch_zenith(
fn apply_batch_neon(
&self,
key: Key,
lsn: Lsn,
base_img: Option<Bytes>,
records: &[(Lsn, ZenithWalRecord)],
records: &[(Lsn, NeonWalRecord)],
) -> Result<Bytes, WalRedoError> {
let start_time = Instant::now();
@@ -280,13 +280,13 @@ impl PostgresRedoManager {
page.extend_from_slice(&fpi[..]);
} else {
// All the current WAL record types that we can handle require a base image.
error!("invalid zenith WAL redo request with no base image");
error!("invalid neon WAL redo request with no base image");
return Err(WalRedoError::InvalidRequest);
}
// Apply all the WAL records in the batch
for (record_lsn, record) in records.iter() {
self.apply_record_zenith(key, &mut page, *record_lsn, record)?;
self.apply_record_neon(key, &mut page, *record_lsn, record)?;
}
// Success!
let end_time = Instant::now();
@@ -294,7 +294,7 @@ impl PostgresRedoManager {
WAL_REDO_TIME.observe(duration.as_secs_f64());
debug!(
"zenith applied {} WAL records in {} ms to reconstruct page image at LSN {}",
"neon applied {} WAL records in {} ms to reconstruct page image at LSN {}",
records.len(),
duration.as_micros(),
lsn
@@ -303,22 +303,22 @@ impl PostgresRedoManager {
Ok(page.freeze())
}
fn apply_record_zenith(
fn apply_record_neon(
&self,
key: Key,
page: &mut BytesMut,
_record_lsn: Lsn,
record: &ZenithWalRecord,
record: &NeonWalRecord,
) -> Result<(), WalRedoError> {
match record {
ZenithWalRecord::Postgres {
NeonWalRecord::Postgres {
will_init: _,
rec: _,
} => {
error!("tried to pass postgres wal record to zenith WAL redo");
error!("tried to pass postgres wal record to neon WAL redo");
return Err(WalRedoError::InvalidRequest);
}
ZenithWalRecord::ClearVisibilityMapFlags {
NeonWalRecord::ClearVisibilityMapFlags {
new_heap_blkno,
old_heap_blkno,
flags,
@@ -360,7 +360,7 @@ impl PostgresRedoManager {
}
// Non-relational WAL records are handled here, with custom code that has the
// same effects as the corresponding Postgres WAL redo function.
ZenithWalRecord::ClogSetCommitted { xids, timestamp } => {
NeonWalRecord::ClogSetCommitted { xids, timestamp } => {
let (slru_kind, segno, blknum) =
key_to_slru_block(key).or(Err(WalRedoError::InvalidRecord))?;
assert_eq!(
@@ -410,7 +410,7 @@ impl PostgresRedoManager {
);
}
}
ZenithWalRecord::ClogSetAborted { xids } => {
NeonWalRecord::ClogSetAborted { xids } => {
let (slru_kind, segno, blknum) =
key_to_slru_block(key).or(Err(WalRedoError::InvalidRecord))?;
assert_eq!(
@@ -441,7 +441,7 @@ impl PostgresRedoManager {
transaction_id_set_status(xid, pg_constants::TRANSACTION_STATUS_ABORTED, page);
}
}
ZenithWalRecord::MultixactOffsetCreate { mid, moff } => {
NeonWalRecord::MultixactOffsetCreate { mid, moff } => {
let (slru_kind, segno, blknum) =
key_to_slru_block(key).or(Err(WalRedoError::InvalidRecord))?;
assert_eq!(
@@ -474,7 +474,7 @@ impl PostgresRedoManager {
LittleEndian::write_u32(&mut page[offset..offset + 4], *moff);
}
ZenithWalRecord::MultixactMembersCreate { moff, members } => {
NeonWalRecord::MultixactMembersCreate { moff, members } => {
let (slru_kind, segno, blknum) =
key_to_slru_block(key).or(Err(WalRedoError::InvalidRecord))?;
assert_eq!(
@@ -570,7 +570,7 @@ impl PostgresRedoProcess {
//
// Start postgres binary in special WAL redo mode.
//
fn launch(conf: &PageServerConf, tenant_id: &ZTenantId) -> Result<PostgresRedoProcess, Error> {
fn launch(conf: &PageServerConf, tenant_id: &TenantId) -> Result<PostgresRedoProcess, Error> {
// FIXME: We need a dummy Postgres cluster to run the process in. Currently, we
// just create one with a constant name. That fails if you try to launch more than
// one WAL redo manager concurrently.
@@ -686,7 +686,7 @@ impl PostgresRedoProcess {
&mut self,
tag: BufferTag,
base_img: Option<Bytes>,
records: &[(Lsn, ZenithWalRecord)],
records: &[(Lsn, NeonWalRecord)],
wal_redo_timeout: Duration,
) -> Result<Bytes, std::io::Error> {
// Serialize all the messages to send to the WAL redo process first.
@@ -700,7 +700,7 @@ impl PostgresRedoProcess {
build_push_page_msg(tag, &img, &mut writebuf);
}
for (lsn, rec) in records.iter() {
if let ZenithWalRecord::Postgres {
if let NeonWalRecord::Postgres {
will_init: _,
rec: postgres_rec,
} = rec
@@ -709,7 +709,7 @@ impl PostgresRedoProcess {
} else {
return Err(Error::new(
ErrorKind::Other,
"tried to pass zenith wal record to postgres WAL redo",
"tried to pass neon wal record to postgres WAL redo",
));
}
}
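The two sides of this file encode one routing rule: NeonWalRecord::Postgres payloads may only be replayed by the external Postgres wal-redo process, while every other variant is applied in-process by apply_record_neon, and each side errors out on the other's kind. A minimal Rust sketch of that rule (variant payloads and the error type are simplified stand-ins, not the pageserver's real definitions):

enum NeonWalRecord {
    Postgres { will_init: bool, rec: Vec<u8> },
    ClogSetCommitted { xids: Vec<u32>, timestamp: u64 },
    // ...remaining non-relational variants elided
}

#[derive(Debug)]
struct InvalidRequest;

// In-process redo: handles every variant except plain Postgres WAL.
fn apply_record_neon(record: &NeonWalRecord) -> Result<(), InvalidRequest> {
    match record {
        NeonWalRecord::Postgres { .. } => Err(InvalidRequest), // wrong side
        NeonWalRecord::ClogSetCommitted { .. } => Ok(()), // apply to the SLRU page
    }
}

// Wal-redo process side: accepts only plain Postgres WAL.
fn postgres_redo_payload(record: &NeonWalRecord) -> Result<&[u8], InvalidRequest> {
    match record {
        NeonWalRecord::Postgres { rec, .. } => Ok(rec.as_slice()),
        _ => Err(InvalidRequest), // "tried to pass neon wal record to postgres WAL redo"
    }
}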

View File

@@ -86,7 +86,7 @@ inmem_exists(SMgrRelation reln, ForkNumber forknum)
}
/*
* inmem_create() -- Create a new relation on zenithd storage
* inmem_create() -- Create a new relation on neon storage
*
* If isRedo is true, it's okay for the relation to exist already.
*/

View File

@@ -30,13 +30,12 @@
#include "walproposer.h"
#include "walproposer_utils.h"
#define PageStoreTrace DEBUG5
#define NEON_TAG "[NEON_SMGR] "
#define neon_log(tag, fmt, ...) ereport(tag, \
(errmsg(NEON_TAG fmt, ## __VA_ARGS__), \
errhidestmt(true), errhidecontext(true)))
#define neon_log(tag, fmt, ...) ereport(tag, \
(errmsg(NEON_TAG fmt, ##__VA_ARGS__), \
errhidestmt(true), errhidecontext(true)))
bool connected = false;
PGconn *pageserver_conn = NULL;
@@ -65,7 +64,7 @@ pageserver_connect()
errdetail_internal("%s", msg)));
}
query = psprintf("pagestream %s %s", zenith_tenant, zenith_timeline);
query = psprintf("pagestream %s %s", neon_tenant, neon_timeline);
ret = PQsendQuery(pageserver_conn, query);
if (ret != 1)
{
@@ -169,7 +168,7 @@ pageserver_disconnect(void)
}
static void
pageserver_send(ZenithRequest *request)
pageserver_send(NeonRequest * request)
{
StringInfoData req_buff;
@@ -205,18 +204,18 @@ pageserver_send(ZenithRequest *request)
if (message_level_is_interesting(PageStoreTrace))
{
char *msg = zm_to_string((ZenithMessage *) request);
char *msg = zm_to_string((NeonMessage *) request);
neon_log(PageStoreTrace, "sent request: %s", msg);
pfree(msg);
}
}
static ZenithResponse *
static NeonResponse *
pageserver_receive(void)
{
StringInfoData resp_buff;
ZenithResponse *resp;
NeonResponse *resp;
PG_TRY();
{
@@ -236,7 +235,7 @@ pageserver_receive(void)
if (message_level_is_interesting(PageStoreTrace))
{
char *msg = zm_to_string((ZenithMessage *) resp);
char *msg = zm_to_string((NeonMessage *) resp);
neon_log(PageStoreTrace, "got response: %s", msg);
pfree(msg);
@@ -249,7 +248,7 @@ pageserver_receive(void)
}
PG_END_TRY();
return (ZenithResponse *) resp;
return (NeonResponse *) resp;
}
@@ -265,8 +264,8 @@ pageserver_flush(void)
}
}
static ZenithResponse *
pageserver_call(ZenithRequest *request)
static NeonResponse *
pageserver_call(NeonRequest * request)
{
pageserver_send(request);
pageserver_flush();
@@ -281,7 +280,7 @@ page_server_api api = {
};
static bool
check_zenith_id(char **newval, void **extra, GucSource source)
check_neon_id(char **newval, void **extra, GucSource source)
{
uint8 zid[16];
@@ -403,22 +402,22 @@ pg_init_libpagestore(void)
NULL, NULL, NULL);
DefineCustomStringVariable("neon.timeline_id",
"Zenith timelineid the server is running on",
"Neon timeline_id the server is running on",
NULL,
&zenith_timeline,
&neon_timeline,
"",
PGC_POSTMASTER,
0, /* no flags required */
check_zenith_id, NULL, NULL);
check_neon_id, NULL, NULL);
DefineCustomStringVariable("neon.tenant_id",
"Neon tenantid the server is running on",
"Neon tenant_id the server is running on",
NULL,
&zenith_tenant,
&neon_tenant,
"",
PGC_POSTMASTER,
0, /* no flags required */
check_zenith_id, NULL, NULL);
check_neon_id, NULL, NULL);
DefineCustomBoolVariable("neon.wal_redo",
"start in wal-redo mode",
@@ -450,8 +449,8 @@ pg_init_libpagestore(void)
page_server_connstring = substitute_pageserver_password(page_server_connstring_raw);
/* Is there more correct way to pass CustomGUC to postgres code? */
zenith_timeline_walproposer = zenith_timeline;
zenith_tenant_walproposer = zenith_tenant;
neon_timeline_walproposer = neon_timeline;
neon_tenant_walproposer = neon_tenant;
if (wal_redo)
{
@@ -462,8 +461,8 @@ pg_init_libpagestore(void)
else if (page_server_connstring && page_server_connstring[0])
{
neon_log(PageStoreTrace, "set neon_smgr hook");
smgr_hook = smgr_zenith;
smgr_init_hook = smgr_init_zenith;
dbsize_hook = zenith_dbsize;
smgr_hook = smgr_neon;
smgr_init_hook = smgr_init_neon;
dbsize_hook = neon_dbsize;
}
}
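check_neon_id, shared by both GUCs above, only has to confirm that the value decodes as a 16-byte hex id; the commit renames the GUCs (neon.tenant_id, neon.timeline_id) and the validator, not the check itself. An illustrative Rust equivalent of that validation (our naming, not the extension's C code):

fn parse_neon_id(s: &str) -> Option<[u8; 16]> {
    // A valid id is exactly 32 hex characters, i.e. 16 bytes.
    if s.len() != 32 || !s.chars().all(|c| c.is_ascii_hexdigit()) {
        return None;
    }
    let mut id = [0u8; 16];
    for (i, byte) in id.iter_mut().enumerate() {
        *byte = u8::from_str_radix(&s[2 * i..2 * i + 2], 16).ok()?;
    }
    Some(id)
}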

View File

@@ -28,7 +28,6 @@
PG_MODULE_MAGIC;
void _PG_init(void);
void
_PG_init(void)
{
@@ -56,7 +55,6 @@ pg_cluster_size(PG_FUNCTION_ARGS)
PG_RETURN_INT64(size);
}
Datum
backpressure_lsns(PG_FUNCTION_ARGS)
{

View File

@@ -28,31 +28,29 @@
typedef enum
{
/* pagestore_client -> pagestore */
T_ZenithExistsRequest = 0,
T_ZenithNblocksRequest,
T_ZenithGetPageRequest,
T_ZenithDbSizeRequest,
T_NeonExistsRequest = 0,
T_NeonNblocksRequest,
T_NeonGetPageRequest,
T_NeonDbSizeRequest,
/* pagestore -> pagestore_client */
T_ZenithExistsResponse = 100,
T_ZenithNblocksResponse,
T_ZenithGetPageResponse,
T_ZenithErrorResponse,
T_ZenithDbSizeResponse,
} ZenithMessageTag;
T_NeonExistsResponse = 100,
T_NeonNblocksResponse,
T_NeonGetPageResponse,
T_NeonErrorResponse,
T_NeonDbSizeResponse,
} NeonMessageTag;
/* base struct for c-style inheritance */
typedef struct
{
ZenithMessageTag tag;
} ZenithMessage;
NeonMessageTag tag;
} NeonMessage;
#define messageTag(m) (((const ZenithMessage *)(m))->tag)
#define messageTag(m) (((const NeonMessage *)(m))->tag)
/*
* supertype of all the Zenith*Request structs below
* supertype of all the Neon*Request structs below
*
* If 'latest' is true, we are requesting the latest page version, and 'lsn'
* is just a hint to the server that we know there are no versions of the page
@@ -60,81 +58,79 @@ typedef struct
*/
typedef struct
{
ZenithMessageTag tag;
NeonMessageTag tag;
bool latest; /* if true, request latest page version */
XLogRecPtr lsn; /* request page version @ this LSN */
} ZenithRequest;
} NeonRequest;
typedef struct
{
ZenithRequest req;
NeonRequest req;
RelFileNode rnode;
ForkNumber forknum;
} ZenithExistsRequest;
} NeonExistsRequest;
typedef struct
{
ZenithRequest req;
NeonRequest req;
RelFileNode rnode;
ForkNumber forknum;
} ZenithNblocksRequest;
} NeonNblocksRequest;
typedef struct
{
ZenithRequest req;
NeonRequest req;
Oid dbNode;
} ZenithDbSizeRequest;
} NeonDbSizeRequest;
typedef struct
{
ZenithRequest req;
NeonRequest req;
RelFileNode rnode;
ForkNumber forknum;
BlockNumber blkno;
} ZenithGetPageRequest;
} NeonGetPageRequest;
/* supertype of all the Zenith*Response structs below */
/* supertype of all the Neon*Response structs below */
typedef struct
{
ZenithMessageTag tag;
} ZenithResponse;
NeonMessageTag tag;
} NeonResponse;
typedef struct
{
ZenithMessageTag tag;
NeonMessageTag tag;
bool exists;
} ZenithExistsResponse;
} NeonExistsResponse;
typedef struct
{
ZenithMessageTag tag;
NeonMessageTag tag;
uint32 n_blocks;
} ZenithNblocksResponse;
} NeonNblocksResponse;
typedef struct
{
ZenithMessageTag tag;
NeonMessageTag tag;
char page[FLEXIBLE_ARRAY_MEMBER];
} ZenithGetPageResponse;
} NeonGetPageResponse;
typedef struct
{
ZenithMessageTag tag;
NeonMessageTag tag;
int64 db_size;
} ZenithDbSizeResponse;
} NeonDbSizeResponse;
typedef struct
{
ZenithMessageTag tag;
NeonMessageTag tag;
char message[FLEXIBLE_ARRAY_MEMBER]; /* null-terminated error
* message */
} ZenithErrorResponse;
} NeonErrorResponse;
extern StringInfoData zm_pack_request(ZenithRequest *msg);
extern ZenithResponse *zm_unpack_response(StringInfo s);
extern char *zm_to_string(ZenithMessage *msg);
extern StringInfoData zm_pack_request(NeonRequest * msg);
extern NeonResponse * zm_unpack_response(StringInfo s);
extern char *zm_to_string(NeonMessage * msg);
/*
* API
@@ -142,57 +138,57 @@ extern char *zm_to_string(ZenithMessage *msg);
typedef struct
{
ZenithResponse *(*request) (ZenithRequest *request);
void (*send) (ZenithRequest *request);
ZenithResponse *(*receive) (void);
NeonResponse *(*request) (NeonRequest * request);
void (*send) (NeonRequest * request);
NeonResponse *(*receive) (void);
void (*flush) (void);
} page_server_api;
extern page_server_api * page_server;
extern char *page_server_connstring;
extern char *zenith_timeline;
extern char *zenith_tenant;
extern char *neon_timeline;
extern char *neon_tenant;
extern bool wal_redo;
extern int32 max_cluster_size;
extern const f_smgr *smgr_zenith(BackendId backend, RelFileNode rnode);
extern void smgr_init_zenith(void);
extern const f_smgr *smgr_neon(BackendId backend, RelFileNode rnode);
extern void smgr_init_neon(void);
extern const f_smgr *smgr_inmem(BackendId backend, RelFileNode rnode);
extern void smgr_init_inmem(void);
extern void smgr_shutdown_inmem(void);
/* zenith storage manager functionality */
/* Neon storage manager functionality */
extern void zenith_init(void);
extern void zenith_open(SMgrRelation reln);
extern void zenith_close(SMgrRelation reln, ForkNumber forknum);
extern void zenith_create(SMgrRelation reln, ForkNumber forknum, bool isRedo);
extern bool zenith_exists(SMgrRelation reln, ForkNumber forknum);
extern void zenith_unlink(RelFileNodeBackend rnode, ForkNumber forknum, bool isRedo);
extern void zenith_extend(SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum, char *buffer, bool skipFsync);
extern bool zenith_prefetch(SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum);
extern void zenith_reset_prefetch(SMgrRelation reln);
extern void zenith_read(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
char *buffer);
extern void neon_init(void);
extern void neon_open(SMgrRelation reln);
extern void neon_close(SMgrRelation reln, ForkNumber forknum);
extern void neon_create(SMgrRelation reln, ForkNumber forknum, bool isRedo);
extern bool neon_exists(SMgrRelation reln, ForkNumber forknum);
extern void neon_unlink(RelFileNodeBackend rnode, ForkNumber forknum, bool isRedo);
extern void neon_extend(SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum, char *buffer, bool skipFsync);
extern bool neon_prefetch(SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum);
extern void neon_reset_prefetch(SMgrRelation reln);
extern void neon_read(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
char *buffer);
extern void zenith_read_at_lsn(RelFileNode rnode, ForkNumber forkNum, BlockNumber blkno,
XLogRecPtr request_lsn, bool request_latest, char *buffer);
extern void neon_read_at_lsn(RelFileNode rnode, ForkNumber forkNum, BlockNumber blkno,
XLogRecPtr request_lsn, bool request_latest, char *buffer);
extern void zenith_write(SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum, char *buffer, bool skipFsync);
extern void zenith_writeback(SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum, BlockNumber nblocks);
extern BlockNumber zenith_nblocks(SMgrRelation reln, ForkNumber forknum);
extern int64 zenith_dbsize(Oid dbNode);
extern void zenith_truncate(SMgrRelation reln, ForkNumber forknum,
BlockNumber nblocks);
extern void zenith_immedsync(SMgrRelation reln, ForkNumber forknum);
extern void neon_write(SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum, char *buffer, bool skipFsync);
extern void neon_writeback(SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum, BlockNumber nblocks);
extern BlockNumber neon_nblocks(SMgrRelation reln, ForkNumber forknum);
extern int64 neon_dbsize(Oid dbNode);
extern void neon_truncate(SMgrRelation reln, ForkNumber forknum,
BlockNumber nblocks);
extern void neon_immedsync(SMgrRelation reln, ForkNumber forknum);
/* zenith wal-redo storage manager functionality */
/* neon wal-redo storage manager functionality */
extern void inmem_init(void);
extern void inmem_open(SMgrRelation reln);
@@ -215,8 +211,7 @@ extern void inmem_truncate(SMgrRelation reln, ForkNumber forknum,
BlockNumber nblocks);
extern void inmem_immedsync(SMgrRelation reln, ForkNumber forknum);
/* utils for zenith relsize cache */
/* utils for neon relsize cache */
extern void relsize_hash_init(void);
extern bool get_cached_relsize(RelFileNode rnode, ForkNumber forknum, BlockNumber *size);
extern void set_cached_relsize(RelFileNode rnode, ForkNumber forknum, BlockNumber size);
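The page_server_api table above reduces to one calling convention, visible in pageserver_call earlier in this file: write one request, flush the socket, then block for exactly one response. A schematic Rust rendering of that contract, with stub message types standing in for the C structs:

struct NeonRequest { latest: bool, lsn: u64 } // stand-in for the C struct
struct NeonResponse { tag: u8 }

trait PageServerApi {
    fn send(&mut self, req: &NeonRequest);
    fn flush(&mut self);
    fn receive(&mut self) -> NeonResponse;

    // The request() entry point: one request in, one response out.
    fn call(&mut self, req: &NeonRequest) -> NeonResponse {
        self.send(req);
        self.flush();
        self.receive()
    }
}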

File diff suppressed because it is too large

View File

@@ -56,7 +56,7 @@ static void relsize_shmem_request(void);
#define DEFAULT_RELSIZE_HASH_SIZE (64 * 1024)
static void
zenith_smgr_shmem_startup(void)
neon_smgr_shmem_startup(void)
{
static HASHCTL info;
@@ -174,14 +174,14 @@ relsize_hash_init(void)
#endif
prev_shmem_startup_hook = shmem_startup_hook;
shmem_startup_hook = zenith_smgr_shmem_startup;
shmem_startup_hook = neon_smgr_shmem_startup;
}
}
#if PG_VERSION_NUM >= 150000
/*
* shmem_request hook: request additional shared resources. We'll allocate or
* attach to the shared resources in zenith_smgr_shmem_startup().
* attach to the shared resources in neon_smgr_shmem_startup().
*/
static void
relsize_shmem_request(void)

View File

@@ -71,14 +71,13 @@
#include "walproposer_utils.h"
#include "replication/walpropshim.h"
char *wal_acceptors_list;
int wal_acceptor_reconnect_timeout;
int wal_acceptor_connect_timeout;
bool am_wal_proposer;
char *zenith_timeline_walproposer = NULL;
char *zenith_tenant_walproposer = NULL;
char *neon_timeline_walproposer = NULL;
char *neon_tenant_walproposer = NULL;
/* Declared in walproposer.h, defined here, initialized in libpqwalproposer.c */
WalProposerFunctionsType *WalProposerFunctions = NULL;
@@ -89,7 +88,7 @@ static int n_safekeepers = 0;
static int quorum = 0;
static Safekeeper safekeeper[MAX_SAFEKEEPERS];
static XLogRecPtr availableLsn; /* WAL has been generated up to this point */
static XLogRecPtr lastSentCommitLsn; /* last commitLsn broadcast to
* safekeepers */
static ProposerGreeting greetRequest;
static VoteRequest voteRequest; /* Vote request for safekeeper */
@@ -162,7 +161,6 @@ static bool BlockingWrite(Safekeeper *sk, void *msg, size_t msg_size, Safekeeper
static bool AsyncWrite(Safekeeper *sk, void *msg, size_t msg_size, SafekeeperState flush_state);
static bool AsyncFlush(Safekeeper *sk);
static void nwp_shmem_startup_hook(void);
static void nwp_register_gucs(void);
static void nwp_prepare_shmem(void);
@@ -176,7 +174,6 @@ static shmem_request_hook_type prev_shmem_request_hook = NULL;
static void walproposer_shmem_request(void);
#endif
void
pg_init_walproposer(void)
{
@@ -207,10 +204,9 @@ nwp_register_gucs(void)
&wal_acceptors_list, /* valueAddr */
"", /* bootValue */
PGC_POSTMASTER,
GUC_LIST_INPUT, /* extensions can't use
* GUC_LIST_QUOTE */
NULL, NULL, NULL
);
NULL, NULL, NULL);
DefineCustomIntVariable(
"neon.safekeeper_reconnect_timeout",
@@ -220,8 +216,7 @@ nwp_register_gucs(void)
1000, 0, INT_MAX, /* default, min, max */
PGC_SIGHUP, /* context */
GUC_UNIT_MS, /* flags */
NULL, NULL, NULL
);
NULL, NULL, NULL);
DefineCustomIntVariable(
"neon.safekeeper_connect_timeout",
@@ -231,9 +226,7 @@ nwp_register_gucs(void)
5000, 0, INT_MAX,
PGC_SIGHUP,
GUC_UNIT_MS,
NULL, NULL, NULL
);
NULL, NULL, NULL);
}
/* shmem handling */
@@ -499,19 +492,19 @@ WalProposerInitImpl(XLogRecPtr flushRecPtr, uint64 systemId)
greetRequest.pgVersion = PG_VERSION_NUM;
pg_strong_random(&greetRequest.proposerId, sizeof(greetRequest.proposerId));
greetRequest.systemId = systemId;
if (!zenith_timeline_walproposer)
if (!neon_timeline_walproposer)
elog(FATAL, "neon.timeline_id is not provided");
if (*zenith_timeline_walproposer != '\0' &&
!HexDecodeString(greetRequest.ztimelineid, zenith_timeline_walproposer, 16))
elog(FATAL, "Could not parse neon.timeline_id, %s", zenith_timeline_walproposer);
if (!zenith_tenant_walproposer)
if (*neon_timeline_walproposer != '\0' &&
!HexDecodeString(greetRequest.timeline_id, neon_timeline_walproposer, 16))
elog(FATAL, "Could not parse neon.timeline_id, %s", neon_timeline_walproposer);
if (!neon_tenant_walproposer)
elog(FATAL, "neon.tenant_id is not provided");
if (*zenith_tenant_walproposer != '\0' &&
!HexDecodeString(greetRequest.ztenantid, zenith_tenant_walproposer, 16))
elog(FATAL, "Could not parse neon.tenant_id, %s", zenith_tenant_walproposer);
if (*neon_tenant_walproposer != '\0' &&
!HexDecodeString(greetRequest.tenant_id, neon_tenant_walproposer, 16))
elog(FATAL, "Could not parse neon.tenant_id, %s", neon_tenant_walproposer);
#if PG_VERSION_NUM >= 150000
/* FIXME don't use hardcoded timeline id */
greetRequest.timeline = 1;
#else
greetRequest.timeline = ThisTimeLineID;
@@ -657,8 +650,8 @@ ResetConnection(Safekeeper *sk)
int written = 0;
written = snprintf((char *) &sk->conninfo, MAXCONNINFO,
"host=%s port=%s dbname=replication options='-c ztimelineid=%s ztenantid=%s'",
sk->host, sk->port, zenith_timeline_walproposer, zenith_tenant_walproposer);
"host=%s port=%s dbname=replication options='-c timeline_id=%s tenant_id=%s'",
sk->host, sk->port, neon_timeline_walproposer, neon_tenant_walproposer);
/*
* currently connection string is not that long, but once we pass
@@ -1326,8 +1319,7 @@ DetermineEpochStartLsn(void)
propTerm,
LSN_FORMAT_ARGS(propEpochStartLsn),
safekeeper[donor].host, safekeeper[donor].port,
LSN_FORMAT_ARGS(truncateLsn)
);
LSN_FORMAT_ARGS(truncateLsn));
/*
* Ensure the basebackup we are running (at RedoStartLsn) matches LSN
@@ -1373,8 +1365,8 @@ WalProposerRecovery(int donor, TimeLineID timeline, XLogRecPtr startpos, XLogRec
WalReceiverConn *wrconn;
WalRcvStreamOptions options;
sprintf(conninfo, "host=%s port=%s dbname=replication options='-c ztimelineid=%s ztenantid=%s'",
safekeeper[donor].host, safekeeper[donor].port, zenith_timeline_walproposer, zenith_tenant_walproposer);
sprintf(conninfo, "host=%s port=%s dbname=replication options='-c timeline_id=%s tenant_id=%s'",
safekeeper[donor].host, safekeeper[donor].port, neon_timeline_walproposer, neon_tenant_walproposer);
wrconn = walrcv_connect(conninfo, false, "wal_proposer_recovery", &err);
if (!wrconn)
{
@@ -1544,8 +1536,7 @@ SendProposerElected(Safekeeper *sk)
else
{
XLogRecPtr propEndLsn = propTermHistory.entries[i + 1].lsn;
XLogRecPtr skEndLsn = (i + 1 < th->n_entries ? th->entries[i + 1].lsn :
sk->voteResponse.flushLsn);
XLogRecPtr skEndLsn = (i + 1 < th->n_entries ? th->entries[i + 1].lsn : sk->voteResponse.flushLsn);
sk->startStreamingAt = Min(propEndLsn, skEndLsn);
}
@@ -1759,7 +1750,7 @@ SendAppendRequests(Safekeeper *sk)
req->beginLsn,
req->endLsn - req->beginLsn,
#if PG_VERSION_NUM >= 150000
/* FIXME don't use hardcoded timelineid here */
/* FIXME don't use hardcoded timeline_id here */
1,
#else
ThisTimeLineID,
@@ -1784,9 +1775,9 @@ SendAppendRequests(Safekeeper *sk)
case PG_ASYNC_WRITE_TRY_FLUSH:
/*
* We still need to call PQflush some more to finish the job.
* Caller function will handle this by setting right event
* set.
* We still need to call PQflush some more to finish the
* job. Caller function will handle this by setting the right
* event set.
*/
sk->flushWrite = true;
return true;
@@ -1885,40 +1876,40 @@ ParseReplicationFeedbackMessage(StringInfo reply_message, ReplicationFeedback *
if (strcmp(key, "current_timeline_size") == 0)
{
pq_getmsgint(reply_message, sizeof(int32));
//read value length
rf->currentClusterSize = pq_getmsgint64(reply_message);
/* read value length */
rf->currentClusterSize = pq_getmsgint64(reply_message);
elog(DEBUG2, "ParseReplicationFeedbackMessage: current_timeline_size %lu",
rf->currentClusterSize);
}
else if (strcmp(key, "ps_writelsn") == 0)
{
pq_getmsgint(reply_message, sizeof(int32));
//read value length
rf->ps_writelsn = pq_getmsgint64(reply_message);
/* read value length */
rf->ps_writelsn = pq_getmsgint64(reply_message);
elog(DEBUG2, "ParseReplicationFeedbackMessage: ps_writelsn %X/%X",
LSN_FORMAT_ARGS(rf->ps_writelsn));
}
else if (strcmp(key, "ps_flushlsn") == 0)
{
pq_getmsgint(reply_message, sizeof(int32));
//read value length
rf->ps_flushlsn = pq_getmsgint64(reply_message);
/* read value length */
rf->ps_flushlsn = pq_getmsgint64(reply_message);
elog(DEBUG2, "ParseReplicationFeedbackMessage: ps_flushlsn %X/%X",
LSN_FORMAT_ARGS(rf->ps_flushlsn));
}
else if (strcmp(key, "ps_applylsn") == 0)
{
pq_getmsgint(reply_message, sizeof(int32));
//read value length
rf->ps_applylsn = pq_getmsgint64(reply_message);
/* read value length */
rf->ps_applylsn = pq_getmsgint64(reply_message);
elog(DEBUG2, "ParseReplicationFeedbackMessage: ps_applylsn %X/%X",
LSN_FORMAT_ARGS(rf->ps_applylsn));
}
else if (strcmp(key, "ps_replytime") == 0)
{
pq_getmsgint(reply_message, sizeof(int32));
//read value length
rf->ps_replytime = pq_getmsgint64(reply_message);
/* read value length */
rf->ps_replytime = pq_getmsgint64(reply_message);
{
char *replyTimeStr;
@@ -1933,13 +1924,13 @@ ParseReplicationFeedbackMessage(StringInfo reply_message, ReplicationFeedback *
else
{
len = pq_getmsgint(reply_message, sizeof(int32));
//read value length
/* read value length */
/*
* Skip unknown keys to support backward compatible protocol
* changes
*/
elog(LOG, "ParseReplicationFeedbackMessage: unknown key: %s len %d", key, len);
pq_getmsgbytes(reply_message, len);
};
}
@@ -1973,7 +1964,6 @@ CombineHotStanbyFeedbacks(HotStandbyFeedback * hs)
}
}
/*
* Get minimum of flushed LSNs of all safekeepers, which is the LSN of the
* last WAL record that can be safely discarded.
@@ -2009,8 +1999,7 @@ GetAcknowledgedByQuorumWALPosition(void)
* Like in Raft, we aren't allowed to commit entries from previous
* terms, so ignore reported LSN until it gets to epochStartLsn.
*/
responses[i] = safekeeper[i].appendResponse.flushLsn >= propEpochStartLsn ?
safekeeper[i].appendResponse.flushLsn : 0;
responses[i] = safekeeper[i].appendResponse.flushLsn >= propEpochStartLsn ? safekeeper[i].appendResponse.flushLsn : 0;
}
qsort(responses, n_safekeepers, sizeof(XLogRecPtr), CompareLsn);
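The surrounding function, GetAcknowledgedByQuorumWALPosition, sorts those epoch-filtered flush LSNs and returns the position that at least a quorum of safekeepers has reached. A sketch of the selection in Rust, keeping the convention that 0 marks a response that is not yet eligible:

fn acknowledged_by_quorum(mut flush_lsns: Vec<u64>, quorum: usize) -> u64 {
    let n = flush_lsns.len();
    assert!(quorum >= 1 && quorum <= n);
    flush_lsns.sort_unstable(); // ascending, like qsort with CompareLsn
    // The (n - quorum)-th element has been reached by at least `quorum` nodes.
    flush_lsns[n - quorum]
}

With responses [0, 120, 100] and quorum = 2 this yields 100: two safekeepers have flushed at least up to that point.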
@@ -2058,7 +2047,6 @@ replication_feedback_set(ReplicationFeedback * rf)
SpinLockRelease(&walprop_shared->mutex);
}
void
replication_feedback_get_lsns(XLogRecPtr *writeLsn, XLogRecPtr *flushLsn, XLogRecPtr *applyLsn)
{
@@ -2069,12 +2057,11 @@ replication_feedback_get_lsns(XLogRecPtr *writeLsn, XLogRecPtr *flushLsn, XLogRe
SpinLockRelease(&walprop_shared->mutex);
}
/*
* Get ReplicationFeedback fields from the most advanced safekeeper
*/
static void
GetLatestZentihFeedback(ReplicationFeedback * rf)
GetLatestNeonFeedback(ReplicationFeedback * rf)
{
int latest_safekeeper = 0;
XLogRecPtr ps_writelsn = InvalidXLogRecPtr;
@@ -2094,7 +2081,7 @@ GetLatestZentihFeedback(ReplicationFeedback * rf)
rf->ps_applylsn = safekeeper[latest_safekeeper].appendResponse.rf.ps_applylsn;
rf->ps_replytime = safekeeper[latest_safekeeper].appendResponse.rf.ps_replytime;
elog(DEBUG2, "GetLatestZentihFeedback: currentClusterSize %lu,"
elog(DEBUG2, "GetLatestNeonFeedback: currentClusterSize %lu,"
" ps_writelsn %X/%X, ps_flushlsn %X/%X, ps_applylsn %X/%X, ps_replytime %lu",
rf->currentClusterSize,
LSN_FORMAT_ARGS(rf->ps_writelsn),
@@ -2113,14 +2100,13 @@ HandleSafekeeperResponse(void)
XLogRecPtr diskConsistentLsn;
XLogRecPtr minFlushLsn;
minQuorumLsn = GetAcknowledgedByQuorumWALPosition();
diskConsistentLsn = quorumFeedback.rf.ps_flushlsn;
if (!syncSafekeepers)
{
/* Get ReplicationFeedback fields from the most advanced safekeeper */
GetLatestZentihFeedback(&quorumFeedback.rf);
GetLatestNeonFeedback(&quorumFeedback.rf);
SetZenithCurrentClusterSize(quorumFeedback.rf.currentClusterSize);
}
@@ -2139,7 +2125,7 @@ HandleSafekeeperResponse(void)
quorumFeedback.flushLsn,
/*
* apply_lsn - This is what has been processed and durably saved at the
* pageserver.
*/
quorumFeedback.rf.ps_flushlsn,
@@ -2460,7 +2446,7 @@ backpressure_lag_impl(void)
XLogRecPtr myFlushLsn = GetFlushRecPtr();
#endif
replication_feedback_get_lsns(&writePtr, &flushPtr, &applyPtr);
#define MB ((XLogRecPtr)1024*1024)
#define MB ((XLogRecPtr)1024 * 1024)
elog(DEBUG2, "current flushLsn %X/%X ReplicationFeedback: write %X/%X flush %X/%X apply %X/%X",
LSN_FORMAT_ARGS(myFlushLsn),
@@ -2468,23 +2454,17 @@ backpressure_lag_impl(void)
LSN_FORMAT_ARGS(flushPtr),
LSN_FORMAT_ARGS(applyPtr));
if ((writePtr != InvalidXLogRecPtr
&& max_replication_write_lag > 0
&& myFlushLsn > writePtr + max_replication_write_lag * MB))
if ((writePtr != InvalidXLogRecPtr && max_replication_write_lag > 0 && myFlushLsn > writePtr + max_replication_write_lag * MB))
{
return (myFlushLsn - writePtr - max_replication_write_lag * MB);
}
if ((flushPtr != InvalidXLogRecPtr
&& max_replication_flush_lag > 0
&& myFlushLsn > flushPtr + max_replication_flush_lag * MB))
if ((flushPtr != InvalidXLogRecPtr && max_replication_flush_lag > 0 && myFlushLsn > flushPtr + max_replication_flush_lag * MB))
{
return (myFlushLsn - flushPtr - max_replication_flush_lag * MB);
}
if ((applyPtr != InvalidXLogRecPtr
&& max_replication_apply_lag > 0
&& myFlushLsn > applyPtr + max_replication_apply_lag * MB))
if ((applyPtr != InvalidXLogRecPtr && max_replication_apply_lag > 0 && myFlushLsn > applyPtr + max_replication_apply_lag * MB))
{
return (myFlushLsn - applyPtr - max_replication_apply_lag * MB);
}
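Reformatting aside, the three conditions collapsed above are one rule applied to the write, flush and apply positions: if the local flush LSN has run more than the configured lag (in megabytes) ahead of what was reported back, return the overshoot in bytes. A compact Rust restatement, with 0 standing in for InvalidXLogRecPtr:

const MB: u64 = 1024 * 1024;

fn lag(my_flush_lsn: u64, reported_lsn: u64, max_lag_mb: u64) -> u64 {
    if reported_lsn != 0
        && max_lag_mb > 0
        && my_flush_lsn > reported_lsn + max_lag_mb * MB
    {
        my_flush_lsn - reported_lsn - max_lag_mb * MB
    } else {
        0
    }
}

For example, with max_replication_write_lag = 15 and a local flush LSN 20 MB past the reported write position, the backend is throttled by 5 MB worth of lag.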

View File

@@ -10,16 +10,16 @@
#include "utils/uuid.h"
#include "replication/walreceiver.h"
#define SK_MAGIC 0xCafeCeefu
#define SK_PROTOCOL_VERSION 2
#define MAX_SAFEKEEPERS 32
#define MAX_SEND_SIZE (XLOG_BLCKSZ * 16) /* max size of a single
* WAL message */
#define XLOG_HDR_SIZE (1+8*3) /* 'w' + startPos + walEnd + timestamp */
#define XLOG_HDR_START_POS 1 /* offset of start position in wal sender
#define MAX_SAFEKEEPERS 32
#define MAX_SEND_SIZE (XLOG_BLCKSZ * 16) /* max size of a single WAL
* message */
#define XLOG_HDR_SIZE (1 + 8 * 3) /* 'w' + startPos + walEnd + timestamp */
#define XLOG_HDR_START_POS 1 /* offset of start position in wal sender
* message header */
#define XLOG_HDR_END_POS (1 + 8) /* offset of end position in wal sender
* message header */
/*
@@ -39,8 +39,8 @@ typedef struct WalProposerConn WalProposerConn;
struct WalMessage;
typedef struct WalMessage WalMessage;
extern char *zenith_timeline_walproposer;
extern char *zenith_tenant_walproposer;
extern char *neon_timeline_walproposer;
extern char *neon_tenant_walproposer;
/* Possible return values from ReadPGAsync */
typedef enum
@@ -170,8 +170,8 @@ typedef struct ProposerGreeting
uint32 pgVersion;
pg_uuid_t proposerId;
uint64 systemId; /* Postgres system identifier */
uint8 ztimelineid[16]; /* Zenith timeline id */
uint8 ztenantid[16];
uint8 timeline_id[16]; /* Neon timeline id */
uint8 tenant_id[16];
TimeLineID timeline;
uint32 walSegSize;
} ProposerGreeting;
@@ -226,7 +226,7 @@ typedef struct VoteResponse
* proposer to choose the most advanced one.
*/
XLogRecPtr flushLsn;
XLogRecPtr truncateLsn; /* minimal LSN which may be needed for
* recovery of some safekeeper */
TermHistory termHistory;
XLogRecPtr timelineStartLsn; /* timeline globally starts at this LSN */
@@ -283,7 +283,6 @@ typedef struct HotStandbyFeedback
FullTransactionId catalog_xmin;
} HotStandbyFeedback;
typedef struct ReplicationFeedback
{
/* current size of the timeline on pageserver */
@@ -295,7 +294,6 @@ typedef struct ReplicationFeedback
TimestampTz ps_replytime;
} ReplicationFeedback;
typedef struct WalproposerShmemState
{
slock_t mutex;
@@ -323,7 +321,7 @@ typedef struct AppendResponse
XLogRecPtr commitLsn;
HotStandbyFeedback hs;
/* Feedback received from pageserver includes standby_status_update fields */
/* and custom zenith feedback. */
/* and custom neon feedback. */
/* This part of the message is extensible. */
ReplicationFeedback rf;
} AppendResponse;
@@ -332,7 +330,6 @@ typedef struct AppendResponse
/* Other fields are fixed part */
#define APPENDRESPONSE_FIXEDPART_SIZE offsetof(AppendResponse, rf)
/*
* Descriptor of safekeeper
*/
@@ -340,7 +337,7 @@ typedef struct Safekeeper
{
char const *host;
char const *port;
char conninfo[MAXCONNINFO]; /* connection info for
* connecting/reconnecting */
/*
@@ -366,12 +363,12 @@ typedef struct Safekeeper
*/
XLogRecPtr startStreamingAt;
bool flushWrite; /* set to true if we need to call AsyncFlush,
* to flush pending messages */
XLogRecPtr streamingAt; /* current streaming position */
AppendRequestHeader appendRequest; /* request for sending to safekeeper */
int eventPos; /* position in wait event set. Equal to -1 if
* no event */
SafekeeperState state; /* safekeeper state machine state */
TimestampTz startedConnAt; /* when connection attempt started */
@@ -380,7 +377,6 @@ typedef struct Safekeeper
AppendResponse appendResponse; /* feedback for master */
} Safekeeper;
extern PGDLLIMPORT void WalProposerMain(Datum main_arg);
void WalProposerBroadcast(XLogRecPtr startpos, XLogRecPtr endpos);
void WalProposerPoll(void);

View File

@@ -36,13 +36,13 @@ PG_FUNCTION_INFO_V1(get_raw_page_at_lsn_ex);
PG_FUNCTION_INFO_V1(neon_xlogflush);
/*
* Linkage to functions in zenith module.
* Linkage to functions in neon module.
* The signature here would need to be updated whenever function parameters change in pagestore_smgr.c
*/
typedef void (*zenith_read_at_lsn_type) (RelFileNode rnode, ForkNumber forkNum, BlockNumber blkno,
XLogRecPtr request_lsn, bool request_latest, char *buffer);
typedef void (*neon_read_at_lsn_type) (RelFileNode rnode, ForkNumber forkNum, BlockNumber blkno,
XLogRecPtr request_lsn, bool request_latest, char *buffer);
static zenith_read_at_lsn_type zenith_read_at_lsn_ptr;
static neon_read_at_lsn_type neon_read_at_lsn_ptr;
/*
* Module initialize function: fetch function pointers for cross-module calls.
@@ -51,13 +51,13 @@ void
_PG_init(void)
{
/* Asserts verify that typedefs above match original declarations */
AssertVariableIsOfType(&zenith_read_at_lsn, zenith_read_at_lsn_type);
zenith_read_at_lsn_ptr = (zenith_read_at_lsn_type)
load_external_function("$libdir/neon", "zenith_read_at_lsn",
AssertVariableIsOfType(&neon_read_at_lsn, neon_read_at_lsn_type);
neon_read_at_lsn_ptr = (neon_read_at_lsn_type)
load_external_function("$libdir/neon", "neon_read_at_lsn",
true, NULL);
}
#define zenith_read_at_lsn zenith_read_at_lsn_ptr
#define neon_read_at_lsn neon_read_at_lsn_ptr
/*
* test_consume_xids(int4), for rapidly consuming XIDs, to test wraparound.
@@ -96,7 +96,7 @@ test_consume_xids(PG_FUNCTION_ARGS)
Datum
clear_buffer_cache(PG_FUNCTION_ARGS)
{
bool save_zenith_test_evict;
bool save_neon_test_evict;
/*
* Temporarily set the zenith_test_evict GUC, so that when we pin and
@@ -104,7 +104,7 @@ clear_buffer_cache(PG_FUNCTION_ARGS)
* buffers, as there is no explicit "evict this buffer" function in the
* buffer manager.
*/
save_zenith_test_evict = zenith_test_evict;
save_neon_test_evict = zenith_test_evict;
zenith_test_evict = true;
PG_TRY();
{
@@ -149,14 +149,13 @@ clear_buffer_cache(PG_FUNCTION_ARGS)
PG_FINALLY();
{
/* restore the GUC */
zenith_test_evict = save_zenith_test_evict;
zenith_test_evict = save_neon_test_evict;
}
PG_END_TRY();
PG_RETURN_VOID();
}
/*
* Reads the page from page server without buffer cache
* usage mimics get_raw_page() in pageinspect, but offers reading versions at specific LSN
@@ -232,7 +231,6 @@ get_raw_page_at_lsn(PG_FUNCTION_ARGS)
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot access temporary tables of other sessions")));
forknum = forkname_to_number(text_to_cstring(forkname));
/* Initialize buffer to copy to */
@@ -240,7 +238,7 @@ get_raw_page_at_lsn(PG_FUNCTION_ARGS)
SET_VARSIZE(raw_page, BLCKSZ + VARHDRSZ);
raw_page_data = VARDATA(raw_page);
zenith_read_at_lsn(rel->rd_node, forknum, blkno, read_lsn, request_latest, raw_page_data);
neon_read_at_lsn(rel->rd_node, forknum, blkno, read_lsn, request_latest, raw_page_data);
relation_close(rel, AccessShareLock);
@@ -272,8 +270,7 @@ get_raw_page_at_lsn_ex(PG_FUNCTION_ARGS)
RelFileNode rnode = {
.spcNode = PG_GETARG_OID(0),
.dbNode = PG_GETARG_OID(1),
.relNode = PG_GETARG_OID(2)
};
.relNode = PG_GETARG_OID(2)};
ForkNumber forknum = PG_GETARG_UINT32(3);
@@ -281,14 +278,13 @@ get_raw_page_at_lsn_ex(PG_FUNCTION_ARGS)
bool request_latest = PG_ARGISNULL(5);
uint64 read_lsn = request_latest ? GetXLogInsertRecPtr() : PG_GETARG_INT64(5);
/* Initialize buffer to copy to */
bytea *raw_page = (bytea *) palloc(BLCKSZ + VARHDRSZ);
SET_VARSIZE(raw_page, BLCKSZ + VARHDRSZ);
raw_page_data = VARDATA(raw_page);
zenith_read_at_lsn(rnode, forknum, blkno, read_lsn, request_latest, raw_page_data);
neon_read_at_lsn(rnode, forknum, blkno, read_lsn, request_latest, raw_page_data);
PG_RETURN_BYTEA_P(raw_page);
}
}

View File

@@ -32,7 +32,7 @@ sha2 = "0.10.2"
socket2 = "0.4.4"
thiserror = "1.0.30"
tokio = { version = "1.17", features = ["macros"] }
tokio-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
tokio-rustls = "0.23.0"
url = "2.2.2"
git-version = "0.3.5"

View File

@@ -1,5 +1,5 @@
[tool.poetry]
name = "zenith"
name = "neon"
version = "0.1.0"
description = ""
authors = []

View File

@@ -14,8 +14,8 @@ tracing = "0.1.27"
clap = "3.0"
daemonize = "0.4.1"
tokio = { version = "1.17", features = ["macros", "fs"] }
postgres-protocol = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
anyhow = "1.0"
crc32c = "0.6.0"
humantime = "2.1.0"
@@ -25,7 +25,7 @@ serde = { version = "1.0", features = ["derive"] }
serde_with = "1.12.0"
hex = "0.4.3"
const_format = "0.2.21"
tokio-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
git-version = "0.3.5"
async-trait = "0.1"
once_cell = "1.13.0"

View File

@@ -30,8 +30,8 @@ use safekeeper::wal_service;
use safekeeper::SafeKeeperConf;
use utils::auth::JwtAuth;
use utils::{
http::endpoint, logging, project_git_version, shutdown::exit_now, signals, tcp_listener,
zid::NodeId,
http::endpoint, id::NodeId, logging, project_git_version, shutdown::exit_now, signals,
tcp_listener,
};
const LOCK_FILE_NAME: &str = "safekeeper.lock";
@@ -39,7 +39,7 @@ const ID_FILE_NAME: &str = "safekeeper.id";
project_git_version!(GIT_VERSION);
fn main() -> anyhow::Result<()> {
let arg_matches = App::new("Zenith safekeeper")
let arg_matches = App::new("Neon safekeeper")
.about("Store WAL stream to local file system and push it to WAL receivers")
.version(GIT_VERSION)
.arg(

View File

@@ -22,7 +22,7 @@ use etcd_broker::{
subscription_key::{OperationKind, SkOperationKind, SubscriptionKey},
Client, PutOptions,
};
use utils::zid::{NodeId, ZTenantTimelineId};
use utils::id::{NodeId, TenantTimelineId};
const RETRY_INTERVAL_MSEC: u64 = 1000;
const PUSH_INTERVAL_MSEC: u64 = 1000;
@@ -45,7 +45,7 @@ pub fn thread_main(conf: SafeKeeperConf) {
/// Key to per timeline per safekeeper data.
fn timeline_safekeeper_path(
broker_etcd_prefix: String,
zttid: ZTenantTimelineId,
zttid: TenantTimelineId,
sk_id: NodeId,
) -> String {
format!(
@@ -162,12 +162,12 @@ pub fn get_candiate_name(system_id: NodeId) -> String {
}
async fn push_sk_info(
zttid: ZTenantTimelineId,
zttid: TenantTimelineId,
mut client: Client,
key: String,
sk_info: SkTimelineInfo,
mut lease: Lease,
) -> anyhow::Result<(ZTenantTimelineId, Lease)> {
) -> anyhow::Result<(TenantTimelineId, Lease)> {
let put_opts = PutOptions::new().with_lease(lease.id);
client
.put(
@@ -202,7 +202,7 @@ struct Lease {
/// Push once in a while data about all active timelines to the broker.
async fn push_loop(conf: SafeKeeperConf) -> anyhow::Result<()> {
let mut client = Client::connect(&conf.broker_endpoints, None).await?;
let mut leases: HashMap<ZTenantTimelineId, Lease> = HashMap::new();
let mut leases: HashMap<TenantTimelineId, Lease> = HashMap::new();
let push_interval = Duration::from_millis(PUSH_INTERVAL_MSEC);
loop {

View File

@@ -14,7 +14,7 @@ use tracing::*;
use crate::control_file_upgrade::upgrade_control_file;
use crate::safekeeper::{SafeKeeperState, SK_FORMAT_VERSION, SK_MAGIC};
use metrics::{register_histogram_vec, Histogram, HistogramVec, DISK_WRITE_SECONDS_BUCKETS};
use utils::{bin_ser::LeSer, zid::ZTenantTimelineId};
use utils::{bin_ser::LeSer, id::TenantTimelineId};
use crate::SafeKeeperConf;
@@ -55,7 +55,7 @@ pub struct FileStorage {
}
impl FileStorage {
pub fn restore_new(zttid: &ZTenantTimelineId, conf: &SafeKeeperConf) -> Result<FileStorage> {
pub fn restore_new(zttid: &TenantTimelineId, conf: &SafeKeeperConf) -> Result<FileStorage> {
let timeline_dir = conf.timeline_dir(zttid);
let tenant_id = zttid.tenant_id.to_string();
let timeline_id = zttid.timeline_id.to_string();
@@ -72,7 +72,7 @@ impl FileStorage {
}
pub fn create_new(
zttid: &ZTenantTimelineId,
zttid: &TenantTimelineId,
conf: &SafeKeeperConf,
state: SafeKeeperState,
) -> Result<FileStorage> {
@@ -115,7 +115,7 @@ impl FileStorage {
// Load control file for given zttid at path specified by conf.
pub fn load_control_file_conf(
conf: &SafeKeeperConf,
zttid: &ZTenantTimelineId,
zttid: &TenantTimelineId,
) -> Result<SafeKeeperState> {
let path = conf.timeline_dir(zttid).join(CONTROL_FILE_NAME);
Self::load_control_file(path)
@@ -252,7 +252,7 @@ mod test {
use crate::{safekeeper::SafeKeeperState, SafeKeeperConf};
use anyhow::Result;
use std::fs;
use utils::{lsn::Lsn, zid::ZTenantTimelineId};
use utils::{id::TenantTimelineId, lsn::Lsn};
fn stub_conf() -> SafeKeeperConf {
let workdir = tempfile::tempdir().unwrap().into_path();
@@ -264,7 +264,7 @@ mod test {
fn load_from_control_file(
conf: &SafeKeeperConf,
zttid: &ZTenantTimelineId,
zttid: &TenantTimelineId,
) -> Result<(FileStorage, SafeKeeperState)> {
fs::create_dir_all(&conf.timeline_dir(zttid)).expect("failed to create timeline dir");
Ok((
@@ -275,7 +275,7 @@ mod test {
fn create(
conf: &SafeKeeperConf,
zttid: &ZTenantTimelineId,
zttid: &TenantTimelineId,
) -> Result<(FileStorage, SafeKeeperState)> {
fs::create_dir_all(&conf.timeline_dir(zttid)).expect("failed to create timeline dir");
let state = SafeKeeperState::empty();
@@ -286,7 +286,7 @@ mod test {
#[test]
fn test_read_write_safekeeper_state() {
let conf = stub_conf();
let zttid = ZTenantTimelineId::generate();
let zttid = TenantTimelineId::generate();
{
let (mut storage, mut state) = create(&conf, &zttid).expect("failed to create state");
// change something
@@ -301,7 +301,7 @@ mod test {
#[test]
fn test_safekeeper_state_checksum_mismatch() {
let conf = stub_conf();
let zttid = ZTenantTimelineId::generate();
let zttid = TenantTimelineId::generate();
{
let (mut storage, mut state) = create(&conf, &zttid).expect("failed to read state");

View File

@@ -7,9 +7,9 @@ use serde::{Deserialize, Serialize};
use tracing::*;
use utils::{
bin_ser::LeSer,
id::{TenantId, TimelineId},
lsn::Lsn,
pq_proto::SystemId,
zid::{ZTenantId, ZTimelineId},
};
/// Persistent consensus state of the acceptor.
@@ -45,9 +45,8 @@ pub struct ServerInfoV2 {
/// Postgres server version
pub pg_version: u32,
pub system_id: SystemId,
pub tenant_id: ZTenantId,
/// Zenith timelineid
pub ztli: ZTimelineId,
pub tenant_id: TenantId,
pub timeline_id: TimelineId,
pub wal_seg_size: u32,
}
@@ -76,10 +75,9 @@ pub struct ServerInfoV3 {
pub pg_version: u32,
pub system_id: SystemId,
#[serde(with = "hex")]
pub tenant_id: ZTenantId,
/// Zenith timelineid
pub tenant_id: TenantId,
#[serde(with = "hex")]
pub timeline_id: ZTimelineId,
pub timeline_id: TimelineId,
pub wal_seg_size: u32,
}
@@ -106,10 +104,9 @@ pub struct SafeKeeperStateV3 {
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SafeKeeperStateV4 {
#[serde(with = "hex")]
pub tenant_id: ZTenantId,
/// Zenith timelineid
pub tenant_id: TenantId,
#[serde(with = "hex")]
pub timeline_id: ZTimelineId,
pub timeline_id: TimelineId,
/// persistent acceptor state
pub acceptor_state: AcceptorState,
/// information about server
@@ -154,7 +151,7 @@ pub fn upgrade_control_file(buf: &[u8], version: u32) -> Result<SafeKeeperState>
};
return Ok(SafeKeeperState {
tenant_id: oldstate.server.tenant_id,
timeline_id: oldstate.server.ztli,
timeline_id: oldstate.server.timeline_id,
acceptor_state: ac,
server: ServerInfo {
pg_version: oldstate.server.pg_version,
@@ -181,7 +178,7 @@ pub fn upgrade_control_file(buf: &[u8], version: u32) -> Result<SafeKeeperState>
};
return Ok(SafeKeeperState {
tenant_id: oldstate.server.tenant_id,
timeline_id: oldstate.server.ztli,
timeline_id: oldstate.server.timeline_id,
acceptor_state: oldstate.acceptor_state,
server,
proposer_uuid: oldstate.proposer_uuid,
@@ -193,9 +190,9 @@ pub fn upgrade_control_file(buf: &[u8], version: u32) -> Result<SafeKeeperState>
remote_consistent_lsn: Lsn(0),
peers: Peers(vec![]),
});
// migrate to moving ztenantid/ztli to the top and adding some lsns
// migrate to moving tenant_id/timeline_id to the top and adding some lsns
} else if version == 3 {
info!("reading safekeeper control file version {}", version);
info!("reading safekeeper control file version {version}");
let oldstate = SafeKeeperStateV3::des(&buf[..buf.len()])?;
let server = ServerInfo {
pg_version: oldstate.server.pg_version,
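upgrade_control_file is a plain version ladder: deserialize with the matching SafeKeeperStateVn layout, remap renamed fields (the old ztli becomes timeline_id here), and reject unknown versions. Schematically, with the per-version decoding stubbed out:

struct SafeKeeperStateCurrent; // current layout, fields elided

fn from_old_layout(_buf: &[u8], _version: u32) -> Result<SafeKeeperStateCurrent, String> {
    // Deserialize the old struct and remap fields, e.g. server.ztli -> timeline_id.
    Ok(SafeKeeperStateCurrent)
}

fn upgrade(buf: &[u8], version: u32) -> Result<SafeKeeperStateCurrent, String> {
    match version {
        1..=4 => from_old_layout(buf, version),
        v => Err(format!("unsupported control file version {v}")),
    }
}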

View File

@@ -14,10 +14,10 @@ use regex::Regex;
use std::sync::Arc;
use tracing::info;
use utils::{
id::{TenantId, TenantTimelineId, TimelineId},
lsn::Lsn,
postgres_backend::{self, PostgresBackend},
pq_proto::{BeMessage, FeStartupPacket, RowDescriptor, INT4_OID, TEXT_OID},
zid::{ZTenantId, ZTenantTimelineId, ZTimelineId},
};
/// Safekeeper handler of postgres commands
@@ -25,8 +25,8 @@ pub struct SafekeeperPostgresHandler {
pub conf: SafeKeeperConf,
/// assigned application name
pub appname: Option<String>,
pub ztenantid: Option<ZTenantId>,
pub ztimelineid: Option<ZTimelineId>,
pub tenant_id: Option<TenantId>,
pub timeline_id: Option<TimelineId>,
pub timeline: Option<Arc<Timeline>>,
}
@@ -63,17 +63,17 @@ fn parse_cmd(cmd: &str) -> Result<SafekeeperPostgresCommand> {
}
impl postgres_backend::Handler for SafekeeperPostgresHandler {
// ztenant id and ztimeline id are passed in connection string params
// tenant_id and timeline_id are passed in connection string params
fn startup(&mut self, _pgb: &mut PostgresBackend, sm: &FeStartupPacket) -> Result<()> {
if let FeStartupPacket::StartupMessage { params, .. } = sm {
if let Some(options) = params.options_raw() {
for opt in options {
match opt.split_once('=') {
Some(("ztenantid", value)) => {
self.ztenantid = Some(value.parse()?);
Some(("tenant_id", value)) => {
self.tenant_id = Some(value.parse()?);
}
Some(("ztimelineid", value)) => {
self.ztimelineid = Some(value.parse()?);
Some(("timeline_id", value)) => {
self.timeline_id = Some(value.parse()?);
}
_ => continue,
}
@@ -95,18 +95,18 @@ impl postgres_backend::Handler for SafekeeperPostgresHandler {
info!(
"got query {:?} in timeline {:?}",
query_string, self.ztimelineid
query_string, self.timeline_id
);
let create = !(matches!(cmd, SafekeeperPostgresCommand::StartReplication { .. })
|| matches!(cmd, SafekeeperPostgresCommand::IdentifySystem));
let tenantid = self.ztenantid.context("tenantid is required")?;
let timelineid = self.ztimelineid.context("timelineid is required")?;
let tenant_id = self.tenant_id.context("tenant_id is required")?;
let timeline_id = self.timeline_id.context("timeline_id is required")?;
if self.timeline.is_none() {
self.timeline.set(
&self.conf,
ZTenantTimelineId::new(tenantid, timelineid),
TenantTimelineId::new(tenant_id, timeline_id),
create,
)?;
}
@@ -121,7 +121,7 @@ impl postgres_backend::Handler for SafekeeperPostgresHandler {
SafekeeperPostgresCommand::IdentifySystem => self.handle_identify_system(pgb),
SafekeeperPostgresCommand::JSONCtrl { ref cmd } => handle_json_ctrl(self, pgb, cmd),
}
.context(format!("timeline {timelineid}"))?;
.context(format!("timeline {timeline_id}"))?;
Ok(())
}
@@ -132,8 +132,8 @@ impl SafekeeperPostgresHandler {
SafekeeperPostgresHandler {
conf,
appname: None,
ztenantid: None,
ztimelineid: None,
tenant_id: None,
timeline_id: None,
timeline: None,
}
}
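After this change both ids travel in the startup packet as plain tenant_id/timeline_id options; the walproposer hunks build exactly that options='-c timeline_id=... tenant_id=...' string. A small Rust sketch of the receiving side's loop over options (stringly typed here for brevity; the handler itself parses into TenantId/TimelineId):

fn parse_ids(options: &[&str]) -> (Option<String>, Option<String>) {
    let (mut tenant_id, mut timeline_id) = (None, None);
    for opt in options {
        match opt.split_once('=') {
            Some(("tenant_id", v)) => tenant_id = Some(v.to_string()),
            Some(("timeline_id", v)) => timeline_id = Some(v.to_string()),
            _ => continue, // unknown options are ignored
        }
    }
    (tenant_id, timeline_id)
}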

View File

@@ -1,8 +1,8 @@
use serde::{Deserialize, Serialize};
use utils::zid::{NodeId, ZTimelineId};
use utils::id::{NodeId, TimelineId};
#[derive(Serialize, Deserialize)]
pub struct TimelineCreateRequest {
pub timeline_id: ZTimelineId,
pub timeline_id: TimelineId,
pub peer_ids: Vec<NodeId>,
}

View File

@@ -21,8 +21,8 @@ use utils::{
request::{ensure_no_body, parse_request_param},
RequestExt, RouterBuilder,
},
id::{NodeId, TenantId, TenantTimelineId, TimelineId},
lsn::Lsn,
zid::{NodeId, ZTenantId, ZTenantTimelineId, ZTimelineId},
};
use super::models::TimelineCreateRequest;
@@ -68,9 +68,9 @@ struct AcceptorStateStatus {
#[derive(Debug, Serialize)]
struct TimelineStatus {
#[serde(serialize_with = "display_serialize")]
tenant_id: ZTenantId,
tenant_id: TenantId,
#[serde(serialize_with = "display_serialize")]
timeline_id: ZTimelineId,
timeline_id: TimelineId,
acceptor_state: AcceptorStateStatus,
#[serde(serialize_with = "display_serialize")]
flush_lsn: Lsn,
@@ -90,7 +90,7 @@ struct TimelineStatus {
/// Report info about timeline.
async fn timeline_status_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
let zttid = ZTenantTimelineId::new(
let zttid = TenantTimelineId::new(
parse_request_param(&request, "tenant_id")?,
parse_request_param(&request, "timeline_id")?,
);
@@ -125,7 +125,7 @@ async fn timeline_status_handler(request: Request<Body>) -> Result<Response<Body
async fn timeline_create_handler(mut request: Request<Body>) -> Result<Response<Body>, ApiError> {
let request_data: TimelineCreateRequest = json_request(&mut request).await?;
let zttid = ZTenantTimelineId {
let zttid = TenantTimelineId {
tenant_id: parse_request_param(&request, "tenant_id")?,
timeline_id: request_data.timeline_id,
};
@@ -146,7 +146,7 @@ async fn timeline_create_handler(mut request: Request<Body>) -> Result<Response<
async fn timeline_delete_force_handler(
mut request: Request<Body>,
) -> Result<Response<Body>, ApiError> {
let zttid = ZTenantTimelineId::new(
let zttid = TenantTimelineId::new(
parse_request_param(&request, "tenant_id")?,
parse_request_param(&request, "timeline_id")?,
);
@@ -181,7 +181,7 @@ async fn tenant_delete_force_handler(
/// Used only in tests to hand craft required data.
async fn record_safekeeper_info(mut request: Request<Body>) -> Result<Response<Body>, ApiError> {
let zttid = ZTenantTimelineId::new(
let zttid = TenantTimelineId::new(
parse_request_param(&request, "tenant_id")?,
parse_request_param(&request, "timeline_id")?,
);

View File

@@ -97,8 +97,8 @@ fn prepare_safekeeper(spg: &mut SafekeeperPostgresHandler) -> Result<()> {
pg_version: 0, // unknown
proposer_id: [0u8; 16],
system_id: 0,
ztli: spg.ztimelineid.unwrap(),
tenant_id: spg.ztenantid.unwrap(),
timeline_id: spg.timeline_id.unwrap(),
tenant_id: spg.tenant_id.unwrap(),
tli: 0,
wal_seg_size: WAL_SEGMENT_SIZE as u32, // 16MB, default for tests
});

View File

@@ -5,7 +5,7 @@ use std::path::PathBuf;
use std::time::Duration;
use url::Url;
use utils::zid::{NodeId, ZTenantId, ZTenantTimelineId};
use utils::id::{NodeId, TenantId, TenantTimelineId};
pub mod broker;
pub mod control_file;
@@ -61,11 +61,11 @@ pub struct SafeKeeperConf {
}
impl SafeKeeperConf {
pub fn tenant_dir(&self, tenant_id: &ZTenantId) -> PathBuf {
pub fn tenant_dir(&self, tenant_id: &TenantId) -> PathBuf {
self.workdir.join(tenant_id.to_string())
}
pub fn timeline_dir(&self, zttid: &ZTenantTimelineId) -> PathBuf {
pub fn timeline_dir(&self, zttid: &TenantTimelineId) -> PathBuf {
self.tenant_dir(&zttid.tenant_id)
.join(zttid.timeline_id.to_string())
}

View File

@@ -8,7 +8,7 @@ use metrics::{
Gauge, IntGaugeVec,
};
use postgres_ffi::XLogSegNo;
use utils::{lsn::Lsn, zid::ZTenantTimelineId};
use utils::{id::TenantTimelineId, lsn::Lsn};
use crate::{
safekeeper::{SafeKeeperState, SafekeeperMemState},
@@ -16,7 +16,7 @@ use crate::{
};
pub struct FullTimelineInfo {
pub zttid: ZTenantTimelineId,
pub zttid: TenantTimelineId,
pub replicas: Vec<ReplicaState>,
pub wal_backup_active: bool,
pub timeline_is_active: bool,

View File

@@ -53,7 +53,7 @@ impl<'pg> ReceiveWalConn<'pg> {
/// Receive WAL from wal_proposer
pub fn run(&mut self, spg: &mut SafekeeperPostgresHandler) -> Result<()> {
let _enter = info_span!("WAL acceptor", timeline = %spg.ztimelineid.unwrap()).entered();
let _enter = info_span!("WAL acceptor", timeline = %spg.timeline_id.unwrap()).entered();
// Notify the libpq client that it's allowed to send `CopyData` messages
self.pg_backend

View File

@@ -19,9 +19,9 @@ use crate::send_wal::HotStandbyFeedback;
use crate::wal_storage;
use utils::{
bin_ser::LeSer,
id::{NodeId, TenantId, TenantTimelineId, TimelineId},
lsn::Lsn,
pq_proto::{ReplicationFeedback, SystemId},
zid::{NodeId, ZTenantId, ZTenantTimelineId, ZTimelineId},
};
pub const SK_MAGIC: u32 = 0xcafeceefu32;
@@ -166,10 +166,9 @@ pub struct Peers(pub Vec<(NodeId, PeerInfo)>);
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SafeKeeperState {
#[serde(with = "hex")]
pub tenant_id: ZTenantId,
/// Zenith timelineid
pub tenant_id: TenantId,
#[serde(with = "hex")]
pub timeline_id: ZTimelineId,
pub timeline_id: TimelineId,
/// persistent acceptor state
pub acceptor_state: AcceptorState,
/// information about server
@@ -219,7 +218,7 @@ pub struct SafekeeperMemState {
}
impl SafeKeeperState {
pub fn new(zttid: &ZTenantTimelineId, peers: Vec<NodeId>) -> SafeKeeperState {
pub fn new(zttid: &TenantTimelineId, peers: Vec<NodeId>) -> SafeKeeperState {
SafeKeeperState {
tenant_id: zttid.tenant_id,
timeline_id: zttid.timeline_id,
@@ -245,7 +244,7 @@ impl SafeKeeperState {
#[cfg(test)]
pub fn empty() -> Self {
SafeKeeperState::new(&ZTenantTimelineId::empty(), vec![])
SafeKeeperState::new(&TenantTimelineId::empty(), vec![])
}
}
@@ -260,9 +259,8 @@ pub struct ProposerGreeting {
pub pg_version: u32,
pub proposer_id: PgUuid,
pub system_id: SystemId,
/// Zenith timelineid
pub ztli: ZTimelineId,
pub tenant_id: ZTenantId,
pub timeline_id: TimelineId,
pub tenant_id: TenantId,
pub tli: TimeLineID,
pub wal_seg_size: u32,
}
@@ -507,13 +505,13 @@ where
{
// constructor
pub fn new(
ztli: ZTimelineId,
timeline_id: TimelineId,
state: CTRL,
mut wal_store: WAL,
node_id: NodeId,
) -> Result<SafeKeeper<CTRL, WAL>> {
if state.timeline_id != ZTimelineId::from([0u8; 16]) && ztli != state.timeline_id {
bail!("Calling SafeKeeper::new with inconsistent ztli ({}) and SafeKeeperState.server.timeline_id ({})", ztli, state.timeline_id);
if state.timeline_id != TimelineId::from([0u8; 16]) && timeline_id != state.timeline_id {
bail!("Calling SafeKeeper::new with inconsistent timeline_id ({}) and SafeKeeperState.server.timeline_id ({})", timeline_id, state.timeline_id);
}
// initialize wal_store, if state is already initialized
@@ -600,10 +598,10 @@ where
self.state.tenant_id
);
}
if msg.ztli != self.state.timeline_id {
if msg.timeline_id != self.state.timeline_id {
bail!(
"invalid timeline ID, got {}, expected {}",
msg.ztli,
msg.timeline_id,
self.state.timeline_id
);
}
@@ -982,9 +980,9 @@ mod tests {
persisted_state: SafeKeeperState::empty(),
};
let wal_store = DummyWalStore { lsn: Lsn(0) };
let ztli = ZTimelineId::from([0u8; 16]);
let timeline_id = TimelineId::from([0u8; 16]);
let mut sk = SafeKeeper::new(ztli, storage, wal_store, NodeId(0)).unwrap();
let mut sk = SafeKeeper::new(timeline_id, storage, wal_store, NodeId(0)).unwrap();
// check voting for 1 is ok
let vote_request = ProposerAcceptorMessage::VoteRequest(VoteRequest { term: 1 });
@@ -1000,7 +998,7 @@ mod tests {
persisted_state: state,
};
sk = SafeKeeper::new(ztli, storage, sk.wal_store, NodeId(0)).unwrap();
sk = SafeKeeper::new(timeline_id, storage, sk.wal_store, NodeId(0)).unwrap();
// and ensure voting second time for 1 is not ok
vote_resp = sk.process_msg(&vote_request);
@@ -1016,9 +1014,9 @@ mod tests {
persisted_state: SafeKeeperState::empty(),
};
let wal_store = DummyWalStore { lsn: Lsn(0) };
let ztli = ZTimelineId::from([0u8; 16]);
let timeline_id = TimelineId::from([0u8; 16]);
let mut sk = SafeKeeper::new(ztli, storage, wal_store, NodeId(0)).unwrap();
let mut sk = SafeKeeper::new(timeline_id, storage, wal_store, NodeId(0)).unwrap();
let mut ar_hdr = AppendRequestHeader {
term: 1,

View File

@@ -30,7 +30,7 @@ use utils::{
// See: https://www.postgresql.org/docs/13/protocol-replication.html
const HOT_STANDBY_FEEDBACK_TAG_BYTE: u8 = b'h';
const STANDBY_STATUS_UPDATE_TAG_BYTE: u8 = b'r';
// zenith extension of replication protocol
// neon extension of replication protocol
const NEON_STATUS_UPDATE_TAG_BYTE: u8 = b'z';
type FullTransactionId = u64;
@@ -105,7 +105,7 @@ impl ReplicationConn {
match &msg {
FeMessage::CopyData(m) => {
// There are three possible data messages that the client is supposed to send here:
// `HotStandbyFeedback` and `StandbyStatusUpdate` and `ZenithStandbyFeedback`.
// `HotStandbyFeedback` and `StandbyStatusUpdate` and `NeonStandbyFeedback`.
match m.first().cloned() {
Some(HOT_STANDBY_FEEDBACK_TAG_BYTE) => {
@@ -165,12 +165,12 @@ impl ReplicationConn {
pgb: &mut PostgresBackend,
mut start_pos: Lsn,
) -> Result<()> {
let _enter = info_span!("WAL sender", timeline = %spg.ztimelineid.unwrap()).entered();
let _enter = info_span!("WAL sender", timeline = %spg.timeline_id.unwrap()).entered();
// spawn the background thread which receives HotStandbyFeedback messages.
let bg_timeline = Arc::clone(spg.timeline.get());
let bg_stream_in = self.stream_in.take().unwrap();
let bg_timeline_id = spg.ztimelineid.unwrap();
let bg_timeline_id = spg.timeline_id.unwrap();
let state = ReplicaState::new();
// This replica_id is used below to check if it's time to stop replication.
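The three tag bytes above are the whole demultiplexer for feedback CopyData messages, with 'z' reserved for the neon extension. A sketch of the dispatch on the first byte of each message:

const HOT_STANDBY_FEEDBACK_TAG_BYTE: u8 = b'h';
const STANDBY_STATUS_UPDATE_TAG_BYTE: u8 = b'r';
const NEON_STATUS_UPDATE_TAG_BYTE: u8 = b'z'; // neon extension of the protocol

fn dispatch(msg: &[u8]) -> &'static str {
    match msg.first() {
        Some(&HOT_STANDBY_FEEDBACK_TAG_BYTE) => "hot standby feedback",
        Some(&STANDBY_STATUS_UPDATE_TAG_BYTE) => "standby status update",
        Some(&NEON_STATUS_UPDATE_TAG_BYTE) => "neon/pageserver feedback",
        _ => "unknown message",
    }
}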

View File

@@ -21,9 +21,9 @@ use tokio::sync::mpsc::Sender;
use tracing::*;
use utils::{
id::{NodeId, TenantId, TenantTimelineId},
lsn::Lsn,
pq_proto::ReplicationFeedback,
zid::{NodeId, ZTenantId, ZTenantTimelineId},
};
use crate::control_file;
@@ -98,7 +98,7 @@ impl SharedState {
/// Initialize timeline state, creating control file
fn create(
conf: &SafeKeeperConf,
zttid: &ZTenantTimelineId,
zttid: &TenantTimelineId,
peer_ids: Vec<NodeId>,
) -> Result<Self> {
let state = SafeKeeperState::new(zttid, peer_ids);
@@ -119,7 +119,7 @@ impl SharedState {
/// Restore SharedState from control file.
/// If file doesn't exist, bails out.
fn restore(conf: &SafeKeeperConf, zttid: &ZTenantTimelineId) -> Result<Self> {
fn restore(conf: &SafeKeeperConf, zttid: &TenantTimelineId) -> Result<Self> {
let control_store = control_file::FileStorage::restore_new(zttid, conf)?;
let wal_store = wal_storage::PhysicalStorage::new(zttid, conf);
@@ -143,7 +143,7 @@ impl SharedState {
/// Mark timeline active/inactive and return whether s3 offloading requires
/// start/stop action.
fn update_status(&mut self, ttid: ZTenantTimelineId) -> bool {
fn update_status(&mut self, ttid: TenantTimelineId) -> bool {
let is_active = self.is_active();
if self.active != is_active {
info!("timeline {} active={} now", ttid, is_active);
@@ -213,7 +213,7 @@ impl SharedState {
//
// To choose what feedback to use and resend to compute node,
// we need to know which pageserver compute node considers to be main.
// See https://github.com/zenithdb/zenith/issues/1171
// See https://github.com/neondatabase/neon/issues/1171
//
if let Some(pageserver_feedback) = state.pageserver_feedback {
if let Some(acc_feedback) = acc.pageserver_feedback {
@@ -227,7 +227,7 @@ impl SharedState {
// last lsn received by pageserver
// FIXME if multiple pageservers are streaming WAL, last_received_lsn must be tracked per pageserver.
// See https://github.com/zenithdb/zenith/issues/1171
// See https://github.com/neondatabase/neon/issues/1171
acc.last_received_lsn = Lsn::from(pageserver_feedback.ps_writelsn);
// When at least one pageserver has preserved data up to remote_consistent_lsn,
@@ -256,11 +256,11 @@ impl SharedState {
/// Database instance (tenant)
pub struct Timeline {
pub zttid: ZTenantTimelineId,
pub zttid: TenantTimelineId,
/// Sending here asks for wal backup launcher attention (start/stop
/// offloading). Sending the zttid instead of a concrete command allows
/// sending without holding the timeline lock.
wal_backup_launcher_tx: Sender<ZTenantTimelineId>,
wal_backup_launcher_tx: Sender<TenantTimelineId>,
commit_lsn_watch_tx: watch::Sender<Lsn>,
/// For breeding receivers.
commit_lsn_watch_rx: watch::Receiver<Lsn>,
@@ -269,8 +269,8 @@ pub struct Timeline {
impl Timeline {
fn new(
zttid: ZTenantTimelineId,
wal_backup_launcher_tx: Sender<ZTenantTimelineId>,
zttid: TenantTimelineId,
wal_backup_launcher_tx: Sender<TenantTimelineId>,
shared_state: SharedState,
) -> Timeline {
let (commit_lsn_watch_tx, commit_lsn_watch_rx) =
@@ -539,13 +539,13 @@ impl Timeline {
// Utilities needed by various Connection-like objects
pub trait TimelineTools {
fn set(&mut self, conf: &SafeKeeperConf, zttid: ZTenantTimelineId, create: bool) -> Result<()>;
fn set(&mut self, conf: &SafeKeeperConf, zttid: TenantTimelineId, create: bool) -> Result<()>;
fn get(&self) -> &Arc<Timeline>;
}
impl TimelineTools for Option<Arc<Timeline>> {
fn set(&mut self, conf: &SafeKeeperConf, zttid: ZTenantTimelineId, create: bool) -> Result<()> {
fn set(&mut self, conf: &SafeKeeperConf, zttid: TenantTimelineId, create: bool) -> Result<()> {
*self = Some(GlobalTimelines::get(conf, zttid, create)?);
Ok(())
}
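
For readers unfamiliar with the pattern this hunk renames: `TimelineTools` extends `Option<Arc<Timeline>>` with set/get helpers so connection-like objects can lazily bind a shared timeline. A self-contained sketch of the pattern follows; `Timeline` here is a simplified stand-in, and the real `set` resolves the timeline via `GlobalTimelines::get` rather than constructing one:

use std::sync::Arc;

struct Timeline {
    name: String,
}

// Extension trait over Option<Arc<Timeline>>, mirroring TimelineTools above.
trait TimelineTools {
    fn set(&mut self, name: &str) -> Result<(), String>;
    fn get(&self) -> &Arc<Timeline>;
}

impl TimelineTools for Option<Arc<Timeline>> {
    fn set(&mut self, name: &str) -> Result<(), String> {
        // The real code looks the timeline up via GlobalTimelines::get.
        *self = Some(Arc::new(Timeline { name: name.to_string() }));
        Ok(())
    }

    fn get(&self) -> &Arc<Timeline> {
        // Like the original, this assumes set() succeeded first.
        self.as_ref().expect("timeline must be set before use")
    }
}

fn main() {
    let mut tl: Option<Arc<Timeline>> = None;
    tl.set("main").unwrap();
    println!("bound to timeline {}", tl.get().name);
}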
@@ -556,8 +556,8 @@ impl TimelineTools for Option<Arc<Timeline>> {
}
struct GlobalTimelinesState {
timelines: HashMap<ZTenantTimelineId, Arc<Timeline>>,
wal_backup_launcher_tx: Option<Sender<ZTenantTimelineId>>,
timelines: HashMap<TenantTimelineId, Arc<Timeline>>,
wal_backup_launcher_tx: Option<Sender<TenantTimelineId>>,
}
static TIMELINES_STATE: Lazy<Mutex<GlobalTimelinesState>> = Lazy::new(|| {
@@ -577,7 +577,7 @@ pub struct TimelineDeleteForceResult {
pub struct GlobalTimelines;
impl GlobalTimelines {
pub fn init(wal_backup_launcher_tx: Sender<ZTenantTimelineId>) {
pub fn init(wal_backup_launcher_tx: Sender<TenantTimelineId>) {
let mut state = TIMELINES_STATE.lock().unwrap();
assert!(state.wal_backup_launcher_tx.is_none());
state.wal_backup_launcher_tx = Some(wal_backup_launcher_tx);
@@ -586,7 +586,7 @@ impl GlobalTimelines {
fn create_internal(
mut state: MutexGuard<GlobalTimelinesState>,
conf: &SafeKeeperConf,
zttid: ZTenantTimelineId,
zttid: TenantTimelineId,
peer_ids: Vec<NodeId>,
) -> Result<Arc<Timeline>> {
match state.timelines.get(&zttid) {
@@ -612,7 +612,7 @@ impl GlobalTimelines {
pub fn create(
conf: &SafeKeeperConf,
zttid: ZTenantTimelineId,
zttid: TenantTimelineId,
peer_ids: Vec<NodeId>,
) -> Result<Arc<Timeline>> {
let state = TIMELINES_STATE.lock().unwrap();
@@ -623,7 +623,7 @@ impl GlobalTimelines {
/// If control file doesn't exist and create=false, bails out.
pub fn get(
conf: &SafeKeeperConf,
zttid: ZTenantTimelineId,
zttid: TenantTimelineId,
create: bool,
) -> Result<Arc<Timeline>> {
let _enter = info_span!("", timeline = %zttid.timeline_id).entered();
@@ -664,13 +664,12 @@ impl GlobalTimelines {
}
/// Get loaded timeline, if it exists.
pub fn get_loaded(zttid: ZTenantTimelineId) -> Option<Arc<Timeline>> {
pub fn get_loaded(zttid: TenantTimelineId) -> Option<Arc<Timeline>> {
let state = TIMELINES_STATE.lock().unwrap();
state.timelines.get(&zttid).map(Arc::clone)
}
/// Get ZTenantTimelineIDs of all active timelines.
pub fn get_active_timelines() -> HashSet<ZTenantTimelineId> {
pub fn get_active_timelines() -> HashSet<TenantTimelineId> {
let state = TIMELINES_STATE.lock().unwrap();
state
.timelines
@@ -692,7 +691,7 @@ impl GlobalTimelines {
fn delete_force_internal(
conf: &SafeKeeperConf,
zttid: &ZTenantTimelineId,
zttid: &TenantTimelineId,
was_active: bool,
) -> Result<TimelineDeleteForceResult> {
match std::fs::remove_dir_all(conf.timeline_dir(zttid)) {
@@ -721,7 +720,7 @@ impl GlobalTimelines {
/// TODO: ensure all of the above never happens.
pub async fn delete_force(
conf: &SafeKeeperConf,
zttid: &ZTenantTimelineId,
zttid: &TenantTimelineId,
) -> Result<TimelineDeleteForceResult> {
info!("deleting timeline {}", zttid);
let timeline = TIMELINES_STATE.lock().unwrap().timelines.remove(zttid);
@@ -737,8 +736,8 @@ impl GlobalTimelines {
/// There may be a race if new timelines are created simultaneously.
pub async fn delete_force_all_for_tenant(
conf: &SafeKeeperConf,
tenant_id: &ZTenantId,
) -> Result<HashMap<ZTenantTimelineId, TimelineDeleteForceResult>> {
tenant_id: &TenantId,
) -> Result<HashMap<TenantTimelineId, TimelineDeleteForceResult>> {
info!("deleting all timelines for tenant {}", tenant_id);
let mut to_delete = HashMap::new();
{
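
Every hunk in this file is the same mechanical substitution: `ZTenantTimelineId` becomes `TenantTimelineId` (and `ZTenantId` becomes `TenantId`). For context, a minimal sketch of what the renamed composite id plausibly looks like, inferred from the field accesses (`zttid.tenant_id`, `zttid.timeline_id`) and `Display` uses above; the real definition lives in `utils::id`, and the 128-bit representation and format string here are assumptions:

use std::fmt;

// Hypothetical stand-ins; the real types live in utils::id.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct TenantId(pub u128);

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct TimelineId(pub u128);

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct TenantTimelineId {
    pub tenant_id: TenantId,
    pub timeline_id: TimelineId,
}

impl fmt::Display for TenantTimelineId {
    // info!("deleting timeline {}", zttid) above relies on a Display like this.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:032x}/{:032x}", self.tenant_id.0, self.timeline_id.0)
    }
}

fn main() {
    let ttid = TenantTimelineId {
        tenant_id: TenantId(1),
        timeline_id: TimelineId(2),
    };
    println!("{}", ttid);
}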

View File

@@ -23,7 +23,7 @@ use tokio::sync::watch;
use tokio::time::sleep;
use tracing::*;
use utils::{lsn::Lsn, zid::ZTenantTimelineId};
use utils::{id::TenantTimelineId, lsn::Lsn};
use crate::broker::{Election, ElectionLeader};
use crate::timeline::{GlobalTimelines, Timeline};
@@ -38,7 +38,7 @@ const UPLOAD_FAILURE_RETRY_MAX_MS: u64 = 5000;
pub fn wal_backup_launcher_thread_main(
conf: SafeKeeperConf,
wal_backup_launcher_rx: Receiver<ZTenantTimelineId>,
wal_backup_launcher_rx: Receiver<TenantTimelineId>,
) {
let rt = Builder::new_multi_thread()
.worker_threads(conf.backup_runtime_threads)
@@ -53,7 +53,7 @@ pub fn wal_backup_launcher_thread_main(
/// Check whether wal backup is required for the timeline. If so, mark that the
/// launcher is aware of the current status and return the timeline.
fn is_wal_backup_required(zttid: ZTenantTimelineId) -> Option<Arc<Timeline>> {
fn is_wal_backup_required(zttid: TenantTimelineId) -> Option<Arc<Timeline>> {
GlobalTimelines::get_loaded(zttid).filter(|t| t.wal_backup_attend())
}
@@ -70,7 +70,7 @@ struct WalBackupTimelineEntry {
/// Start a per-timeline task, if it makes sense for this safekeeper to offload.
fn consider_start_task(
conf: &SafeKeeperConf,
zttid: ZTenantTimelineId,
zttid: TenantTimelineId,
task: &mut WalBackupTimelineEntry,
) {
if !task.timeline.can_wal_backup() {
@@ -117,7 +117,7 @@ const CHECK_TASKS_INTERVAL_MSEC: u64 = 1000;
/// panics and separate elections from offloading itself.
async fn wal_backup_launcher_main_loop(
conf: SafeKeeperConf,
mut wal_backup_launcher_rx: Receiver<ZTenantTimelineId>,
mut wal_backup_launcher_rx: Receiver<TenantTimelineId>,
) {
info!(
"WAL backup launcher started, remote config {:?}",
@@ -135,7 +135,7 @@ async fn wal_backup_launcher_main_loop(
// Presence in this map means the launcher is aware that s3 offloading is
// needed for the timeline, but a task is started only if it makes sense to
// offload from this safekeeper.
let mut tasks: HashMap<ZTenantTimelineId, WalBackupTimelineEntry> = HashMap::new();
let mut tasks: HashMap<TenantTimelineId, WalBackupTimelineEntry> = HashMap::new();
let mut ticker = tokio::time::interval(Duration::from_millis(CHECK_TASKS_INTERVAL_MSEC));
loop {
@@ -193,7 +193,7 @@ struct WalBackupTask {
/// Offload single timeline.
async fn backup_task_main(
zttid: ZTenantTimelineId,
zttid: TenantTimelineId,
timeline_dir: PathBuf,
mut shutdown_rx: Receiver<()>,
election: Election,
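
The hunks above all touch one pattern: timelines nudge a single launcher task by sending their id over an mpsc channel, and the launcher decides whether a per-timeline backup task should start or stop. A runnable sketch of that shape, with `u64` standing in for `TenantTimelineId` and elections, retries, and shutdown omitted:

use std::collections::HashMap;
use tokio::sync::mpsc::{channel, Receiver, Sender};

// Minimal sketch of the launcher loop: drain timeline ids from the channel
// and track per-timeline state, as wal_backup_launcher_main_loop does above.
async fn launcher_loop(mut rx: Receiver<u64>) -> usize {
    let mut tasks: HashMap<u64, bool> = HashMap::new();
    while let Some(ttid) = rx.recv().await {
        // The real loop consults GlobalTimelines here and may start or
        // stop a per-timeline backup task.
        tasks.insert(ttid, true);
    }
    tasks.len()
}

#[tokio::main]
async fn main() {
    let (tx, rx): (Sender<u64>, Receiver<u64>) = channel(100);
    tx.send(42).await.unwrap();
    tx.send(7).await.unwrap();
    drop(tx); // closing the channel lets the loop exit
    let n = launcher_loop(rx).await;
    println!("{n} timelines attended");
}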

View File

@@ -25,7 +25,7 @@ use std::path::{Path, PathBuf};
use tracing::*;
use utils::{lsn::Lsn, zid::ZTenantTimelineId};
use utils::{id::TenantTimelineId, lsn::Lsn};
use crate::safekeeper::SafeKeeperState;
@@ -86,7 +86,7 @@ struct WalStorageMetrics {
}
impl WalStorageMetrics {
fn new(zttid: &ZTenantTimelineId) -> Self {
fn new(zttid: &TenantTimelineId) -> Self {
let tenant_id = zttid.tenant_id.to_string();
let timeline_id = zttid.timeline_id.to_string();
Self {
@@ -130,7 +130,7 @@ pub trait Storage {
/// When storage is just created, all LSNs are zeroes and there are no segments on disk.
pub struct PhysicalStorage {
metrics: WalStorageMetrics,
zttid: ZTenantTimelineId,
zttid: TenantTimelineId,
timeline_dir: PathBuf,
conf: SafeKeeperConf,
@@ -161,7 +161,7 @@ pub struct PhysicalStorage {
}
impl PhysicalStorage {
pub fn new(zttid: &ZTenantTimelineId, conf: &SafeKeeperConf) -> PhysicalStorage {
pub fn new(zttid: &TenantTimelineId, conf: &SafeKeeperConf) -> PhysicalStorage {
let timeline_dir = conf.timeline_dir(zttid);
PhysicalStorage {
metrics: WalStorageMetrics::new(zttid),
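
The only non-mechanical detail in this file is `WalStorageMetrics::new`, which splits the composite id into separate `tenant_id` and `timeline_id` strings for use as metric labels. A hedged sketch of that labeling pattern, using the `prometheus` crate for illustration; the metric name and kind are invented, not the real safekeeper metrics:

use prometheus::{IntCounterVec, Opts};

// Hypothetical metric keyed by the two label strings derived from the id,
// mirroring WalStorageMetrics::new above. The real code would register the
// vec in a Registry once and cache it, rather than rebuilding it per call.
fn wal_bytes_written(tenant_id: &str, timeline_id: &str) -> prometheus::IntCounter {
    let vec = IntCounterVec::new(
        Opts::new("safekeeper_wal_bytes_written", "WAL bytes written to disk"),
        &["tenant_id", "timeline_id"],
    )
    .expect("valid metric definition");
    vec.with_label_values(&[tenant_id, timeline_id])
}

fn main() {
    let counter = wal_bytes_written("tenant-a", "timeline-1");
    counter.inc_by(8192);
    println!("{}", counter.get());
}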

View File

@@ -5,8 +5,8 @@ set -eux -o pipefail
SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
echo "Uploading perf report to zenith pg"
# ingest per test results data into zenith backed postgres running in staging to build grafana reports on that data
echo "Uploading perf report to neon pg"
# ingest per test results data into neon backed postgres running in staging to build grafana reports on that data
DATABASE_URL="$PERF_TEST_RESULT_CONNSTR" poetry run python "$SCRIPT_DIR"/ingest_perf_test_result.py --ingest "$REPORT_FROM"
# Activate poetry's venv. Needed because git upload does not run in a project dir (it uses tmp to store the repository)
@@ -16,8 +16,8 @@ DATABASE_URL="$PERF_TEST_RESULT_CONNSTR" poetry run python "$SCRIPT_DIR"/ingest_
echo "Uploading perf result to zenith-perf-data"
scripts/git-upload \
--repo=https://"$VIP_VAP_ACCESS_TOKEN"@github.com/zenithdb/zenith-perf-data.git \
--message="add performance test result for $GITHUB_SHA zenith revision" \
--repo=https://"$VIP_VAP_ACCESS_TOKEN"@github.com/neondatabase/zenith-perf-data.git \
--message="add performance test result for $GITHUB_SHA neon revision" \
--branch=master \
copy "$REPORT_FROM" "data/$REPORT_TO" `# COPY FROM TO_RELATIVE`\
--merge \

Some files were not shown because too many files have changed in this diff.