Initial commit: renamed ZNodeId to NodeId.

chaitanya sharma
2022-05-23 15:52:21 +00:00
committed by Anastasia Lubennikova
parent 7997fc2932
commit c584d90bb9
14 changed files with 64 additions and 64 deletions
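The substantive change lands in the utils crate's zid module, shown further down: the u64 newtype formerly called ZNodeId becomes NodeId, and its Display impl is renamed to match; every other hunk mechanically updates imports and call sites. For reference, a minimal sketch of the type as it reads after this commit (derives and field copied from the diff below):

use serde::{Deserialize, Serialize};
use std::fmt;

// Node identifier shared by pageservers and safekeepers; formerly `ZNodeId`.
#[derive(Clone, Copy, Eq, Ord, PartialEq, PartialOrd, Hash, Debug, Serialize, Deserialize)]
#[serde(transparent)]
pub struct NodeId(pub u64);

impl fmt::Display for NodeId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}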

View File

@@ -15,7 +15,7 @@ use std::process::{Command, Stdio};
use utils::{
auth::{encode_from_key_file, Claims, Scope},
postgres_backend::AuthType,
-zid::{ZNodeId, ZTenantId, ZTenantTimelineId, ZTimelineId},
+zid::{NodeId, ZTenantId, ZTenantTimelineId, ZTimelineId},
};
use crate::safekeeper::SafekeeperNode;
@@ -136,7 +136,7 @@ impl EtcdBroker {
#[serde(default)]
pub struct PageServerConf {
// node id
-pub id: ZNodeId,
+pub id: NodeId,
// Pageserver connection settings
pub listen_pg_addr: String,
pub listen_http_addr: String,
@@ -151,7 +151,7 @@ pub struct PageServerConf {
impl Default for PageServerConf {
fn default() -> Self {
Self {
-id: ZNodeId(0),
+id: NodeId(0),
listen_pg_addr: String::new(),
listen_http_addr: String::new(),
auth_type: AuthType::Trust,
@@ -163,7 +163,7 @@ impl Default for PageServerConf {
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
#[serde(default)]
pub struct SafekeeperConf {
-pub id: ZNodeId,
+pub id: NodeId,
pub pg_port: u16,
pub http_port: u16,
pub sync: bool,
@@ -172,7 +172,7 @@ pub struct SafekeeperConf {
impl Default for SafekeeperConf {
fn default() -> Self {
Self {
-id: ZNodeId(0),
+id: NodeId(0),
pg_port: 0,
http_port: 0,
sync: true,

View File

@@ -18,7 +18,7 @@ use thiserror::Error;
use utils::{
connstring::connection_address,
http::error::HttpErrorBody,
-zid::{ZNodeId, ZTenantId, ZTimelineId},
+zid::{NodeId, ZTenantId, ZTimelineId},
};
use crate::local_env::{LocalEnv, SafekeeperConf};
@@ -65,7 +65,7 @@ impl ResponseErrorMessageExt for Response {
//
#[derive(Debug)]
pub struct SafekeeperNode {
-pub id: ZNodeId,
+pub id: NodeId,
pub conf: SafekeeperConf,
@@ -100,7 +100,7 @@ impl SafekeeperNode {
.unwrap()
}
-pub fn datadir_path_by_id(env: &LocalEnv, sk_id: ZNodeId) -> PathBuf {
+pub fn datadir_path_by_id(env: &LocalEnv, sk_id: NodeId) -> PathBuf {
env.safekeeper_data_dir(format!("sk{}", sk_id).as_ref())
}
@@ -286,7 +286,7 @@ impl SafekeeperNode {
&self,
tenant_id: ZTenantId,
timeline_id: ZTimelineId,
-peer_ids: Vec<ZNodeId>,
+peer_ids: Vec<NodeId>,
) -> Result<()> {
Ok(self
.http_request(

View File

@@ -16,7 +16,7 @@ use tokio::{sync::mpsc, task::JoinHandle};
use tracing::*;
use utils::{
lsn::Lsn,
-zid::{ZNodeId, ZTenantId, ZTenantTimelineId},
+zid::{NodeId, ZTenantId, ZTenantTimelineId},
};
/// Default value to use for prefixing to all etcd keys with.
@@ -25,7 +25,7 @@ pub const DEFAULT_NEON_BROKER_ETCD_PREFIX: &str = "neon";
#[derive(Debug, Deserialize, Serialize)]
struct SafekeeperTimeline {
-safekeeper_id: ZNodeId,
+safekeeper_id: NodeId,
info: SkTimelineInfo,
}
@@ -71,7 +71,7 @@ pub enum BrokerError {
/// A way to control the data retrieval from a certain subscription.
pub struct SkTimelineSubscription {
safekeeper_timeline_updates:
-mpsc::UnboundedReceiver<HashMap<ZTenantTimelineId, HashMap<ZNodeId, SkTimelineInfo>>>,
+mpsc::UnboundedReceiver<HashMap<ZTenantTimelineId, HashMap<NodeId, SkTimelineInfo>>>,
kind: SkTimelineSubscriptionKind,
watcher_handle: JoinHandle<Result<(), BrokerError>>,
watcher: Watcher,
@@ -81,7 +81,7 @@ impl SkTimelineSubscription {
/// Asynchronously polls for more data from the subscription, suspending the current future if there's no data sent yet.
pub async fn fetch_data(
&mut self,
-) -> Option<HashMap<ZTenantTimelineId, HashMap<ZNodeId, SkTimelineInfo>>> {
+) -> Option<HashMap<ZTenantTimelineId, HashMap<NodeId, SkTimelineInfo>>> {
self.safekeeper_timeline_updates.recv().await
}
@@ -221,7 +221,7 @@ pub async fn subscribe_to_safekeeper_timeline_updates(
break;
}
-let mut timeline_updates: HashMap<ZTenantTimelineId, HashMap<ZNodeId, SkTimelineInfo>> = HashMap::new();
+let mut timeline_updates: HashMap<ZTenantTimelineId, HashMap<NodeId, SkTimelineInfo>> = HashMap::new();
// Keep track that the timeline data updates from etcd arrive in the right order.
// https://etcd.io/docs/v3.5/learning/api_guarantees/#isolation-level-and-consistency-of-replicas
// > etcd does not ensure linearizability for watch operations. Users are expected to verify the revision of watch responses to ensure correct ordering.
@@ -299,18 +299,18 @@ fn parse_etcd_key_value(
parse_capture(&caps, 1).map_err(BrokerError::ParsingError)?,
parse_capture(&caps, 2).map_err(BrokerError::ParsingError)?,
),
-ZNodeId(parse_capture(&caps, 3).map_err(BrokerError::ParsingError)?),
+NodeId(parse_capture(&caps, 3).map_err(BrokerError::ParsingError)?),
),
SubscriptionKind::Tenant(tenant_id) => (
ZTenantTimelineId::new(
tenant_id,
parse_capture(&caps, 1).map_err(BrokerError::ParsingError)?,
),
-ZNodeId(parse_capture(&caps, 2).map_err(BrokerError::ParsingError)?),
+NodeId(parse_capture(&caps, 2).map_err(BrokerError::ParsingError)?),
),
SubscriptionKind::Timeline(zttid) => (
zttid,
-ZNodeId(parse_capture(&caps, 1).map_err(BrokerError::ParsingError)?),
+NodeId(parse_capture(&caps, 1).map_err(BrokerError::ParsingError)?),
),
};

View File

@@ -226,9 +226,9 @@ impl fmt::Display for ZTenantTimelineId {
// by the console.
#[derive(Clone, Copy, Eq, Ord, PartialEq, PartialOrd, Hash, Debug, Serialize, Deserialize)]
#[serde(transparent)]
-pub struct ZNodeId(pub u64);
+pub struct NodeId(pub u64);
-impl fmt::Display for ZNodeId {
+impl fmt::Display for NodeId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
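The rename leaves behavior untouched: Display still prints the bare u64 and the derives (PartialEq, Hash, transparent serde) are unchanged, so call sites elsewhere in this commit, such as the format!("sk{}", sk_id) directory names and the node.id == id lookups, only swap the type name. A brief sketch with illustrative values:

// assumes the NodeId newtype from utils::zid shown above
let sk_id = NodeId(1);

// Display prints the inner u64, so directory names like "sk1" come out unchanged
assert_eq!(format!("sk{}", sk_id), "sk1");

// PartialEq/Eq are derived, so id lookups compare directly
assert_eq!(sk_id, NodeId(1));
assert_ne!(sk_id, NodeId(2));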

View File

@@ -22,14 +22,14 @@ use utils::{
lsn::Lsn,
postgres_backend::AuthType,
project_git_version,
-zid::{ZNodeId, ZTenantId, ZTenantTimelineId, ZTimelineId},
+zid::{NodeId, ZTenantId, ZTenantTimelineId, ZTimelineId},
};
use pageserver::timelines::TimelineInfo;
// Default id of a safekeeper node, if not specified on the command line.
-const DEFAULT_SAFEKEEPER_ID: ZNodeId = ZNodeId(1);
-const DEFAULT_PAGESERVER_ID: ZNodeId = ZNodeId(1);
+const DEFAULT_SAFEKEEPER_ID: NodeId = NodeId(1);
+const DEFAULT_PAGESERVER_ID: NodeId = NodeId(1);
const DEFAULT_BRANCH_NAME: &str = "main";
project_git_version!(GIT_VERSION);
@@ -860,7 +860,7 @@ fn handle_pageserver(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Resul
Ok(())
}
-fn get_safekeeper(env: &local_env::LocalEnv, id: ZNodeId) -> Result<SafekeeperNode> {
+fn get_safekeeper(env: &local_env::LocalEnv, id: NodeId) -> Result<SafekeeperNode> {
if let Some(node) = env.safekeepers.iter().find(|node| node.id == id) {
Ok(SafekeeperNode::from_env(env, node))
} else {
@@ -876,7 +876,7 @@ fn handle_safekeeper(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Resul
// All the commands take an optional safekeeper name argument
let sk_id = if let Some(id_str) = sub_args.value_of("id") {
-ZNodeId(id_str.parse().context("while parsing safekeeper id")?)
+NodeId(id_str.parse().context("while parsing safekeeper id")?)
} else {
DEFAULT_SAFEKEEPER_ID
};
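The CLI handling in this hunk follows a simple fallback: wrap the parsed --id argument in NodeId when one is given, otherwise use the compile-time default. A hedged sketch of that logic, with the clap ArgMatches plumbing reduced to a plain Option<&str> and the helper function name being illustrative:

use anyhow::Context;
use utils::zid::NodeId; // the type renamed in this commit

const DEFAULT_SAFEKEEPER_ID: NodeId = NodeId(1);

// illustrative helper; the real CLI code inlines this logic in handle_safekeeper
fn safekeeper_id_from_arg(id_arg: Option<&str>) -> anyhow::Result<NodeId> {
    Ok(match id_arg {
        Some(id_str) => NodeId(id_str.parse().context("while parsing safekeeper id")?),
        None => DEFAULT_SAFEKEEPER_ID,
    })
}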

View File

@@ -16,7 +16,7 @@ use toml_edit::{Document, Item};
use url::Url;
use utils::{
postgres_backend::AuthType,
-zid::{ZNodeId, ZTenantId, ZTimelineId},
+zid::{NodeId, ZTenantId, ZTimelineId},
};
use crate::layered_repository::TIMELINES_SEGMENT_NAME;
@@ -78,7 +78,7 @@ pub mod defaults {
pub struct PageServerConf {
// Identifier of that particular pageserver so e g safekeepers
// can safely distinguish different pageservers
-pub id: ZNodeId,
+pub id: NodeId,
/// Example (default): 127.0.0.1:64000
pub listen_pg_addr: String,
@@ -180,7 +180,7 @@ struct PageServerConfigBuilder {
auth_validation_public_key_path: BuilderValue<Option<PathBuf>>,
remote_storage_config: BuilderValue<Option<RemoteStorageConfig>>,
-id: BuilderValue<ZNodeId>,
+id: BuilderValue<NodeId>,
profiling: BuilderValue<ProfilingConfig>,
broker_etcd_prefix: BuilderValue<String>,
@@ -276,7 +276,7 @@ impl PageServerConfigBuilder {
self.broker_etcd_prefix = BuilderValue::Set(broker_etcd_prefix)
}
-pub fn id(&mut self, node_id: ZNodeId) {
+pub fn id(&mut self, node_id: NodeId) {
self.id = BuilderValue::Set(node_id)
}
@@ -399,7 +399,7 @@ impl PageServerConf {
"tenant_config" => {
t_conf = Self::parse_toml_tenant_conf(item)?;
}
-"id" => builder.id(ZNodeId(parse_toml_u64(key, item)?)),
+"id" => builder.id(NodeId(parse_toml_u64(key, item)?)),
"profiling" => builder.profiling(parse_toml_from_str(key, item)?),
"broker_etcd_prefix" => builder.broker_etcd_prefix(parse_toml_string(key, item)?),
"broker_endpoints" => builder.broker_endpoints(
@@ -550,7 +550,7 @@ impl PageServerConf {
#[cfg(test)]
pub fn dummy_conf(repo_dir: PathBuf) -> Self {
PageServerConf {
-id: ZNodeId(0),
+id: NodeId(0),
wait_lsn_timeout: Duration::from_secs(60),
wal_redo_timeout: Duration::from_secs(60),
page_cache_size: defaults::DEFAULT_PAGE_CACHE_SIZE,
@@ -693,7 +693,7 @@ id = 10
assert_eq!(
parsed_config,
PageServerConf {
-id: ZNodeId(10),
+id: NodeId(10),
listen_pg_addr: defaults::DEFAULT_PG_LISTEN_ADDR.to_string(),
listen_http_addr: defaults::DEFAULT_HTTP_LISTEN_ADDR.to_string(),
wait_lsn_timeout: humantime::parse_duration(defaults::DEFAULT_WAIT_LSN_TIMEOUT)?,
@@ -737,7 +737,7 @@ id = 10
assert_eq!(
parsed_config,
PageServerConf {
-id: ZNodeId(10),
+id: NodeId(10),
listen_pg_addr: "127.0.0.1:64000".to_string(),
listen_http_addr: "127.0.0.1:9898".to_string(),
wait_lsn_timeout: Duration::from_secs(111),

View File

@@ -2,7 +2,7 @@ use serde::{Deserialize, Serialize};
use serde_with::{serde_as, DisplayFromStr};
use utils::{
lsn::Lsn,
-zid::{ZNodeId, ZTenantId, ZTimelineId},
+zid::{NodeId, ZTenantId, ZTimelineId},
};
#[serde_as]
@@ -42,7 +42,7 @@ pub struct TenantCreateResponse(#[serde_as(as = "DisplayFromStr")] pub ZTenantId
#[derive(Serialize)]
pub struct StatusResponse {
-pub id: ZNodeId,
+pub id: NodeId,
}
impl TenantCreateRequest {

View File

@@ -24,7 +24,7 @@ use safekeeper::{broker, callmemaybe};
use safekeeper::{http, s3_offload};
use utils::{
http::endpoint, logging, project_git_version, shutdown::exit_now, signals, tcp_listener,
-zid::ZNodeId,
+zid::NodeId,
};
const LOCK_FILE_NAME: &str = "safekeeper.lock";
@@ -167,7 +167,7 @@ fn main() -> anyhow::Result<()> {
let mut given_id = None;
if let Some(given_id_str) = arg_matches.value_of("id") {
-given_id = Some(ZNodeId(
+given_id = Some(NodeId(
given_id_str
.parse()
.context("failed to parse safekeeper id")?,
@@ -192,7 +192,7 @@ fn main() -> anyhow::Result<()> {
start_safekeeper(conf, given_id, arg_matches.is_present("init"))
}
-fn start_safekeeper(mut conf: SafeKeeperConf, given_id: Option<ZNodeId>, init: bool) -> Result<()> {
+fn start_safekeeper(mut conf: SafeKeeperConf, given_id: Option<NodeId>, init: bool) -> Result<()> {
let log_file = logging::init("safekeeper.log", conf.daemonize)?;
info!("version: {GIT_VERSION}");
@@ -345,14 +345,14 @@ fn start_safekeeper(mut conf: SafeKeeperConf, given_id: Option<ZNodeId>, init: b
}
/// Determine safekeeper id and set it in config.
-fn set_id(conf: &mut SafeKeeperConf, given_id: Option<ZNodeId>) -> Result<()> {
+fn set_id(conf: &mut SafeKeeperConf, given_id: Option<NodeId>) -> Result<()> {
let id_file_path = conf.workdir.join(ID_FILE_NAME);
-let my_id: ZNodeId;
+let my_id: NodeId;
// If ID exists, read it in; otherwise set one passed
match fs::read(&id_file_path) {
Ok(id_serialized) => {
-my_id = ZNodeId(
+my_id = NodeId(
std::str::from_utf8(&id_serialized)
.context("failed to parse safekeeper id")?
.parse()

View File

@@ -12,7 +12,7 @@ use tokio::{runtime, time::sleep};
use tracing::*;
use crate::{timeline::GlobalTimelines, SafeKeeperConf};
-use utils::zid::{ZNodeId, ZTenantTimelineId};
+use utils::zid::{NodeId, ZTenantTimelineId};
const RETRY_INTERVAL_MSEC: u64 = 1000;
const PUSH_INTERVAL_MSEC: u64 = 1000;
@@ -36,7 +36,7 @@ pub fn thread_main(conf: SafeKeeperConf) {
fn timeline_safekeeper_path(
broker_etcd_prefix: String,
zttid: ZTenantTimelineId,
-sk_id: ZNodeId,
+sk_id: NodeId,
) -> String {
format!(
"{}/{sk_id}",

View File

@@ -1,9 +1,9 @@
use serde::{Deserialize, Serialize};
-use utils::zid::{ZNodeId, ZTenantId, ZTimelineId};
+use utils::zid::{NodeId, ZTenantId, ZTimelineId};
#[derive(Serialize, Deserialize)]
pub struct TimelineCreateRequest {
pub tenant_id: ZTenantId,
pub timeline_id: ZTimelineId,
-pub peer_ids: Vec<ZNodeId>,
+pub peer_ids: Vec<NodeId>,
}

View File

@@ -20,14 +20,14 @@ use utils::{
RequestExt, RouterBuilder,
},
lsn::Lsn,
-zid::{ZNodeId, ZTenantId, ZTenantTimelineId, ZTimelineId},
+zid::{NodeId, ZTenantId, ZTenantTimelineId, ZTimelineId},
};
use super::models::TimelineCreateRequest;
#[derive(Debug, Serialize)]
struct SafekeeperStatus {
-id: ZNodeId,
+id: NodeId,
}
/// Healthcheck handler.
@@ -178,7 +178,7 @@ async fn record_safekeeper_info(mut request: Request<Body>) -> Result<Response<B
let safekeeper_info: SkTimelineInfo = json_request(&mut request).await?;
let tli = GlobalTimelines::get(get_conf(&request), zttid, false).map_err(ApiError::from_err)?;
-tli.record_safekeeper_info(&safekeeper_info, ZNodeId(1))?;
+tli.record_safekeeper_info(&safekeeper_info, NodeId(1))?;
json_response(StatusCode::OK, ())
}

View File

@@ -3,7 +3,7 @@ use std::path::PathBuf;
use std::time::Duration;
use url::Url;
-use utils::zid::{ZNodeId, ZTenantId, ZTenantTimelineId};
+use utils::zid::{NodeId, ZTenantId, ZTenantTimelineId};
pub mod broker;
pub mod callmemaybe;
@@ -49,7 +49,7 @@ pub struct SafeKeeperConf {
pub listen_http_addr: String,
pub ttl: Option<Duration>,
pub recall_period: Duration,
-pub my_id: ZNodeId,
+pub my_id: NodeId,
pub broker_endpoints: Vec<Url>,
pub broker_etcd_prefix: String,
pub s3_offload_enabled: bool,
@@ -79,7 +79,7 @@ impl Default for SafeKeeperConf {
listen_http_addr: defaults::DEFAULT_HTTP_LISTEN_ADDR.to_string(),
ttl: None,
recall_period: defaults::DEFAULT_RECALL_PERIOD,
-my_id: ZNodeId(0),
+my_id: NodeId(0),
broker_endpoints: Vec::new(),
broker_etcd_prefix: etcd_broker::DEFAULT_NEON_BROKER_ETCD_PREFIX.to_string(),
s3_offload_enabled: true,

View File

@@ -26,7 +26,7 @@ use utils::{
bin_ser::LeSer,
lsn::Lsn,
pq_proto::{SystemId, ZenithFeedback},
-zid::{ZNodeId, ZTenantId, ZTenantTimelineId, ZTimelineId},
+zid::{NodeId, ZTenantId, ZTenantTimelineId, ZTimelineId},
};
pub const SK_MAGIC: u32 = 0xcafeceefu32;
@@ -164,7 +164,7 @@ impl PeerInfo {
// vector-based node id -> peer state map with very limited functionality we
// need/
#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct Peers(pub Vec<(ZNodeId, PeerInfo)>);
+pub struct Peers(pub Vec<(NodeId, PeerInfo)>);
/// Persistent information stored on safekeeper node
/// On disk data is prefixed by magic and format version and followed by checksum.
@@ -224,7 +224,7 @@ pub struct SafekeeperMemState {
}
impl SafeKeeperState {
-pub fn new(zttid: &ZTenantTimelineId, peers: Vec<ZNodeId>) -> SafeKeeperState {
+pub fn new(zttid: &ZTenantTimelineId, peers: Vec<NodeId>) -> SafeKeeperState {
SafeKeeperState {
tenant_id: zttid.tenant_id,
timeline_id: zttid.timeline_id,
@@ -277,7 +277,7 @@ pub struct ProposerGreeting {
#[derive(Debug, Serialize)]
pub struct AcceptorGreeting {
term: u64,
-node_id: ZNodeId,
+node_id: NodeId,
}
/// Vote request sent from proposer to safekeepers
@@ -531,7 +531,7 @@ pub struct SafeKeeper<CTRL: control_file::Storage, WAL: wal_storage::Storage> {
pub wal_store: WAL,
-node_id: ZNodeId, // safekeeper's node id
+node_id: NodeId, // safekeeper's node id
}
impl<CTRL, WAL> SafeKeeper<CTRL, WAL>
@@ -544,7 +544,7 @@ where
ztli: ZTimelineId,
state: CTRL,
mut wal_store: WAL,
-node_id: ZNodeId,
+node_id: NodeId,
) -> Result<SafeKeeper<CTRL, WAL>> {
if state.timeline_id != ZTimelineId::from([0u8; 16]) && ztli != state.timeline_id {
bail!("Calling SafeKeeper::new with inconsistent ztli ({}) and SafeKeeperState.server.timeline_id ({})", ztli, state.timeline_id);
@@ -1013,7 +1013,7 @@ mod tests {
};
let wal_store = DummyWalStore { lsn: Lsn(0) };
let ztli = ZTimelineId::from([0u8; 16]);
-let mut sk = SafeKeeper::new(ztli, storage, wal_store, ZNodeId(0)).unwrap();
+let mut sk = SafeKeeper::new(ztli, storage, wal_store, NodeId(0)).unwrap();
// check voting for 1 is ok
let vote_request = ProposerAcceptorMessage::VoteRequest(VoteRequest { term: 1 });
@@ -1028,7 +1028,7 @@ mod tests {
let storage = InMemoryState {
persisted_state: state,
};
-sk = SafeKeeper::new(ztli, storage, sk.wal_store, ZNodeId(0)).unwrap();
+sk = SafeKeeper::new(ztli, storage, sk.wal_store, NodeId(0)).unwrap();
// and ensure voting second time for 1 is not ok
vote_resp = sk.process_msg(&vote_request);
@@ -1045,7 +1045,7 @@ mod tests {
};
let wal_store = DummyWalStore { lsn: Lsn(0) };
let ztli = ZTimelineId::from([0u8; 16]);
-let mut sk = SafeKeeper::new(ztli, storage, wal_store, ZNodeId(0)).unwrap();
+let mut sk = SafeKeeper::new(ztli, storage, wal_store, NodeId(0)).unwrap();
let mut ar_hdr = AppendRequestHeader {
term: 1,

View File

@@ -21,7 +21,7 @@ use tracing::*;
use utils::{
lsn::Lsn,
pq_proto::ZenithFeedback,
-zid::{ZNodeId, ZTenantId, ZTenantTimelineId},
+zid::{NodeId, ZTenantId, ZTenantTimelineId},
};
use crate::callmemaybe::{CallmeEvent, SubscriptionStateKey};
@@ -99,7 +99,7 @@ impl SharedState {
fn create(
conf: &SafeKeeperConf,
zttid: &ZTenantTimelineId,
-peer_ids: Vec<ZNodeId>,
+peer_ids: Vec<NodeId>,
) -> Result<Self> {
let state = SafeKeeperState::new(zttid, peer_ids);
let control_store = control_file::FileStorage::create_new(zttid, conf, state)?;
@@ -448,7 +448,7 @@ impl Timeline {
}
/// Update timeline state with peer safekeeper data.
-pub fn record_safekeeper_info(&self, sk_info: &SkTimelineInfo, _sk_id: ZNodeId) -> Result<()> {
+pub fn record_safekeeper_info(&self, sk_info: &SkTimelineInfo, _sk_id: NodeId) -> Result<()> {
let mut shared_state = self.mutex.lock().unwrap();
shared_state.sk.record_safekeeper_info(sk_info)?;
self.notify_wal_senders(&mut shared_state);
@@ -551,7 +551,7 @@ impl GlobalTimelines {
mut state: MutexGuard<GlobalTimelinesState>,
conf: &SafeKeeperConf,
zttid: ZTenantTimelineId,
-peer_ids: Vec<ZNodeId>,
+peer_ids: Vec<NodeId>,
) -> Result<Arc<Timeline>> {
match state.timelines.get(&zttid) {
Some(_) => bail!("timeline {} already exists", zttid),
@@ -576,7 +576,7 @@ impl GlobalTimelines {
pub fn create(
conf: &SafeKeeperConf,
zttid: ZTenantTimelineId,
-peer_ids: Vec<ZNodeId>,
+peer_ids: Vec<NodeId>,
) -> Result<Arc<Timeline>> {
let state = TIMELINES_STATE.lock().unwrap();
GlobalTimelines::create_internal(state, conf, zttid, peer_ids)