Rename more zid-like idents (#2480)

Follow-up to PR #2433 (b8eb908a). There are still a few more unresolved
locations that have been left as-is, for the same compatibility reasons
as in the original PR.
This commit is contained in:
sharnoff
2022-09-20 11:06:31 -07:00
committed by GitHub
parent a5019bf771
commit 4b25b9652a
7 changed files with 46 additions and 46 deletions

View File

@@ -3,20 +3,20 @@
use criterion::{criterion_group, criterion_main, Criterion};
use utils::id;
pub fn bench_zid_stringify(c: &mut Criterion) {
pub fn bench_id_stringify(c: &mut Criterion) {
// Can only use public methods.
let ztl = id::TenantTimelineId::generate();
let ttid = id::TenantTimelineId::generate();
c.bench_function("zid.to_string", |b| {
c.bench_function("id.to_string", |b| {
b.iter(|| {
// FIXME measurement overhead?
//for _ in 0..1000 {
// ztl.tenant_id.to_string();
// ttid.tenant_id.to_string();
//}
ztl.tenant_id.to_string();
ttid.tenant_id.to_string();
})
});
}
criterion_group!(benches, bench_zid_stringify);
criterion_group!(benches, bench_id_stringify);
criterion_main!(benches);

View File

@@ -931,7 +931,7 @@ impl ReplicationFeedback {
// Deserialize ReplicationFeedback message
pub fn parse(mut buf: Bytes) -> ReplicationFeedback {
let mut zf = ReplicationFeedback::empty();
let mut rf = ReplicationFeedback::empty();
let nfields = buf.get_u8();
for _ in 0..nfields {
let key = read_cstr(&mut buf).unwrap();
@@ -939,31 +939,31 @@ impl ReplicationFeedback {
b"current_timeline_size" => {
let len = buf.get_i32();
assert_eq!(len, 8);
zf.current_timeline_size = buf.get_u64();
rf.current_timeline_size = buf.get_u64();
}
b"ps_writelsn" => {
let len = buf.get_i32();
assert_eq!(len, 8);
zf.ps_writelsn = buf.get_u64();
rf.ps_writelsn = buf.get_u64();
}
b"ps_flushlsn" => {
let len = buf.get_i32();
assert_eq!(len, 8);
zf.ps_flushlsn = buf.get_u64();
rf.ps_flushlsn = buf.get_u64();
}
b"ps_applylsn" => {
let len = buf.get_i32();
assert_eq!(len, 8);
zf.ps_applylsn = buf.get_u64();
rf.ps_applylsn = buf.get_u64();
}
b"ps_replytime" => {
let len = buf.get_i32();
assert_eq!(len, 8);
let raw_time = buf.get_i64();
if raw_time > 0 {
zf.ps_replytime = *PG_EPOCH + Duration::from_micros(raw_time as u64);
rf.ps_replytime = *PG_EPOCH + Duration::from_micros(raw_time as u64);
} else {
zf.ps_replytime = *PG_EPOCH - Duration::from_micros(-raw_time as u64);
rf.ps_replytime = *PG_EPOCH - Duration::from_micros(-raw_time as u64);
}
}
_ => {
@@ -976,8 +976,8 @@ impl ReplicationFeedback {
}
}
}
trace!("ReplicationFeedback parsed is {:?}", zf);
zf
trace!("ReplicationFeedback parsed is {:?}", rf);
rf
}
}
@@ -987,29 +987,29 @@ mod tests {
#[test]
fn test_replication_feedback_serialization() {
let mut zf = ReplicationFeedback::empty();
// Fill zf with some values
zf.current_timeline_size = 12345678;
let mut rf = ReplicationFeedback::empty();
// Fill rf with some values
rf.current_timeline_size = 12345678;
// Set rounded time to be able to compare it with deserialized value,
// because it is rounded up to microseconds during serialization.
zf.ps_replytime = *PG_EPOCH + Duration::from_secs(100_000_000);
rf.ps_replytime = *PG_EPOCH + Duration::from_secs(100_000_000);
let mut data = BytesMut::new();
zf.serialize(&mut data).unwrap();
rf.serialize(&mut data).unwrap();
let zf_parsed = ReplicationFeedback::parse(data.freeze());
assert_eq!(zf, zf_parsed);
let rf_parsed = ReplicationFeedback::parse(data.freeze());
assert_eq!(rf, rf_parsed);
}
#[test]
fn test_replication_feedback_unknown_key() {
let mut zf = ReplicationFeedback::empty();
// Fill zf with some values
zf.current_timeline_size = 12345678;
let mut rf = ReplicationFeedback::empty();
// Fill rf with some values
rf.current_timeline_size = 12345678;
// Set rounded time to be able to compare it with deserialized value,
// because it is rounded up to microseconds during serialization.
zf.ps_replytime = *PG_EPOCH + Duration::from_secs(100_000_000);
rf.ps_replytime = *PG_EPOCH + Duration::from_secs(100_000_000);
let mut data = BytesMut::new();
zf.serialize(&mut data).unwrap();
rf.serialize(&mut data).unwrap();
// Add an extra field to the buffer and adjust number of keys
if let Some(first) = data.first_mut() {
@@ -1021,8 +1021,8 @@ mod tests {
data.put_u64(42);
// Parse serialized data and check that new field is not parsed
let zf_parsed = ReplicationFeedback::parse(data.freeze());
assert_eq!(zf, zf_parsed);
let rf_parsed = ReplicationFeedback::parse(data.freeze());
assert_eq!(rf, rf_parsed);
}
#[test]

View File

@@ -132,7 +132,7 @@ pub enum TenantState {
/// A repository corresponds to one .neon directory. One repository holds multiple
/// timelines, forked off from the same initial call to 'initdb'.
impl Tenant {
/// Get Timeline handle for given zenith timeline ID.
/// Get Timeline handle for given Neon timeline ID.
/// This function is idempotent. It doesn't change internal state in any way.
pub fn get_timeline(&self, timeline_id: TimelineId) -> anyhow::Result<Arc<Timeline>> {
self.timelines

View File

@@ -183,7 +183,7 @@ pageserver_send(NeonRequest * request)
if (!connected)
pageserver_connect();
req_buff = zm_pack_request(request);
req_buff = nm_pack_request(request);
/*
* Send request.
@@ -204,7 +204,7 @@ pageserver_send(NeonRequest * request)
if (message_level_is_interesting(PageStoreTrace))
{
char *msg = zm_to_string((NeonMessage *) request);
char *msg = nm_to_string((NeonMessage *) request);
neon_log(PageStoreTrace, "sent request: %s", msg);
pfree(msg);
@@ -230,12 +230,12 @@ pageserver_receive(void)
else if (resp_buff.len == -2)
neon_log(ERROR, "could not read COPY data: %s", PQerrorMessage(pageserver_conn));
}
resp = zm_unpack_response(&resp_buff);
resp = nm_unpack_response(&resp_buff);
PQfreemem(resp_buff.data);
if (message_level_is_interesting(PageStoreTrace))
{
char *msg = zm_to_string((NeonMessage *) resp);
char *msg = nm_to_string((NeonMessage *) resp);
neon_log(PageStoreTrace, "got response: %s", msg);
pfree(msg);
@@ -282,9 +282,9 @@ page_server_api api = {
static bool
check_neon_id(char **newval, void **extra, GucSource source)
{
uint8 zid[16];
uint8 id[16];
return **newval == '\0' || HexDecodeString(zid, *newval, 16);
return **newval == '\0' || HexDecodeString(id, *newval, 16);
}
static char *

View File

@@ -128,9 +128,9 @@ typedef struct
* message */
} NeonErrorResponse;
extern StringInfoData zm_pack_request(NeonRequest * msg);
extern NeonResponse * zm_unpack_response(StringInfo s);
extern char *zm_to_string(NeonMessage * msg);
extern StringInfoData nm_pack_request(NeonRequest * msg);
extern NeonResponse * nm_unpack_response(StringInfo s);
extern char *nm_to_string(NeonMessage * msg);
/*
* API

View File

@@ -160,7 +160,7 @@ page_server_request(void const *req)
StringInfoData
zm_pack_request(NeonRequest * msg)
nm_pack_request(NeonRequest * msg)
{
StringInfoData s;
@@ -235,7 +235,7 @@ zm_pack_request(NeonRequest * msg)
}
NeonResponse *
zm_unpack_response(StringInfo s)
nm_unpack_response(StringInfo s)
{
NeonMessageTag tag = pq_getmsgbyte(s);
NeonResponse *resp = NULL;
@@ -329,7 +329,7 @@ zm_unpack_response(StringInfo s)
/* dump to json for debugging / error reporting purposes */
char *
zm_to_string(NeonMessage * msg)
nm_to_string(NeonMessage * msg)
{
StringInfoData s;
@@ -632,7 +632,7 @@ neon_init(void)
* It may cause problems with XLogFlush. So return pointer backward to the origin of the page.
*/
static XLogRecPtr
zm_adjust_lsn(XLogRecPtr lsn)
nm_adjust_lsn(XLogRecPtr lsn)
{
/*
* If lsn points to the beging of first record on page or segment, then
@@ -685,7 +685,7 @@ neon_get_request_lsn(bool *latest, RelFileNode rnode, ForkNumber forknum, BlockN
elog(DEBUG1, "neon_get_request_lsn GetLastWrittenLSN lsn %X/%X ",
(uint32) ((lsn) >> 32), (uint32) (lsn));
lsn = zm_adjust_lsn(lsn);
lsn = nm_adjust_lsn(lsn);
/*
* Is it possible that the last-written LSN is ahead of last flush
@@ -1569,7 +1569,7 @@ neon_truncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks)
*/
lsn = GetXLogInsertRecPtr();
lsn = zm_adjust_lsn(lsn);
lsn = nm_adjust_lsn(lsn);
/*
* Flush it, too. We don't actually care about it here, but let's uphold

View File

@@ -167,7 +167,7 @@ pub fn upgrade_control_file(buf: &[u8], version: u32) -> Result<SafeKeeperState>
remote_consistent_lsn: Lsn(0),
peers: Peers(vec![]),
});
// migrate to hexing some zids
// migrate to hexing some ids
} else if version == 2 {
info!("reading safekeeper control file version {}", version);
let oldstate = SafeKeeperStateV2::des(&buf[..buf.len()])?;