diff --git a/pageserver/src/page_cache.rs b/pageserver/src/page_cache.rs
index 8f360450a7..7c77ca5926 100644
--- a/pageserver/src/page_cache.rs
+++ b/pageserver/src/page_cache.rs
@@ -113,7 +113,7 @@ lazy_static! {
     pub static ref PAGECACHES: Mutex<HashMap<u64, Arc<PageCache>>> = Mutex::new(HashMap::new());
 }
 
-pub fn get_pagecahe(conf: PageServerConf, sys_id: u64) -> Arc<PageCache> {
+pub fn get_pagecache(conf: PageServerConf, sys_id: u64) -> Arc<PageCache> {
     let mut pcaches = PAGECACHES.lock().unwrap();
 
     if !pcaches.contains_key(&sys_id) {
diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs
index a5015d4e97..06760c6f68 100644
--- a/pageserver/src/page_service.rs
+++ b/pageserver/src/page_service.rs
@@ -462,7 +462,7 @@ impl Connection {
             })
             .unwrap();
 
-        // generick ack:
+        // generic ack:
         self.write_message_noflush(&BeMessage::RowDescription)
             .await?;
         self.write_message_noflush(&BeMessage::DataRow).await?;
@@ -503,7 +503,7 @@ impl Connection {
         self.stream.write_i16(0).await?; /* numAttributes */
         self.stream.flush().await?;
 
-        let pcache = page_cache::get_pagecahe(self.conf.clone(), sysid);
+        let pcache = page_cache::get_pagecache(self.conf.clone(), sysid);
 
         loop {
             let message = self.read_message().await?;
diff --git a/pageserver/src/restore_s3.rs b/pageserver/src/restore_s3.rs
index e603f1d1f8..08ba3e7fa3 100644
--- a/pageserver/src/restore_s3.rs
+++ b/pageserver/src/restore_s3.rs
@@ -119,7 +119,7 @@ async fn restore_chunk(conf: &PageServerConf) -> Result<(), S3Error> {
         panic!("no base backup found");
     }
 
-    let pcache = page_cache::get_pagecahe(conf.clone(), sys_id);
+    let pcache = page_cache::get_pagecache(conf.clone(), sys_id);
 
     pcache.init_valid_lsn(oldest_lsn);
     info!("{} files to restore...", slurp_futures.len());
@@ -305,7 +305,7 @@ async fn slurp_base_file(
     // FIXME: use constants (BLCKSZ)
     let mut blknum: u32 = parsed.segno * (1024 * 1024 * 1024 / 8192);
 
-    let pcache = page_cache::get_pagecahe(conf.clone(), sys_id);
+    let pcache = page_cache::get_pagecache(conf.clone(), sys_id);
 
     while bytes.remaining() >= 8192 {
         let tag = page_cache::BufferTag {
diff --git a/pageserver/src/walreceiver.rs b/pageserver/src/walreceiver.rs
index 54b57aaa11..9f382b2efb 100644
--- a/pageserver/src/walreceiver.rs
+++ b/pageserver/src/walreceiver.rs
@@ -71,7 +71,7 @@ async fn walreceiver_main(
     let mut caught_up = false;
 
     let sysid: u64 = identify_system.systemid().parse().unwrap();
-    let pcache = page_cache::get_pagecahe(conf, sysid);
+    let pcache = page_cache::get_pagecache(conf, sysid);
 
     //
     // Start streaming the WAL, from where we left off previously.
diff --git a/pageserver/src/walredo.rs b/pageserver/src/walredo.rs
index 77f69e8fcb..a06c87d584 100644
--- a/pageserver/src/walredo.rs
+++ b/pageserver/src/walredo.rs
@@ -52,7 +52,7 @@ pub fn wal_redo_main(conf: PageServerConf, sys_id: u64) {
         .build()
         .unwrap();
 
-    let pcache = page_cache::get_pagecahe(conf.clone(), sys_id);
+    let pcache = page_cache::get_pagecache(conf.clone(), sys_id);
 
     // Loop forever, handling requests as they come.
     let walredo_channel_receiver = &pcache.walredo_receiver;
diff --git a/vendor/postgres b/vendor/postgres
index b1f5a5ec14..4d92fa940f 160000
--- a/vendor/postgres
+++ b/vendor/postgres
@@ -1 +1 @@
-Subproject commit b1f5a5ec145d5d9614eec4824074edae1162e5fa
+Subproject commit 4d92fa940f102e44f2ce8dc45ea1cc6da73a063a
diff --git a/walkeeper/src/bin/wal_acceptor.rs b/walkeeper/src/bin/wal_acceptor.rs
index dc6d3a5111..75466bc328 100644
--- a/walkeeper/src/bin/wal_acceptor.rs
+++ b/walkeeper/src/bin/wal_acceptor.rs
@@ -27,14 +27,14 @@ fn main() -> Result<(), io::Error> {
                 .short("D")
                 .long("dir")
                 .takes_value(true)
-                .help("Path to the page server data directory"),
+                .help("Path to the WAL acceptor data directory"),
         )
         .arg(
             Arg::with_name("listen")
                 .short("l")
                 .long("listen")
                 .takes_value(true)
-                .help("listen for incoming page requests on ip:port (default: 127.0.0.1:5454)"),
+                .help("listen for incoming connections on ip:port (default: 127.0.0.1:5454)"),
         )
         .arg(
             Arg::with_name("pageserver")
diff --git a/walkeeper/src/wal_service.rs b/walkeeper/src/wal_service.rs
index 26fa0209cc..a4f0567623 100644
--- a/walkeeper/src/wal_service.rs
+++ b/walkeeper/src/wal_service.rs
@@ -444,7 +444,7 @@ impl System {
         return shared_state.hs_feedback;
     }
 
-    // Load and lock control file (prevent running more than one instane of safekeeper */
+    // Load and lock control file (prevent running more than one instance of safekeeper)
     fn load_control_file(&self, conf: &WalAcceptorConf) {
         let control_file_path = conf.data_dir
             .join(self.id.to_string())