diff --git a/pageserver/src/tenant/blob_io.rs b/pageserver/src/tenant/blob_io.rs
index 78ecbcb9c1..52eafc72ee 100644
--- a/pageserver/src/tenant/blob_io.rs
+++ b/pageserver/src/tenant/blob_io.rs
@@ -74,6 +74,7 @@ where
         };

         dstbuf.clear();
+        dstbuf.reserve(len);

         // Read the payload
         let mut remain = len;
diff --git a/pageserver/src/tenant/delta_layer.rs b/pageserver/src/tenant/delta_layer.rs
index a908d66200..dcd6956640 100644
--- a/pageserver/src/tenant/delta_layer.rs
+++ b/pageserver/src/tenant/delta_layer.rs
@@ -260,8 +260,9 @@ impl Layer for DeltaLayer {

         // Ok, 'offsets' now contains the offsets of all the entries we need to read
         let mut cursor = file.block_cursor();
+        let mut buf = Vec::new();
         for (entry_lsn, pos) in offsets {
-            let buf = cursor.read_blob(pos).with_context(|| {
+            cursor.read_blob_into_buf(pos, &mut buf).with_context(|| {
                 format!(
                     "Failed to read blob from virtual file {}",
                     file.file.path.display()
diff --git a/pageserver/src/walredo.rs b/pageserver/src/walredo.rs
index 59dadbb1d3..f05bf46d96 100644
--- a/pageserver/src/walredo.rs
+++ b/pageserver/src/walredo.rs
@@ -740,7 +740,11 @@ impl PostgresRedoProcess {
         // This could be problematic if there are millions of records to replay,
         // but in practice the number of records is usually so small that it doesn't
         // matter, and it's better to keep this code simple.
-        let mut writebuf: Vec<u8> = Vec::new();
+        //
+        // Most requests start with a before-image with BLCKSZ bytes, followed
+        // by some other WAL records. Start with a buffer that can hold that
+        // comfortably.
+        let mut writebuf: Vec<u8> = Vec::with_capacity((BLCKSZ as usize) * 3);
         build_begin_redo_for_block_msg(tag, &mut writebuf);
         if let Some(img) = base_img {
             build_push_page_msg(tag, &img, &mut writebuf);
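
Taken together, the three hunks cut per-record heap allocations on the read path: blob_io.rs reserves the payload length up front, delta_layer.rs reuses one buffer across the loop via read_blob_into_buf, and walredo.rs pre-sizes the redo request buffer. Below is a minimal, self-contained sketch of that reuse pattern; it is not pageserver code, and read_record is a hypothetical stand-in for BlockCursor::read_blob_into_buf.

```rust
// Sketch of the buffer-reuse pattern the delta_layer.rs hunk switches to:
// one Vec<u8> is allocated outside the loop and refilled on each iteration,
// instead of a read_blob()-style call returning a freshly allocated Vec
// every time. `read_record` is a hypothetical stand-in.
fn read_record(payload: &[u8], dstbuf: &mut Vec<u8>) {
    dstbuf.clear();                // keep the existing capacity, drop old contents
    dstbuf.reserve(payload.len()); // grow at most once, as in the blob_io.rs hunk
    dstbuf.extend_from_slice(payload);
}

fn main() {
    let records: Vec<Vec<u8>> = vec![vec![1; 100], vec![2; 8192], vec![3; 500]];
    let mut buf = Vec::new(); // reused across all iterations
    for rec in &records {
        read_record(rec, &mut buf);
        println!("read {} bytes, buffer capacity {}", buf.len(), buf.capacity());
    }
}
```

Once the buffer has grown to fit the largest record seen so far, later iterations allocate nothing at all, since clear() keeps the capacity.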