Reduce size of shared buffers for wal-redo-postgres

This commit is contained in:
Konstantin Knizhnik
2021-04-14 14:33:55 +03:00
parent a606336074
commit d9bc2109bb
5 changed files with 15 additions and 9 deletions

View File

@@ -635,6 +635,7 @@ pub fn regress_check(pg: &PostgresNode) {
let regress_run_path = Path::new(env!("CARGO_MANIFEST_DIR")).join("tmp_check/regress");
fs::create_dir_all(regress_run_path.clone()).unwrap();
fs::create_dir_all(regress_run_path.join("testtablespace")).unwrap();
std::env::set_current_dir(regress_run_path).unwrap();
let regress_build_path =
@@ -650,7 +651,7 @@ pub fn regress_check(pg: &PostgresNode) {
format!("--dlpath={}", regress_build_path.to_str().unwrap()).as_str(),
format!(
"--schedule={}",
regress_src_path.join("parallel_schedule").to_str().unwrap()
regress_src_path.join("serial_schedule").to_str().unwrap()
)
.as_str(),
format!("--inputdir={}", regress_src_path.to_str().unwrap()).as_str(),

View File

@@ -138,7 +138,7 @@ fn open_rocksdb(conf: &PageServerConf, sys_id: u64) -> DB {
let path = conf.data_dir.join(sys_id.to_string());
let mut opts = Options::default();
opts.create_if_missing(true);
opts.set_use_fsync(true);
//opts.set_use_fsync(true);
opts.set_compression_type(DBCompressionType::Lz4);
DB::open(&opts, &path).unwrap()
}

View File

@@ -508,11 +508,11 @@ impl Connection {
loop {
let message = self.read_message().await?;
/*
if let Some(m) = &message {
info!("query({}): {:?}", sysid, m);
trace!("query({}): {:?}", sysid, m);
};
*/
if message.is_none() {
// connection was closed
return Ok(());

View File

@@ -18,6 +18,8 @@ use log::*;
use std::assert;
use std::cell::RefCell;
use std::fs;
use std::fs::OpenOptions;
use std::io::prelude::*;
use std::io::Error;
use std::sync::Arc;
use std::time::Duration;
@@ -71,7 +73,7 @@ pub fn wal_redo_main(conf: PageServerConf, sys_id: u64) {
// After that, kill it and start a new one. This is mostly to avoid
// using up all shared buffers in Postgres's shared buffer cache; we don't
// want to write any pages to disk in the WAL redo process.
for _i in 1..100 {
for _i in 1..100000 {
let request = walredo_channel_receiver.recv().unwrap();
let result = handle_apply_request(&pcache, &process, &runtime, request);
@@ -162,8 +164,11 @@ impl WalRedoProcess {
panic!("initdb failed: {}\nstderr:\n{}",
std::str::from_utf8(&initdb.stdout).unwrap(),
std::str::from_utf8(&initdb.stderr).unwrap());
}
} else {
// Limit shared cache for wal-redo-postgres
let mut config = OpenOptions::new().append(true).open(datadir.join("postgresql.conf"))?;
config.write(b"shared_buffers=128kB\n")?;
}
// Start postgres itself
let mut child = Command::new("postgres")
.arg("--wal-redo")