mirror of
https://github.com/neondatabase/neon.git
synced 2026-01-07 13:32:57 +00:00
Fix safekeeper -D option.
The -D option to specify working directory was broken:
$ mkdir foobar
$ ./target/debug/safekeeper -D foobar
Error: failed to open "foobar/safekeeper.log"
Caused by:
No such file or directory (os error 2)
This was because we both chdir'd into to specified directory, and also
prepended the directory to all the paths. So in the above example, it
actually tried to create the log file in "foobar/foobar/safekeeper.log"
Change it to work the same way as in the pageserver: chdir to the
specified directory, and leave 'workdir' always set to ".".
We wouldn't necessarily need the 'workdir' variable in the config at all,
and could assume that the current working directory is always the
safekeeper data directory, but I'd like to keep this consistent with
the pageserver. The pageserver doesn't assume that for the sake of unit
tests. We don't currently have unit tests in the safekeeper that write
to disk but we might want to in the future.
This commit is contained in:
@@ -79,7 +79,10 @@ fn main() -> Result<()> {
         .get_matches();

     let mut conf = SafeKeeperConf {
         data_dir: PathBuf::from("./"),
+        // Always set to './'. We will chdir into the directory specified on the
+        // command line, so that when the server is running, all paths are relative
+        // to that.
+        workdir: PathBuf::from("./"),
         daemonize: false,
         no_sync: false,
         pageserver_addr: None,
@@ -91,10 +94,8 @@ fn main() -> Result<()> {
     };

     if let Some(dir) = arg_matches.value_of("datadir") {
         conf.data_dir = PathBuf::from(dir);

         // change into the data directory.
-        std::env::set_current_dir(&conf.data_dir)?;
+        std::env::set_current_dir(PathBuf::from(dir))?;
     }

     if arg_matches.is_present("no-sync") {
@@ -129,8 +130,7 @@ fn main() -> Result<()> {
 }

 fn start_safekeeper(conf: SafeKeeperConf) -> Result<()> {
-    let log_filename = conf.data_dir.join("safekeeper.log");
-    let log_file = logging::init(log_filename, conf.daemonize)?;
+    let log_file = logging::init("safekeeper.log", conf.daemonize)?;

     let http_listener = TcpListener::bind(conf.listen_http_addr.clone()).map_err(|e| {
         error!("failed to bind to address {}: {}", conf.listen_http_addr, e);
@@ -24,7 +24,14 @@ pub mod defaults {

 #[derive(Debug, Clone)]
 pub struct SafeKeeperConf {
     pub data_dir: PathBuf,
+    // Repository directory, relative to current working directory.
+    // Normally, the safekeeper changes the current working directory
+    // to the repository, and 'workdir' is always '.'. But we don't do
+    // that during unit testing, because the current directory is global
+    // to the process but different unit tests work on different
+    // data directories to avoid clashing with each other.
+    pub workdir: PathBuf,

     pub daemonize: bool,
     pub no_sync: bool,
     pub listen_pg_addr: String,
@@ -243,7 +243,7 @@ impl ReplicationConn {
                 let segno = start_pos.segment_number(wal_seg_size);
                 let wal_file_name = XLogFileName(timeline, segno, wal_seg_size);
                 let timeline_id = swh.timeline.get().timelineid.to_string();
-                let wal_file_path = swh.conf.data_dir.join(timeline_id).join(wal_file_name);
+                let wal_file_path = swh.conf.workdir.join(timeline_id).join(wal_file_name);
                 Self::open_wal_file(&wal_file_path)?
             }
         };
@@ -54,7 +54,7 @@ async fn offload_files(
             && IsXLogFileName(entry.file_name().to_str().unwrap())
             && entry.metadata().unwrap().created().unwrap() <= horizon
         {
-            let relpath = path.strip_prefix(&conf.data_dir).unwrap();
+            let relpath = path.strip_prefix(&conf.workdir).unwrap();
             let s3path = String::from("walarchive/") + relpath.to_str().unwrap();
             if !listing.contains(&s3path) {
                 let mut file = File::open(&path)?;
@@ -97,7 +97,7 @@ async fn main_loop(conf: &SafeKeeperConf) -> Result<()> {
             .flat_map(|b| b.contents.iter().map(|o| o.key.clone()))
             .collect();

-        let n = offload_files(&bucket, &listing, &conf.data_dir, conf).await?;
+        let n = offload_files(&bucket, &listing, &conf.workdir, conf).await?;
         info!("Offload {} files to S3", n);
         sleep(conf.ttl.unwrap()).await;
     }
@@ -114,7 +114,7 @@ impl SharedState {
             conf: conf.clone(),
         };
         let (flush_lsn, tli) = if state.server.wal_seg_size != 0 {
-            let wal_dir = conf.data_dir.join(format!("{}", timelineid));
+            let wal_dir = conf.workdir.join(format!("{}", timelineid));
             find_end_of_wal(
                 &wal_dir,
                 state.server.wal_seg_size as usize,
@@ -140,7 +140,7 @@ impl SharedState {
         create: CreateControlFile,
     ) -> Result<(File, SafeKeeperState)> {
         let control_file_path = conf
-            .data_dir
+            .workdir
             .join(timelineid.to_string())
             .join(CONTROL_FILE_NAME);
         info!(
@@ -419,12 +419,12 @@ impl Storage for FileStorage {
         let wal_file_name = XLogFileName(server.tli, segno, wal_seg_size);
         let wal_file_path = self
             .conf
-            .data_dir
+            .workdir
             .join(ztli.to_string())
             .join(wal_file_name.clone());
         let wal_file_partial_path = self
             .conf
-            .data_dir
+            .workdir
             .join(ztli.to_string())
             .join(wal_file_name.clone() + ".partial");
Reference in New Issue
Block a user