Christian Schwarz abfac6ef2a Merge remote-tracking branch 'origin/main' into HEAD
Conflicts:
	libs/pageserver_api/src/models.rs
	pageserver/src/lib.rs
	pageserver/src/tenant_mgr.rs

There was a merge conflict around attach_tenant() where
I didn't understand why Git reported a conflict.
I went through the changes in `origin/main` since the last
merge done by Heikki and couldn't find anything that would
conflict there.

Original git diff right after `git merge` follows:

   diff --cc libs/pageserver_api/src/models.rs
   index 750585b58,aefd79336..000000000
   --- a/libs/pageserver_api/src/models.rs
   +++ b/libs/pageserver_api/src/models.rs
   @@@ -15,17 -15,13 +15,27 @@@ use bytes::{BufMut, Bytes, BytesMut}
     /// A state of a tenant in pageserver's memory.
     #[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
     pub enum TenantState {
   ++<<<<<<< HEAD
    +    // This tenant is being loaded from local disk
    +    Loading,
    +    // This tenant is being downloaded from cloud storage.
    +    Attaching,
    +    /// Tenant is fully operational
    +    Active,
    +    /// A tenant is recognized by pageserver, but it is being detached or the system is being
    +    /// shut down.
    +    Paused,
    +    /// A tenant is recognized by the pageserver, but can no longer used for any operations,
    +    /// because it failed to get activated.
   ++=======
   +     /// Tenant is fully operational, its background jobs might be running or not.
   +     Active { background_jobs_running: bool },
   +     /// A tenant is recognized by pageserver, but it is being detached or the
   +     /// system is being shut down.
   +     Paused,
   +     /// A tenant is recognized by the pageserver, but can no longer be used for
   +     /// any operations, because it failed to be activated.
   ++>>>>>>> origin/main
         Broken,
     }

   diff --cc pageserver/src/lib.rs
   index 2d5b66f57,e3112223e..000000000
   --- a/pageserver/src/lib.rs
   +++ b/pageserver/src/lib.rs
   @@@ -22,7 -23,11 +23,13 @@@ pub mod walreceiver
     pub mod walrecord;
     pub mod walredo;

   ++<<<<<<< HEAD
   ++=======
   + use std::collections::HashMap;
   + use std::path::Path;
   +
   ++>>>>>>> origin/main
     use tracing::info;
     -use utils::id::{TenantId, TimelineId};

     use crate::task_mgr::TaskKind;

   @@@ -103,14 -108,51 +110,64 @@@ fn exponential_backoff_duration_seconds
         }
     }

   ++<<<<<<< HEAD
    +/// A suffix to be used during file sync from the remote storage,
    +/// to ensure that we do not leave corrupted files that pretend to be layers.
    +const TEMP_FILE_SUFFIX: &str = "___temp";
   ++=======
   + /// A newtype to store arbitrary data grouped by tenant and timeline ids.
    + /// One could use [`utils::id::TenantTimelineId`] for grouping, but that would
   + /// not include the cases where a certain tenant has zero timelines.
   + /// This is sometimes important: a tenant could be registered during initial load from FS,
   + /// even if he has no timelines on disk.
   + #[derive(Debug)]
   + pub struct TenantTimelineValues<T>(HashMap<TenantId, HashMap<TimelineId, T>>);
   +
   + impl<T> TenantTimelineValues<T> {
   +     fn new() -> Self {
   +         Self(HashMap::new())
   +     }
   + }
   +
   + /// The name of the metadata file pageserver creates per timeline.
   + /// Full path: `tenants/<tenant_id>/timelines/<timeline_id>/metadata`.
   + pub const METADATA_FILE_NAME: &str = "metadata";
   +
   + /// Per-tenant configuration file.
   + /// Full path: `tenants/<tenant_id>/config`.
   + pub const TENANT_CONFIG_NAME: &str = "config";
   +
   + /// A suffix used for various temporary files. Any temporary files found in the
   + /// data directory at pageserver startup can be automatically removed.
   + pub const TEMP_FILE_SUFFIX: &str = "___temp";
   +
   + /// A marker file to mark that a timeline directory was not fully initialized.
   + /// If a timeline directory with this marker is encountered at pageserver startup,
   + /// the timeline directory and the marker file are both removed.
   + /// Full path: `tenants/<tenant_id>/timelines/<timeline_id>___uninit`.
   + pub const TIMELINE_UNINIT_MARK_SUFFIX: &str = "___uninit";
   +
   + pub fn is_temporary(path: &Path) -> bool {
   +     match path.file_name() {
   +         Some(name) => name.to_string_lossy().ends_with(TEMP_FILE_SUFFIX),
   +         None => false,
   +     }
   + }
   +
   + pub fn is_uninit_mark(path: &Path) -> bool {
   +     match path.file_name() {
   +         Some(name) => name
   +             .to_string_lossy()
   +             .ends_with(TIMELINE_UNINIT_MARK_SUFFIX),
   +         None => false,
   ++    }
   ++}
   ++>>>>>>> origin/main
    +
    +pub fn is_temporary(path: &std::path::Path) -> bool {
    +    match path.file_name() {
    +        Some(name) => name.to_string_lossy().ends_with(TEMP_FILE_SUFFIX),
    +        None => false,
         }
     }

   diff --cc pageserver/src/tenant_mgr.rs
   index 73593bc48,061d7fa19..000000000
   --- a/pageserver/src/tenant_mgr.rs
   +++ b/pageserver/src/tenant_mgr.rs
   @@@ -13,11 -13,18 +13,22 @@@ use tracing::*
     use remote_storage::GenericRemoteStorage;

     use crate::config::PageServerConf;
   ++<<<<<<< HEAD
   ++=======
   + use crate::http::models::TenantInfo;
   + use crate::storage_sync::index::{LayerFileMetadata, RemoteIndex, RemoteTimelineIndex};
   + use crate::storage_sync::{self, LocalTimelineInitStatus, SyncStartupData, TimelineLocalFiles};
   ++>>>>>>> origin/main
     use crate::task_mgr::{self, TaskKind};
    -use crate::tenant::{
    -    ephemeral_file::is_ephemeral_file, metadata::TimelineMetadata, Tenant, TenantState,
    -};
    +use crate::tenant::{Tenant, TenantState};
     use crate::tenant_config::TenantConfOpt;
   ++<<<<<<< HEAD
   ++=======
   + use crate::walredo::PostgresRedoManager;
   + use crate::{is_temporary, is_uninit_mark, METADATA_FILE_NAME, TEMP_FILE_SUFFIX};
   ++>>>>>>> origin/main

    -use utils::crashsafe::{self, path_with_suffix_extension};
    +use utils::fs_ext::PathExt;
      use utils::id::{TenantId, TimelineId};

     mod tenants_state {
   @@@ -341,87 -521,334 +352,247 @@@ pub fn list_tenants() -> Vec<(TenantId
             .collect()
     }

    -#[derive(Debug)]
    -pub enum TenantAttachData {
    -    Ready(HashMap<TimelineId, TimelineLocalFiles>),
    -    Broken(anyhow::Error),
    -}
    -/// Attempts to collect information about all tenant and timelines, existing on the local FS.
    -/// If finds any, deletes all temporary files and directories, created before. Also removes empty directories,
    -/// that may appear due to such removals.
    -/// Does not fail on particular timeline or tenant collection errors, rather logging them and ignoring the entities.
    -fn local_tenant_timeline_files(
    -    config: &'static PageServerConf,
    -) -> anyhow::Result<HashMap<TenantId, TenantAttachData>> {
    -    let _entered = info_span!("local_tenant_timeline_files").entered();
    -
    -    let mut local_tenant_timeline_files = HashMap::new();
    -    let tenants_dir = config.tenants_path();
    -    for tenants_dir_entry in fs::read_dir(&tenants_dir)
    -        .with_context(|| format!("Failed to list tenants dir {}", tenants_dir.display()))?
    -    {
    -        match &tenants_dir_entry {
    -            Ok(tenants_dir_entry) => {
    -                let tenant_dir_path = tenants_dir_entry.path();
    -                if is_temporary(&tenant_dir_path) {
    -                    info!(
    -                        "Found temporary tenant directory, removing: {}",
    -                        tenant_dir_path.display()
    -                    );
    -                    if let Err(e) = fs::remove_dir_all(&tenant_dir_path) {
    -                        error!(
    -                            "Failed to remove temporary directory '{}': {:?}",
    -                            tenant_dir_path.display(),
    -                            e
    -                        );
    -                    }
    -                } else {
    -                    match collect_timelines_for_tenant(config, &tenant_dir_path) {
    -                        Ok((tenant_id, TenantAttachData::Broken(e))) => {
    -                            local_tenant_timeline_files.entry(tenant_id).or_insert(TenantAttachData::Broken(e));
    -                        },
    -                        Ok((tenant_id, TenantAttachData::Ready(collected_files))) => {
    -                            if collected_files.is_empty() {
    -                                match remove_if_empty(&tenant_dir_path) {
    -                                    Ok(true) => info!("Removed empty tenant directory {}", tenant_dir_path.display()),
    -                                    Ok(false) => {
    -                                        // insert empty timeline entry: it has some non-temporary files inside that we cannot remove
    -                                        // so make obvious for HTTP API callers, that something exists there and try to load the tenant
    -                                        let _ = local_tenant_timeline_files.entry(tenant_id).or_insert_with(|| TenantAttachData::Ready(HashMap::new()));
    -                                    },
    -                                    Err(e) => error!("Failed to remove empty tenant directory: {e:?}"),
    -                                }
    -                            } else {
    -                                match local_tenant_timeline_files.entry(tenant_id) {
    -                                    hash_map::Entry::Vacant(entry) => {
    -                                        entry.insert(TenantAttachData::Ready(collected_files));
    -                                    }
    -                                    hash_map::Entry::Occupied(entry) =>{
    -                                        if let TenantAttachData::Ready(old_timelines) = entry.into_mut() {
    -                                            old_timelines.extend(collected_files);
    -                                        }
    -                                    },
    -                                }
    -                            }
    -                        },
    -                        Err(e) => error!(
    -                            "Failed to collect tenant files from dir '{}' for entry {:?}, reason: {:#}",
    -                            tenants_dir.display(),
    -                            tenants_dir_entry,
    -                            e
    -                        ),
    -                    }
    +/// Execute Attach mgmt API command.
    +///
    +/// Downloading all the tenant data is performed in the background, this merely
    +/// spawns the background task and returns quickly.
    +pub async fn attach_tenant(
    +    conf: &'static PageServerConf,
    +    tenant_id: TenantId,
    +    remote_storage: &GenericRemoteStorage,
    +) -> anyhow::Result<()> {
    +    match tenants_state::write_tenants().entry(tenant_id) {
    +        hash_map::Entry::Occupied(e) => {
    +            // Cannot attach a tenant that already exists. The error message depends on
    +            // the state it's in.
    +            match e.get().current_state() {
    +                TenantState::Attaching => {
    +                    anyhow::bail!("tenant {tenant_id} attach is already in progress")
                     }
   ++<<<<<<< HEAD
    +                current_state => {
    +                    anyhow::bail!("tenant already exists, current state: {current_state:?}")
   ++=======
   +             }
   +             Err(e) => error!(
   +                 "Failed to list tenants dir entry {:?} in directory {}, reason: {:?}",
   +                 tenants_dir_entry,
   +                 tenants_dir.display(),
   +                 e
   +             ),
   +         }
   +     }
   +
   +     info!(
   +         "Collected files for {} tenants",
   +         local_tenant_timeline_files.len(),
   +     );
   +     Ok(local_tenant_timeline_files)
   + }
   +
   + fn remove_if_empty(tenant_dir_path: &Path) -> anyhow::Result<bool> {
   +     let directory_is_empty = tenant_dir_path
   +         .read_dir()
   +         .with_context(|| {
   +             format!(
   +                 "Failed to read directory '{}' contents",
   +                 tenant_dir_path.display()
   +             )
   +         })?
   +         .next()
   +         .is_none();
   +
   +     if directory_is_empty {
   +         fs::remove_dir_all(&tenant_dir_path).with_context(|| {
   +             format!(
   +                 "Failed to remove empty directory '{}'",
   +                 tenant_dir_path.display(),
   +             )
   +         })?;
   +
   +         Ok(true)
   +     } else {
   +         Ok(false)
   +     }
   + }
   +
   + fn collect_timelines_for_tenant(
   +     config: &'static PageServerConf,
   +     tenant_path: &Path,
   + ) -> anyhow::Result<(TenantId, TenantAttachData)> {
   +     let tenant_id = tenant_path
   +         .file_name()
   +         .and_then(OsStr::to_str)
   +         .unwrap_or_default()
   +         .parse::<TenantId>()
   +         .context("Could not parse tenant id out of the tenant dir name")?;
   +     let timelines_dir = config.timelines_path(&tenant_id);
   +
   +     if !timelines_dir.as_path().is_dir() {
   +         return Ok((
   +             tenant_id,
   +             TenantAttachData::Broken(anyhow::anyhow!(
   +                 "Tenant {} has no timelines directory at {}",
   +                 tenant_id,
   +                 timelines_dir.display()
   +             )),
   +         ));
   +     }
   +
   +     let mut tenant_timelines = HashMap::new();
   +     for timelines_dir_entry in fs::read_dir(&timelines_dir)
   +         .with_context(|| format!("Failed to list timelines dir entry for tenant {tenant_id}"))?
   +     {
   +         match timelines_dir_entry {
   +             Ok(timelines_dir_entry) => {
   +                 let timeline_dir = timelines_dir_entry.path();
   +                 if is_temporary(&timeline_dir) {
   +                     info!(
   +                         "Found temporary timeline directory, removing: {}",
   +                         timeline_dir.display()
   +                     );
   +                     if let Err(e) = fs::remove_dir_all(&timeline_dir) {
   +                         error!(
   +                             "Failed to remove temporary directory '{}': {:?}",
   +                             timeline_dir.display(),
   +                             e
   +                         );
   +                     }
   +                 } else if is_uninit_mark(&timeline_dir) {
   +                     let timeline_uninit_mark_file = &timeline_dir;
   +                     info!(
   +                         "Found an uninit mark file {}, removing the timeline and its uninit mark",
   +                         timeline_uninit_mark_file.display()
   +                     );
   +                     let timeline_id = timeline_uninit_mark_file
   +                         .file_stem()
   +                         .and_then(OsStr::to_str)
   +                         .unwrap_or_default()
   +                         .parse::<TimelineId>()
   +                         .with_context(|| {
   +                             format!(
   +                                 "Could not parse timeline id out of the timeline uninit mark name {}",
   +                                 timeline_uninit_mark_file.display()
   +                             )
   +                         })?;
   +                     let timeline_dir = config.timeline_path(&timeline_id, &tenant_id);
   +                     if let Err(e) =
   +                         remove_timeline_and_uninit_mark(&timeline_dir, timeline_uninit_mark_file)
   +                     {
   +                         error!("Failed to clean up uninit marked timeline: {e:?}");
   +                     }
   +                 } else {
   +                     let timeline_id = timeline_dir
   +                         .file_name()
   +                         .and_then(OsStr::to_str)
   +                         .unwrap_or_default()
   +                         .parse::<TimelineId>()
   +                         .with_context(|| {
   +                             format!(
   +                                 "Could not parse timeline id out of the timeline dir name {}",
   +                                 timeline_dir.display()
   +                             )
   +                         })?;
   +                     let timeline_uninit_mark_file =
   +                         config.timeline_uninit_mark_file_path(tenant_id, timeline_id);
   +                     if timeline_uninit_mark_file.exists() {
   +                         info!("Found an uninit mark file for timeline {tenant_id}/{timeline_id}, removing the timeline and its uninit mark");
   +                         if let Err(e) = remove_timeline_and_uninit_mark(
   +                             &timeline_dir,
   +                             &timeline_uninit_mark_file,
   +                         ) {
   +                             error!("Failed to clean up uninit marked timeline: {e:?}");
   +                         }
   +                     } else {
   +                         match collect_timeline_files(&timeline_dir) {
   +                             Ok((metadata, timeline_files)) => {
   +                                 tenant_timelines.insert(
   +                                     timeline_id,
   +                                     TimelineLocalFiles::collected(metadata, timeline_files),
   +                                 );
   +                             }
   +                             Err(e) => {
   +                                 error!(
   +                                     "Failed to process timeline dir contents at '{}', reason: {:?}",
   +                                     timeline_dir.display(),
   +                                     e
   +                                 );
   +                                 match remove_if_empty(&timeline_dir) {
   +                                     Ok(true) => info!(
   +                                         "Removed empty timeline directory {}",
   +                                         timeline_dir.display()
   +                                     ),
   +                                     Ok(false) => (),
   +                                     Err(e) => {
   +                                         error!("Failed to remove empty timeline directory: {e:?}")
   +                                     }
   +                                 }
   +                             }
   +                         }
   +                     }
   ++>>>>>>> origin/main
                     }
                 }
    -            Err(e) => {
    -                error!("Failed to list timelines for entry tenant {tenant_id}, reason: {e:?}")
    -            }
    +        }
    +        hash_map::Entry::Vacant(v) => {
    +            let tenant = Tenant::spawn_attach(conf, tenant_id, remote_storage)?;
    +            v.insert(tenant);
    +            Ok(())
             }
         }
    -
    -    if tenant_timelines.is_empty() {
    -        // this is normal, we've removed all broken, empty and temporary timeline dirs
    -        // but should allow the tenant to stay functional and allow creating new timelines
    -        // on a restart, we require tenants to have the timelines dir, so leave it on disk
    -        debug!("Tenant {tenant_id} has no timelines loaded");
    -    }
    -
    -    Ok((tenant_id, TenantAttachData::Ready(tenant_timelines)))
     }

    -fn remove_timeline_and_uninit_mark(timeline_dir: &Path, uninit_mark: &Path) -> anyhow::Result<()> {
    -    fs::remove_dir_all(&timeline_dir)
    -        .or_else(|e| {
    -            if e.kind() == std::io::ErrorKind::NotFound {
    -                // we can leave the uninit mark without a timeline dir,
    -                // just remove the mark then
    -                Ok(())
    -            } else {
    -                Err(e)
    -            }
    -        })
    -        .with_context(|| {
    -            format!(
    -                "Failed to remove unit marked timeline directory {}",
    -                timeline_dir.display()
    -            )
    -        })?;
    -    fs::remove_file(&uninit_mark).with_context(|| {
    -        format!(
    -            "Failed to remove timeline uninit mark file {}",
    -            uninit_mark.display()
    -        )
    -    })?;
    +#[cfg(feature = "testing")]
    +use {
    +    crate::repository::GcResult, pageserver_api::models::TimelineGcRequest,
    +    utils::http::error::ApiError,
    +};

    -    Ok(())
    -}
    +#[cfg(feature = "testing")]
    +pub fn immediate_gc(
    +    tenant_id: TenantId,
    +    timeline_id: TimelineId,
    +    gc_req: TimelineGcRequest,
    +) -> Result<tokio::sync::oneshot::Receiver<Result<GcResult, anyhow::Error>>, ApiError> {
    +    let guard = tenants_state::read_tenants();

    -// discover timeline files and extract timeline metadata
    -//  NOTE: ephemeral files are excluded from the list
    -fn collect_timeline_files(
    -    timeline_dir: &Path,
    -) -> anyhow::Result<(TimelineMetadata, HashMap<PathBuf, LayerFileMetadata>)> {
    -    let mut timeline_files = HashMap::new();
    -    let mut timeline_metadata_path = None;
    -
    -    let timeline_dir_entries =
    -        fs::read_dir(&timeline_dir).context("Failed to list timeline dir contents")?;
    -    for entry in timeline_dir_entries {
    -        let entry_path = entry.context("Failed to list timeline dir entry")?.path();
    -        let metadata = entry_path.metadata()?;
    -
    -        if metadata.is_file() {
    -            if entry_path.file_name().and_then(OsStr::to_str) == Some(METADATA_FILE_NAME) {
    -                timeline_metadata_path = Some(entry_path);
    -            } else if is_ephemeral_file(&entry_path.file_name().unwrap().to_string_lossy()) {
    -                debug!("skipping ephemeral file {}", entry_path.display());
    -                continue;
    -            } else if is_temporary(&entry_path) {
    -                info!("removing temp timeline file at {}", entry_path.display());
    -                fs::remove_file(&entry_path).with_context(|| {
    -                    format!(
    -                        "failed to remove temp download file at {}",
    -                        entry_path.display()
    -                    )
    -                })?;
    -            } else {
    -                let layer_metadata = LayerFileMetadata::new(metadata.len());
    -                timeline_files.insert(entry_path, layer_metadata);
    +    let tenant = guard
    +        .get(&tenant_id)
    +        .map(Arc::clone)
    +        .with_context(|| format!("Tenant {tenant_id} not found"))
    +        .map_err(ApiError::NotFound)?;
    +
    +    let gc_horizon = gc_req.gc_horizon.unwrap_or_else(|| tenant.get_gc_horizon());
    +    // Use tenant's pitr setting
    +    let pitr = tenant.get_pitr_interval();
    +
    +    // Run in task_mgr to avoid race with detach operation
    +    let (task_done, wait_task_done) = tokio::sync::oneshot::channel();
    +    task_mgr::spawn(
    +        &tokio::runtime::Handle::current(),
    +        TaskKind::GarbageCollector,
    +        Some(tenant_id),
    +        Some(timeline_id),
    +        &format!("timeline_gc_handler garbage collection run for tenant {tenant_id} timeline {timeline_id}"),
    +        false,
    +        async move {
    +            fail::fail_point!("immediate_gc_task_pre");
    +            let result = tenant
    +                .gc_iteration(Some(timeline_id), gc_horizon, pitr, true)
    +                .instrument(info_span!("manual_gc", tenant = %tenant_id, timeline = %timeline_id))
    +                .await;
    +                // FIXME: `gc_iteration` can return an error for multiple reasons; we should handle it
    +                // better once the types support it.
    +            match task_done.send(result) {
    +                Ok(_) => (),
    +                Err(result) => error!("failed to send gc result: {result:?}"),
                 }
    +            Ok(())
             }
    -    }
    -
    -    // FIXME (rodionov) if attach call succeeded, and then pageserver is restarted before download is completed
    -    //   then attach is lost. There would be no retries for that,
    -    //   initial collect will fail because there is no metadata.
    -    //   We either need to start download if we see empty dir after restart or attach caller should
    -    //   be aware of that and retry attach if awaits_download for timeline switched from true to false
    -    //   but timelinne didn't appear locally.
    -    //   Check what happens with remote index in that case.
    -    let timeline_metadata_path = match timeline_metadata_path {
    -        Some(path) => path,
    -        None => anyhow::bail!("No metadata file found in the timeline directory"),
    -    };
    -    let metadata = TimelineMetadata::from_bytes(
    -        &fs::read(&timeline_metadata_path).context("Failed to read timeline metadata file")?,
    -    )
    -    .context("Failed to parse timeline metadata file bytes")?;
    -
    -    anyhow::ensure!(
    -        metadata.ancestor_timeline().is_some() || !timeline_files.is_empty(),
    -        "Timeline has no ancestor and no layer files"
         );

    -    Ok((metadata, timeline_files))
    +    // drop the guard until after we've spawned the task so that timeline shutdown will wait for the task
    +    drop(guard);
    +
    +    Ok(wait_task_done)
     }
   diff --git a/vendor/postgres-v14 b/vendor/postgres-v14
   index da50d99db..360ff1c63 160000
   --- a/vendor/postgres-v14
   +++ b/vendor/postgres-v14
   @@ -1 +1 @@
   -Subproject commit da50d99db54848f7a3e910f920aaad7dc6915d36
   +Subproject commit 360ff1c637a57d351a7a5a391d8e8afd8fde8c3a
   diff --git a/vendor/postgres-v15 b/vendor/postgres-v15
   index 780c3f8e3..d31b3f7c6 160000
   --- a/vendor/postgres-v15
   +++ b/vendor/postgres-v15
   @@ -1 +1 @@
   -Subproject commit 780c3f8e3524c2e32a2e28884c7b647fcebf71d7
   +Subproject commit d31b3f7c6d108e52c8bb11e812ce4e266501ea3d

Neon

Neon is a serverless open-source alternative to AWS Aurora Postgres. It separates storage and compute and substitutes the PostgreSQL storage layer by redistributing data across a cluster of nodes.

The project used to be called "Zenith". Many of the commands and code comments still refer to "zenith", but we are in the process of renaming things.

Quick start

Join the waitlist for our free tier to receive your serverless postgres instance. Then connect to it with your preferred postgres client (psql, dbeaver, etc) or use the online SQL editor.

Alternatively, compile and run the project locally.

Architecture overview

A Neon installation consists of compute nodes and a Neon storage engine.

Compute nodes are stateless PostgreSQL nodes backed by the Neon storage engine.

The Neon storage engine consists of two major components:

  • Pageserver. Scalable storage backend for the compute nodes.
  • WAL service. The service receives WAL from the compute node and ensures that it is stored durably.

Pageserver consists of:

  • Repository - Neon storage implementation.
  • WAL receiver - service that receives WAL from WAL service and stores it in the repository.
  • Page service - service that communicates with compute nodes and responds with pages from the repository.
  • WAL redo - service that builds pages from base images and WAL records on Page service request.
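
As a rough mental model of the read path, here is a purely illustrative Rust sketch (all names are hypothetical, not the pageserver's real API): the Page service fetches the latest base image and the newer WAL records from the repository, then asks WAL redo to replay them.

// Toy sketch only: hypothetical types, not pageserver's actual interfaces.
struct PageKey {
    rel: u32,
    block: u32,
}

struct Repository;

impl Repository {
    fn base_image(&self, _key: &PageKey) -> Vec<u8> {
        vec![0u8; 8192] // most recent materialized 8 KiB page image
    }
    fn wal_since_base(&self, _key: &PageKey) -> Vec<Vec<u8>> {
        Vec::new() // WAL records newer than the base image
    }
}

// WAL redo: replay WAL records on top of a base image.
fn walredo_apply(page: Vec<u8>, records: &[Vec<u8>]) -> Vec<u8> {
    for _record in records {
        // apply the record to the page image (elided)
    }
    page
}

// What the Page service conceptually does for one GetPage request.
fn get_page(repo: &Repository, key: &PageKey) -> Vec<u8> {
    let base = repo.base_image(key);
    let records = repo.wal_since_base(key);
    walredo_apply(base, &records)
}

fn main() {
    let repo = Repository;
    let page = get_page(&repo, &PageKey { rel: 1, block: 0 });
    assert_eq!(page.len(), 8192);
}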

Running local installation

Installing dependencies on Linux

  1. Install build dependencies and other applicable packages
  • On Ubuntu or Debian, this set of packages should be sufficient to build the code:
apt install build-essential libtool libreadline-dev zlib1g-dev flex bison libseccomp-dev \
libssl-dev clang pkg-config libpq-dev etcd cmake postgresql-client protobuf-compiler
  • On Fedora, these packages are needed:
dnf install flex bison readline-devel zlib-devel openssl-devel \
  libseccomp-devel perl clang cmake etcd postgresql postgresql-contrib protobuf-compiler
  2. Install Rust
# recommended approach from https://www.rust-lang.org/tools/install
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh

Installing dependencies on OSX (12.3.1)

  1. Install XCode and dependencies
xcode-select --install
brew install protobuf etcd openssl flex bison
  2. Install Rust
# recommended approach from https://www.rust-lang.org/tools/install
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
  3. Install PostgreSQL Client
# from https://stackoverflow.com/questions/44654216/correct-way-to-install-psql-without-full-postgres-on-macos
brew install libpq
brew link --force libpq

Rustc version

The project uses a rust-toolchain file to define the version it's built with in CI, for testing, and for local builds.

This file is automatically picked up by rustup, which installs (if absent) and uses the toolchain version pinned in the file.

rustup users who want to build with another toolchain can use the rustup override command to set a specific toolchain for the project's directory.

Non-rustup users most probably will not pick up the same toolchain from the file automatically, so they are responsible for manually verifying that their toolchain matches the version in the file. Newer rustc versions will most probably work fine, but older ones might not be supported due to new features used by the project or its crates.

Building on Linux

  1. Build neon and patched postgres
# Note: The path to the neon sources cannot contain a space.

git clone --recursive https://github.com/neondatabase/neon.git
cd neon

# The preferred and default is a debug build, which is demonstrably
# slower than a release build. For a release build,
# use "BUILD_TYPE=release make -j`nproc`"

make -j`nproc`

Building on OSX

  1. Build neon and patched postgres
# Note: The path to the neon sources cannot contain a space.

git clone --recursive https://github.com/neondatabase/neon.git
cd neon

# The preferred and default is a debug build, which is demonstrably
# slower than a release build. For a release build,
# use "BUILD_TYPE=release make -j`sysctl -n hw.logicalcpu`"

make -j`sysctl -n hw.logicalcpu`

Dependency installation notes

To run the psql client, install the postgresql-client package or modify PATH and LD_LIBRARY_PATH to include pg_install/bin and pg_install/lib, respectively.

To run the integration tests or Python scripts (not required to use the code), install Python (3.9 or higher), and install python3 packages using ./scripts/pysync (requires poetry) in the project directory.

Running neon database

  1. Start pageserver and postgres on top of it (should be called from repo root):
# Create repository in .neon with proper paths to binaries and data
# Later that would be responsibility of a package install script
> ./target/debug/neon_local init
Starting pageserver at '127.0.0.1:64000' in '.neon'.
pageserver started, pid: 2545906
Successfully initialized timeline de200bd42b49cc1814412c7e592dd6e9
Stopped pageserver 1 process with pid 2545906

# start pageserver and safekeeper
> ./target/debug/neon_local start
Starting etcd broker using "/usr/bin/etcd"
etcd started, pid: 2545996
Starting pageserver at '127.0.0.1:64000' in '.neon'.
pageserver started, pid: 2546005
Starting safekeeper at '127.0.0.1:5454' in '.neon/safekeepers/sk1'.
safekeeper 1 started, pid: 2546041

# start postgres compute node
> ./target/debug/neon_local pg start main
Starting new postgres (v14) main on timeline de200bd42b49cc1814412c7e592dd6e9 ...
Extracting base backup to create postgres instance: path=.neon/pgdatadirs/tenants/9ef87a5bf0d92544f6fafeeb3239695c/main port=55432
Starting postgres node at 'host=127.0.0.1 port=55432 user=cloud_admin dbname=postgres'

# check list of running postgres instances
> ./target/debug/neon_local pg list
 NODE  ADDRESS          TIMELINE                          BRANCH NAME  LSN        STATUS
 main  127.0.0.1:55432  de200bd42b49cc1814412c7e592dd6e9  main         0/16B5BA8  running
  2. Now, it is possible to connect to postgres and run some queries:
> psql -p55432 -h 127.0.0.1 -U cloud_admin postgres
postgres=# CREATE TABLE t(key int primary key, value text);
CREATE TABLE
postgres=# insert into t values(1,1);
INSERT 0 1
postgres=# select * from t;
 key | value
-----+-------
   1 | 1
(1 row)
  3. And create branches and run postgres on them:
# create branch named migration_check
> ./target/debug/neon_local timeline branch --branch-name migration_check
Created timeline 'b3b863fa45fa9e57e615f9f2d944e601' at Lsn 0/16F9A00 for tenant: 9ef87a5bf0d92544f6fafeeb3239695c. Ancestor timeline: 'main'

# check branches tree
> ./target/debug/neon_local timeline list
(L) main [de200bd42b49cc1814412c7e592dd6e9]
(L) ┗━ @0/16F9A00: migration_check [b3b863fa45fa9e57e615f9f2d944e601]

# start postgres on that branch
> ./target/debug/neon_local pg start migration_check --branch-name migration_check
Starting new postgres migration_check on timeline b3b863fa45fa9e57e615f9f2d944e601 ...
Extracting base backup to create postgres instance: path=.neon/pgdatadirs/tenants/9ef87a5bf0d92544f6fafeeb3239695c/migration_check port=55433
Starting postgres node at 'host=127.0.0.1 port=55433 user=cloud_admin dbname=postgres'

# check the new list of running postgres instances
> ./target/debug/neon_local pg list
 NODE             ADDRESS          TIMELINE                          BRANCH NAME      LSN        STATUS
 main             127.0.0.1:55432  de200bd42b49cc1814412c7e592dd6e9  main             0/16F9A38  running
 migration_check  127.0.0.1:55433  b3b863fa45fa9e57e615f9f2d944e601  migration_check  0/16F9A70  running

# this new postgres instance will have all the data from 'main' postgres,
# but modifications to it will not affect the data in the original postgres
> psql -p55433 -h 127.0.0.1 -U cloud_admin postgres
postgres=# select * from t;
 key | value
-----+-------
   1 | 1
(1 row)

postgres=# insert into t values(2,2);
INSERT 0 1

# check that the new change doesn't affect the 'main' postgres
> psql -p55432 -h 127.0.0.1 -U cloud_admin postgres
postgres=# select * from t;
 key | value
-----+-------
   1 | 1
(1 row)
  4. If you want to run tests afterward (see below), you must stop all the pageserver, safekeeper, and postgres instances you have just started. You can terminate them all with one command:
> ./target/debug/neon_local stop

Running tests

Ensure your dependencies are installed as described above.

git clone --recursive https://github.com/neondatabase/neon.git

CARGO_BUILD_FLAGS="--features=testing" make

./scripts/pytest
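
The testing feature gates test-only code paths in the Rust crates (the commit diff above shows pageserver's immediate_gc behind it). A minimal sketch of the pattern, with a hypothetical function name:

// Compiled only when built with `--features=testing`.
#[cfg(feature = "testing")]
pub fn testing_only_helper() {
    println!("available in testing builds only");
}

pub fn regular_code_path() {
    // Callers can gate individual statements on the same flag.
    #[cfg(feature = "testing")]
    testing_only_helper();
}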

Documentation

Currently we use README files to cover design ideas and overall architecture for each module, as well as rustdoc-style documentation comments. See also /docs/, a top-level overview of all available markdown documentation.

To view your rustdoc documentation in a browser, try running cargo doc --no-deps --open
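
For reference, rustdoc-style documentation comments are ordinary Rust comments: //! documents the enclosing module, /// documents the item that follows (the names below are illustrative):

//! Module-level documentation, rendered on the module's page by cargo doc.

/// Item-level documentation, rendered on this function's page.
/// Doc comments accept markdown, and code examples inside them
/// run as doctests under `cargo test`.
pub fn documented_example() {}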

Postgres-specific terms

Due to Neon's very close relation to PostgreSQL internals, numerous PostgreSQL-specific terms are used. The same applies to certain spelling: e.g. we use MB to denote 1024 * 1024 bytes; while MiB would be technically more correct, it is inconsistent with what PostgreSQL code and its documentation use.
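
Spelled out as constants (illustrative):

const MB: u64 = 1024 * 1024;    // "MB" in Neon/PostgreSQL usage: 1048576 bytes, strictly a MiB
const SI_MB: u64 = 1000 * 1000; // what "MB" denotes in strict SI usage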

To get more familiar with this aspect, refer to:

Join the development
