Mirror of https://github.com/neondatabase/neon.git (synced 2026-01-24 05:40:36 +00:00)

Compare commits: release-38 ... arpad/virt (26 commits)
| SHA1 |
|---|
| a9e5da9613 |
| a5acfdaa5a |
| 70052ae1ca |
| fe69dd9a40 |
| 930eccfcaa |
| 29c2381fa5 |
| 7839cda66a |
| d565df25d6 |
| 92b7d7f466 |
| cfabd8b598 |
| 4432094443 |
| 83babcce30 |
| 735e20112a |
| 616e7046c7 |
| 1b916a105a |
| d11621d904 |
| 43bb8bfdbb |
| 300a5aa05e |
| b9c111962f |
| 83ae2bd82c |
| f2c21447ce |
| 93dcdb293a |
| a93274b389 |
| a7c0e4dcd0 |
| 3b81e0c86d |
| e5a397cf96 |
@@ -14,6 +14,7 @@
!pgxn/
!proxy/
!safekeeper/
!s3_scrubber/
!storage_broker/
!trace/
!vendor/postgres-v14/
507  Cargo.lock (generated)
File diff suppressed because it is too large.

27  Cargo.toml
@@ -7,6 +7,7 @@ members = [
"proxy",
"safekeeper",
"storage_broker",
"s3_scrubber",
"workspace_hack",
"trace",
"libs/compute_api",
@@ -37,11 +38,11 @@ async-compression = { version = "0.4.0", features = ["tokio", "gzip"] }
flate2 = "1.0.26"
async-stream = "0.3"
async-trait = "0.1"
aws-config = { version = "0.55", default-features = false, features=["rustls"] }
aws-sdk-s3 = "0.27"
aws-smithy-http = "0.55"
aws-credential-types = "0.55"
aws-types = "0.55"
aws-config = { version = "0.56", default-features = false, features=["rustls"] }
aws-sdk-s3 = "0.29"
aws-smithy-http = "0.56"
aws-credential-types = "0.56"
aws-types = "0.56"
axum = { version = "0.6.20", features = ["ws"] }
base64 = "0.13.0"
bincode = "1.3"
@@ -105,12 +106,12 @@ reqwest-middleware = "0.2.0"
reqwest-retry = "0.2.2"
routerify = "3"
rpds = "0.13"
rustls = "0.20"
rustls = "0.21"
rustls-pemfile = "1"
rustls-split = "0.3"
scopeguard = "1.1"
sysinfo = "0.29.2"
sentry = { version = "0.30", default-features = false, features = ["backtrace", "contexts", "panic", "rustls", "reqwest" ] }
sentry = { version = "0.31", default-features = false, features = ["backtrace", "contexts", "panic", "rustls", "reqwest" ] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1"
serde_with = "2.0"
@@ -125,11 +126,11 @@ sync_wrapper = "0.1.2"
tar = "0.4"
test-context = "0.1"
thiserror = "1.0"
tls-listener = { version = "0.6", features = ["rustls", "hyper-h1"] }
tls-listener = { version = "0.7", features = ["rustls", "hyper-h1"] }
tokio = { version = "1.17", features = ["macros"] }
tokio-io-timeout = "1.2.0"
tokio-postgres-rustls = "0.9.0"
tokio-rustls = "0.23"
tokio-postgres-rustls = "0.10.0"
tokio-rustls = "0.24"
tokio-stream = "0.1"
tokio-tar = "0.3"
tokio-util = { version = "0.7", features = ["io"] }
@@ -143,7 +144,7 @@ tracing-subscriber = { version = "0.3", default_features = false, features = ["s
url = "2.2"
uuid = { version = "1.2", features = ["v4", "serde"] }
walkdir = "2.3.2"
webpki-roots = "0.23"
webpki-roots = "0.25"
x509-parser = "0.15"

## TODO replace this with tracing
@@ -182,8 +183,8 @@ workspace_hack = { version = "0.1", path = "./workspace_hack/" }

## Build dependencies
criterion = "0.5.1"
rcgen = "0.10"
rstest = "0.17"
rcgen = "0.11"
rstest = "0.18"
tempfile = "3.4"
tonic-build = "0.9"
@@ -1,12 +1,39 @@
use anyhow::{anyhow, Result};
use anyhow::{anyhow, Ok, Result};
use postgres::Client;
use tokio_postgres::NoTls;
use tracing::{error, instrument};

use crate::compute::ComputeNode;

/// Create a special service table for availability checks
/// only if it does not exist already.
pub fn create_availability_check_data(client: &mut Client) -> Result<()> {
let query = "
DO $$
BEGIN
IF NOT EXISTS(
SELECT 1
FROM pg_catalog.pg_tables
WHERE tablename = 'health_check'
)
THEN
CREATE TABLE health_check (
id serial primary key,
updated_at timestamptz default now()
);
INSERT INTO health_check VALUES (1, now())
ON CONFLICT (id) DO UPDATE
SET updated_at = now();
END IF;
END
$$;";
client.execute(query, &[])?;

Ok(())
}

/// Update timestamp in a row in a special service table to check
/// that we can actually write some data in this particular timeline.
/// Create table if it's missing.
#[instrument(skip_all)]
pub async fn check_writability(compute: &ComputeNode) -> Result<()> {
// Connect to the database.
@@ -24,19 +51,15 @@ pub async fn check_writability(compute: &ComputeNode) -> Result<()> {
});

let query = "
CREATE TABLE IF NOT EXISTS health_check (
id serial primary key,
updated_at timestamptz default now()
);
INSERT INTO health_check VALUES (1, now())
ON CONFLICT (id) DO UPDATE
SET updated_at = now();";

let result = client.simple_query(query).await?;

if result.len() != 2 {
if result.len() != 1 {
return Err(anyhow::format_err!(
"expected 2 query results, but got {}",
"expected 1 query result, but got {}",
result.len()
));
}
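The rewritten writability check above sends only the `INSERT ... ON CONFLICT` statement (table creation moved into `create_availability_check_data`), so `simple_query` is now expected to return a single result message. A minimal standalone sketch of that behaviour, assuming a plain `tokio_postgres` connection string; the function and names here are illustrative, not part of the diff:

```rust
use tokio_postgres::{NoTls, SimpleQueryMessage};

async fn writability_probe(connstr: &str) -> anyhow::Result<()> {
    let (client, connection) = tokio_postgres::connect(connstr, NoTls).await?;
    // The connection future drives the socket; poll it in the background.
    tokio::spawn(connection);

    let messages = client
        .simple_query(
            "INSERT INTO health_check VALUES (1, now()) \
             ON CONFLICT (id) DO UPDATE SET updated_at = now();",
        )
        .await?;

    // One statement in the query string => one completion message expected.
    let completions = messages
        .iter()
        .filter(|m| matches!(m, SimpleQueryMessage::CommandComplete(_)))
        .count();
    anyhow::ensure!(completions == 1, "expected 1 query result, got {completions}");
    Ok(())
}
```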
@@ -27,6 +27,7 @@ use utils::measured_stream::MeasuredReader;

use remote_storage::{DownloadError, GenericRemoteStorage, RemotePath};

use crate::checker::create_availability_check_data;
use crate::pg_helpers::*;
use crate::spec::*;
use crate::sync_sk::{check_if_synced, ping_safekeeper};
@@ -696,6 +697,7 @@ impl ComputeNode {
handle_role_deletions(spec, self.connstr.as_str(), &mut client)?;
handle_grants(spec, self.connstr.as_str())?;
handle_extensions(spec, &mut client)?;
create_availability_check_data(&mut client)?;

// 'Close' connection
drop(client);
@@ -1078,7 +1080,8 @@ LIMIT 100",

let mut download_tasks = Vec::new();
for library in &libs_vec {
let (ext_name, ext_path) = remote_extensions.get_ext(library, true)?;
let (ext_name, ext_path) =
remote_extensions.get_ext(library, true, &self.build_tag, &self.pgversion)?;
download_tasks.push(self.download_extension(ext_name, ext_path));
}
let results = join_all(download_tasks).await;

@@ -180,7 +180,19 @@ pub async fn download_extension(
// Create extension control files from spec
pub fn create_control_files(remote_extensions: &RemoteExtSpec, pgbin: &str) {
let local_sharedir = Path::new(&get_pg_config("--sharedir", pgbin)).join("extension");
for ext_data in remote_extensions.extension_data.values() {
for (ext_name, ext_data) in remote_extensions.extension_data.iter() {
// Check if extension is present in public or custom.
// If not, then it is not allowed to be used by this compute.
if let Some(public_extensions) = &remote_extensions.public_extensions {
if !public_extensions.contains(ext_name) {
if let Some(custom_extensions) = &remote_extensions.custom_extensions {
if !custom_extensions.contains(ext_name) {
continue; // skip this extension, it is not allowed
}
}
}
}

for (control_name, control_content) in &ext_data.control_data {
let control_path = local_sharedir.join(control_name);
if !control_path.exists() {

@@ -169,7 +169,12 @@ async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body
}
};

remote_extensions.get_ext(&filename, is_library)
remote_extensions.get_ext(
&filename,
is_library,
&compute.build_tag,
&compute.pgversion,
)
};

match ext {

@@ -138,7 +138,13 @@ impl ComputeControlPlane {
mode,
tenant_id,
pg_version,
skip_pg_catalog_updates: false,
// We don't setup roles and databases in the spec locally, so we don't need to
// do catalog updates. Catalog updates also include check availability
// data creation. Yet, we have tests that check that size and db dump
// before and after start are the same. So, skip catalog updates,
// with this we basically test a case of waking up an idle compute, where
// we also skip catalog updates in the cloud.
skip_pg_catalog_updates: true,
});

ep.create_endpoint_dir()?;
@@ -152,7 +158,7 @@ impl ComputeControlPlane {
http_port,
pg_port,
pg_version,
skip_pg_catalog_updates: false,
skip_pg_catalog_updates: true,
})?,
)?;
std::fs::write(

@@ -89,6 +89,8 @@ impl RemoteExtSpec {
&self,
ext_name: &str,
is_library: bool,
build_tag: &str,
pg_major_version: &str,
) -> anyhow::Result<(String, RemotePath)> {
let mut real_ext_name = ext_name;
if is_library {
@@ -104,11 +106,32 @@ impl RemoteExtSpec {
.ok_or(anyhow::anyhow!("library {} is not found", lib_raw_name))?;
}

// Check if extension is present in public or custom.
// If not, then it is not allowed to be used by this compute.
if let Some(public_extensions) = &self.public_extensions {
if !public_extensions.contains(&real_ext_name.to_string()) {
if let Some(custom_extensions) = &self.custom_extensions {
if !custom_extensions.contains(&real_ext_name.to_string()) {
return Err(anyhow::anyhow!("extension {} is not found", real_ext_name));
}
}
}
}

match self.extension_data.get(real_ext_name) {
Some(ext_data) => Ok((
real_ext_name.to_string(),
RemotePath::from_string(&ext_data.archive_path)?,
)),
Some(_ext_data) => {
// Construct the path to the extension archive
// BUILD_TAG/PG_MAJOR_VERSION/extensions/EXTENSION_NAME.tar.zst
//
// Keep it in sync with path generation in
// https://github.com/neondatabase/build-custom-extensions/tree/main
let archive_path_str =
format!("{build_tag}/{pg_major_version}/extensions/{real_ext_name}.tar.zst");
Ok((
real_ext_name.to_string(),
RemotePath::from_string(&archive_path_str)?,
))
}
None => Err(anyhow::anyhow!(
"real_ext_name {} is not found",
real_ext_name
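The fallback arm above derives the archive key from the build tag and the Postgres major version instead of a path stored in the spec. A tiny illustration of the resulting key shape; only the format string comes from the diff, the concrete values below are invented:

```rust
// Illustration of the key layout used by the fallback branch above:
// BUILD_TAG/PG_MAJOR_VERSION/extensions/EXTENSION_NAME.tar.zst
fn extension_archive_key(build_tag: &str, pg_major_version: &str, ext_name: &str) -> String {
    format!("{build_tag}/{pg_major_version}/extensions/{ext_name}.tar.zst")
}

fn main() {
    // Hypothetical build tag, Postgres version string and extension name:
    let key = extension_archive_key("release-2023-08-15", "v15", "pg_hypothetical");
    assert_eq!(key, "release-2023-08-15/v15/extensions/pg_hypothetical.tar.zst");
}
```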
113  libs/utils/src/generation.rs (new file)
@@ -0,0 +1,113 @@
use std::fmt::Debug;

use serde::{Deserialize, Serialize};

/// Tenant generations are used to provide split-brain safety and allow
/// multiple pageservers to attach the same tenant concurrently.
///
/// See docs/rfcs/025-generation-numbers.md for detail on how generation
/// numbers are used.
#[derive(Copy, Clone, Eq, PartialEq, PartialOrd, Ord)]
pub enum Generation {
// Generations with this magic value will not add a suffix to S3 keys, and will not
// be included in persisted index_part.json. This value is only to be used
// during migration from pre-generation metadata to generation-aware metadata,
// and should eventually go away.
//
// A special Generation is used rather than always wrapping Generation in an Option,
// so that code handling generations doesn't have to be aware of the legacy
// case everywhere it touches a generation.
None,
// Generations with this magic value may never be used to construct S3 keys:
// we will panic if someone tries to. This is for Tenants in the "Broken" state,
// so that we can satisfy their constructor with a Generation without risking
// a code bug using it in an S3 write (broken tenants should never write)
Broken,
Valid(u32),
}

/// The Generation type represents a number associated with a Tenant, which
/// increments every time the tenant is attached to a new pageserver, or
/// an attached pageserver restarts.
///
/// It is included as a suffix in S3 keys, as a protection against split-brain
/// scenarios where pageservers might otherwise issue conflicting writes to
/// remote storage
impl Generation {
/// Create a new Generation that represents a legacy key format with
/// no generation suffix
pub fn none() -> Self {
Self::None
}

// Create a new generation that will panic if you try to use get_suffix
pub fn broken() -> Self {
Self::Broken
}

pub fn new(v: u32) -> Self {
Self::Valid(v)
}

pub fn is_none(&self) -> bool {
matches!(self, Self::None)
}

pub fn get_suffix(&self) -> String {
match self {
Self::Valid(v) => {
format!("-{:08x}", v)
}
Self::None => "".into(),
Self::Broken => {
panic!("Tried to use a broken generation");
}
}
}
}

impl Serialize for Generation {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
if let Self::Valid(v) = self {
v.serialize(serializer)
} else {
// We should never be asked to serialize a None or Broken. Structures
// that include an optional generation should convert None to an
// Option<Generation>::None
Err(serde::ser::Error::custom(
"Tried to serialize invalid generation ({self})",
))
}
}
}

impl<'de> Deserialize<'de> for Generation {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
Ok(Self::Valid(u32::deserialize(deserializer)?))
}
}

// We intentionally do not implement Display for Generation, to reduce the
// risk of a bug where the generation is used in a format!() string directly
// instead of using get_suffix().
impl Debug for Generation {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Valid(v) => {
write!(f, "{:08x}", v)
}
Self::None => {
write!(f, "<none>")
}
Self::Broken => {
write!(f, "<broken>")
}
}
}
}
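A brief, illustrative use of the new `Generation` type defined above (not part of the diff), showing how the suffix and the `Debug` form render:

```rust
// Illustration only; assumes the `utils` crate exposes the module added above.
use utils::generation::Generation;

fn main() {
    let current = Generation::new(0x2a);
    // Valid generations become an 8-digit hex suffix on remote object keys:
    assert_eq!(current.get_suffix(), "-0000002a");
    // Debug intentionally renders the bare hex value (there is no Display impl):
    assert_eq!(format!("{current:?}"), "0000002a");

    // Legacy, pre-generation keys carry no suffix:
    assert_eq!(Generation::none().get_suffix(), "");
    assert!(Generation::none().is_none());
    // Generation::broken().get_suffix() would panic by design.
}
```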
@@ -27,6 +27,9 @@ pub mod id;
// http endpoint utils
pub mod http;

// definition of the Generation type for pageserver attachment APIs
pub mod generation;

// common log initialisation routine
pub mod logging;

@@ -643,23 +643,6 @@ impl PageServerConf {
.join(METADATA_FILE_NAME)
}

/// Files on the remote storage are stored with paths, relative to the workdir.
/// That path includes in itself both tenant and timeline ids, allowing to have a unique remote storage path.
///
/// Errors if the path provided does not start from pageserver's workdir.
pub fn remote_path(&self, local_path: &Path) -> anyhow::Result<RemotePath> {
local_path
.strip_prefix(&self.workdir)
.context("Failed to strip workdir prefix")
.and_then(RemotePath::new)
.with_context(|| {
format!(
"Failed to resolve remote part of path {:?} for base {:?}",
local_path, self.workdir
)
})
}

/// Turns storage remote path of a file into its local path.
pub fn local_path(&self, remote_path: &RemotePath) -> PathBuf {
remote_path.with_base(&self.workdir)

@@ -469,7 +469,9 @@ impl PageServerHandler {
// Create empty timeline
info!("creating new timeline");
let tenant = get_active_tenant_with_timeout(tenant_id, &ctx).await?;
let timeline = tenant.create_empty_timeline(timeline_id, base_lsn, pg_version, &ctx)?;
let timeline = tenant
.create_empty_timeline(timeline_id, base_lsn, pg_version, &ctx)
.await?;

// TODO mark timeline as not ready until it reaches end_lsn.
// We might have some wal to import as well, and we should prevent compute
@@ -68,7 +68,7 @@ use crate::task_mgr;
use crate::task_mgr::TaskKind;
use crate::tenant::config::TenantConfOpt;
use crate::tenant::metadata::load_metadata;
use crate::tenant::remote_timeline_client::index::IndexPart;
pub use crate::tenant::remote_timeline_client::index::IndexPart;
use crate::tenant::remote_timeline_client::MaybeDeletedIndexPart;
use crate::tenant::storage_layer::DeltaLayer;
use crate::tenant::storage_layer::ImageLayer;
@@ -85,6 +85,7 @@ pub use pageserver_api::models::TenantState;
use toml_edit;
use utils::{
crashsafe,
generation::Generation,
id::{TenantId, TimelineId},
lsn::{Lsn, RecordLsn},
};
@@ -178,6 +179,11 @@ pub struct Tenant {
tenant_conf: Arc<RwLock<TenantConfOpt>>,

tenant_id: TenantId,

/// The remote storage generation, used to protect S3 objects from split-brain.
/// Does not change over the lifetime of the [`Tenant`] object.
generation: Generation,

timelines: Mutex<HashMap<TimelineId, Arc<Timeline>>>,
// This mutex prevents creation of new timelines during GC.
// Adding yet another mutex (in addition to `timelines`) is needed because holding
@@ -442,6 +448,7 @@ impl Tenant {
up_to_date_metadata,
first_save,
)
.await
.context("save_metadata")?;
}

@@ -522,6 +529,7 @@ impl Tenant {
pub(crate) fn spawn_attach(
conf: &'static PageServerConf,
tenant_id: TenantId,
generation: Generation,
broker_client: storage_broker::BrokerClientChannel,
tenants: &'static tokio::sync::RwLock<TenantsMap>,
remote_storage: GenericRemoteStorage,
@@ -538,6 +546,7 @@ impl Tenant {
tenant_conf,
wal_redo_manager,
tenant_id,
generation,
Some(remote_storage.clone()),
));

@@ -648,12 +657,8 @@ impl Tenant {
.as_ref()
.ok_or_else(|| anyhow::anyhow!("cannot attach without remote storage"))?;

let remote_timeline_ids = remote_timeline_client::list_remote_timelines(
remote_storage,
self.conf,
self.tenant_id,
)
.await?;
let remote_timeline_ids =
remote_timeline_client::list_remote_timelines(remote_storage, self.tenant_id).await?;

info!("found {} timelines", remote_timeline_ids.len());

@@ -665,6 +670,7 @@ impl Tenant {
self.conf,
self.tenant_id,
timeline_id,
self.generation,
);
part_downloads.spawn(
async move {
@@ -851,6 +857,7 @@ impl Tenant {
TenantConfOpt::default(),
wal_redo_manager,
tenant_id,
Generation::broken(),
None,
))
}
@@ -868,6 +875,7 @@ impl Tenant {
pub(crate) fn spawn_load(
conf: &'static PageServerConf,
tenant_id: TenantId,
generation: Generation,
resources: TenantSharedResources,
init_order: Option<InitializationOrder>,
tenants: &'static tokio::sync::RwLock<TenantsMap>,
@@ -893,6 +901,7 @@ impl Tenant {
tenant_conf,
wal_redo_manager,
tenant_id,
generation,
remote_storage.clone(),
);
let tenant = Arc::new(tenant);
@@ -1442,7 +1451,7 @@ impl Tenant {
/// For tests, use `DatadirModification::init_empty_test_timeline` + `commit` to setup the
/// minimum amount of keys required to get a writable timeline.
/// (Without it, `put` might fail due to `repartition` failing.)
pub fn create_empty_timeline(
pub async fn create_empty_timeline(
&self,
new_timeline_id: TimelineId,
initdb_lsn: Lsn,
@@ -1454,10 +1463,10 @@ impl Tenant {
"Cannot create empty timelines on inactive tenant"
);

let timelines = self.timelines.lock().unwrap();
let timeline_uninit_mark = self.create_timeline_uninit_mark(new_timeline_id, &timelines)?;
drop(timelines);

let timeline_uninit_mark = {
let timelines = self.timelines.lock().unwrap();
self.create_timeline_uninit_mark(new_timeline_id, &timelines)?
};
let new_metadata = TimelineMetadata::new(
// Initialize disk_consistent LSN to 0, The caller must import some data to
// make it valid, before calling finish_creation()
@@ -1476,6 +1485,7 @@ impl Tenant {
initdb_lsn,
None,
)
.await
}

/// Helper for unit tests to create an empty timeline.
@@ -1491,7 +1501,9 @@ impl Tenant {
pg_version: u32,
ctx: &RequestContext,
) -> anyhow::Result<Arc<Timeline>> {
let uninit_tl = self.create_empty_timeline(new_timeline_id, initdb_lsn, pg_version, ctx)?;
let uninit_tl = self
.create_empty_timeline(new_timeline_id, initdb_lsn, pg_version, ctx)
.await?;
let tline = uninit_tl.raw_timeline().expect("we just created it");
assert_eq!(tline.get_last_record_lsn(), Lsn(0));

@@ -2274,6 +2286,7 @@ impl Tenant {
ancestor,
new_timeline_id,
self.tenant_id,
self.generation,
Arc::clone(&self.walredo_mgr),
resources,
pg_version,
@@ -2291,6 +2304,7 @@ impl Tenant {
tenant_conf: TenantConfOpt,
walredo_mgr: Arc<dyn WalRedoManager + Send + Sync>,
tenant_id: TenantId,
generation: Generation,
remote_storage: Option<GenericRemoteStorage>,
) -> Tenant {
let (state, mut rx) = watch::channel(state);
@@ -2349,6 +2363,7 @@ impl Tenant {

Tenant {
tenant_id,
generation,
conf,
// using now here is good enough approximation to catch tenants with really long
// activation times.
@@ -2786,13 +2801,15 @@ impl Tenant {
src_timeline.pg_version,
);

let uninitialized_timeline = self.prepare_new_timeline(
dst_id,
&metadata,
timeline_uninit_mark,
start_lsn + 1,
Some(Arc::clone(src_timeline)),
)?;
let uninitialized_timeline = self
.prepare_new_timeline(
dst_id,
&metadata,
timeline_uninit_mark,
start_lsn + 1,
Some(Arc::clone(src_timeline)),
)
.await?;

let new_timeline = uninitialized_timeline.finish_creation()?;

@@ -2870,13 +2887,15 @@ impl Tenant {
pgdata_lsn,
pg_version,
);
let raw_timeline = self.prepare_new_timeline(
timeline_id,
&new_metadata,
timeline_uninit_mark,
pgdata_lsn,
None,
)?;
let raw_timeline = self
.prepare_new_timeline(
timeline_id,
&new_metadata,
timeline_uninit_mark,
pgdata_lsn,
None,
)
.await?;

let tenant_id = raw_timeline.owning_tenant.tenant_id;
let unfinished_timeline = raw_timeline.raw_timeline()?;
@@ -2931,6 +2950,7 @@ impl Tenant {
self.conf,
self.tenant_id,
timeline_id,
self.generation,
);
Some(remote_client)
} else {
@@ -2946,7 +2966,7 @@ impl Tenant {
/// at 'disk_consistent_lsn'. After any initial data has been imported, call
/// `finish_creation` to insert the Timeline into the timelines map and to remove the
/// uninit mark file.
fn prepare_new_timeline(
async fn prepare_new_timeline(
&self,
new_timeline_id: TimelineId,
new_metadata: &TimelineMetadata,
@@ -2974,8 +2994,9 @@ impl Tenant {

timeline_struct.init_empty_layer_map(start_lsn);

if let Err(e) =
self.create_timeline_files(&uninit_mark.timeline_path, &new_timeline_id, new_metadata)
if let Err(e) = self
.create_timeline_files(&uninit_mark.timeline_path, &new_timeline_id, new_metadata)
.await
{
error!("Failed to create initial files for timeline {tenant_id}/{new_timeline_id}, cleaning up: {e:?}");
cleanup_timeline_directory(uninit_mark);
@@ -2991,7 +3012,7 @@ impl Tenant {
))
}

fn create_timeline_files(
async fn create_timeline_files(
&self,
timeline_path: &Path,
new_timeline_id: &TimelineId,
@@ -3010,6 +3031,7 @@ impl Tenant {
new_metadata,
true,
)
.await
.context("Failed to create timeline metadata")?;
Ok(())
}
@@ -3454,6 +3476,7 @@ pub mod harness {
pub conf: &'static PageServerConf,
pub tenant_conf: TenantConf,
pub tenant_id: TenantId,
pub generation: Generation,
}

static LOG_HANDLE: OnceCell<()> = OnceCell::new();
@@ -3495,6 +3518,7 @@ pub mod harness {
conf,
tenant_conf,
tenant_id,
generation: Generation::new(0xdeadbeef),
})
}

@@ -3521,6 +3545,7 @@ pub mod harness {
TenantConfOpt::from(self.tenant_conf),
walredo_mgr,
self.tenant_id,
self.generation,
remote_storage,
));
tenant
@@ -3634,7 +3659,10 @@ mod tests {
.create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
.await?;

match tenant.create_empty_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx) {
match tenant
.create_empty_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
.await
{
Ok(_) => panic!("duplicate timeline creation should fail"),
Err(e) => assert_eq!(
e.to_string(),
@@ -4474,8 +4502,9 @@ mod tests {
.await;

let initdb_lsn = Lsn(0x20);
let utline =
tenant.create_empty_timeline(TIMELINE_ID, initdb_lsn, DEFAULT_PG_VERSION, &ctx)?;
let utline = tenant
.create_empty_timeline(TIMELINE_ID, initdb_lsn, DEFAULT_PG_VERSION, &ctx)
.await?;
let tline = utline.raw_timeline().unwrap();

// Spawn flush loop now so that we can set the `expect_initdb_optimization`
@@ -4540,8 +4569,9 @@ mod tests {
let harness = TenantHarness::create(name)?;
{
let (tenant, ctx) = harness.load().await;
let tline =
tenant.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION, &ctx)?;
let tline = tenant
.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION, &ctx)
.await?;
// Keeps uninit mark in place
std::mem::forget(tline);
}
@@ -96,18 +96,12 @@ pub trait BlobWriter {
/// An implementation of BlobWriter to write blobs to anything that
/// implements std::io::Write.
///
pub struct WriteBlobWriter<W>
where
W: std::io::Write,
{
pub struct WriteBlobWriter<W> {
inner: W,
offset: u64,
}

impl<W> WriteBlobWriter<W>
where
W: std::io::Write,
{
impl<W> WriteBlobWriter<W> {
pub fn new(inner: W, start_offset: u64) -> Self {
WriteBlobWriter {
inner,
@@ -129,33 +123,38 @@ where
}
}

impl<W> BlobWriter for WriteBlobWriter<W>
where
W: std::io::Write,
{
fn write_blob(&mut self, srcbuf: &[u8]) -> Result<u64, Error> {
let offset = self.offset;
macro_rules! write_blob_impl {
(WriteBlobWriter<$ty:ty>) => {
impl WriteBlobWriter<$ty> {
pub async fn write_blob(&mut self, srcbuf: &[u8]) -> Result<u64, Error> {
use std::io::Write;
let offset = self.offset;

if srcbuf.len() < 128 {
// Short blob. Write a 1-byte length header
let len_buf = srcbuf.len() as u8;
self.inner.write_all(&[len_buf])?;
self.offset += 1;
} else {
// Write a 4-byte length header
if srcbuf.len() > 0x7fff_ffff {
return Err(Error::new(
ErrorKind::Other,
format!("blob too large ({} bytes)", srcbuf.len()),
));
if srcbuf.len() < 128 {
// Short blob. Write a 1-byte length header
let len_buf = srcbuf.len() as u8;
self.inner.write_all(&[len_buf])?;
self.offset += 1;
} else {
// Write a 4-byte length header
if srcbuf.len() > 0x7fff_ffff {
return Err(Error::new(
ErrorKind::Other,
format!("blob too large ({} bytes)", srcbuf.len()),
));
}
let mut len_buf = ((srcbuf.len()) as u32).to_be_bytes();
len_buf[0] |= 0x80;
self.inner.write_all(&len_buf)?;
self.offset += 4;
}
self.inner.write_all(srcbuf)?;
self.offset += srcbuf.len() as u64;
Ok(offset)
}
let mut len_buf = ((srcbuf.len()) as u32).to_be_bytes();
len_buf[0] |= 0x80;
self.inner.write_all(&len_buf)?;
self.offset += 4;
}
self.inner.write_all(srcbuf)?;
self.offset += srcbuf.len() as u64;
Ok(offset)
}
};
}

write_blob_impl!(WriteBlobWriter<crate::tenant::io::BufWriter<crate::virtual_file::VirtualFile>>);
write_blob_impl!(WriteBlobWriter<crate::virtual_file::VirtualFile>);
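The length-header format handled above is easy to show in isolation: blobs under 128 bytes get a one-byte header, larger blobs a 4-byte big-endian header with the top bit set as a marker. A standalone sketch of just that encoding (not the pageserver writer itself):

```rust
// Standalone illustration of the blob length-header encoding used above.
fn encode_blob_header(len: usize) -> Vec<u8> {
    if len < 128 {
        // Short blob: single length byte, high bit clear.
        vec![len as u8]
    } else {
        // Long blob: 4-byte big-endian length with the high bit set as a marker.
        assert!(len <= 0x7fff_ffff, "blob too large ({len} bytes)");
        let mut buf = (len as u32).to_be_bytes();
        buf[0] |= 0x80;
        buf.to_vec()
    }
}

fn main() {
    assert_eq!(encode_blob_header(5), vec![0x05]);
    assert_eq!(encode_blob_header(0x1234), vec![0x80, 0x00, 0x12, 0x34]);
}
```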
@@ -7,9 +7,7 @@ use super::storage_layer::delta_layer::{Adapter, DeltaLayerInner};
use crate::page_cache::{self, PageReadGuard, ReadBufResult, PAGE_SZ};
use crate::virtual_file::VirtualFile;
use bytes::Bytes;
use std::fs::File;
use std::ops::{Deref, DerefMut};
use std::os::unix::fs::FileExt;

/// This is implemented by anything that can read 8 kB (PAGE_SZ)
/// blocks, using the page cache
@@ -74,7 +72,6 @@ impl<'a> Deref for BlockLease<'a> {
/// Unlike traits, we also support the read function to be async though.
pub(crate) enum BlockReaderRef<'a> {
FileBlockReaderVirtual(&'a FileBlockReader<VirtualFile>),
FileBlockReaderFile(&'a FileBlockReader<std::fs::File>),
EphemeralFile(&'a EphemeralFile),
Adapter(Adapter<&'a DeltaLayerInner>),
#[cfg(test)]
@@ -87,7 +84,6 @@ impl<'a> BlockReaderRef<'a> {
use BlockReaderRef::*;
match self {
FileBlockReaderVirtual(r) => r.read_blk(blknum).await,
FileBlockReaderFile(r) => r.read_blk(blknum).await,
EphemeralFile(r) => r.read_blk(blknum).await,
Adapter(r) => r.read_blk(blknum).await,
#[cfg(test)]
@@ -150,18 +146,15 @@ pub struct FileBlockReader<F> {
file_id: page_cache::FileId,
}

impl<F> FileBlockReader<F>
where
F: FileExt,
{
pub fn new(file: F) -> Self {
impl FileBlockReader<VirtualFile> {
pub fn new(file: VirtualFile) -> Self {
let file_id = page_cache::next_file_id();

FileBlockReader { file_id, file }
}

/// Read a page from the underlying file into given buffer.
fn fill_buffer(&self, buf: &mut [u8], blkno: u32) -> Result<(), std::io::Error> {
async fn fill_buffer(&self, buf: &mut [u8], blkno: u32) -> Result<(), std::io::Error> {
assert!(buf.len() == PAGE_SZ);
self.file.read_exact_at(buf, blkno as u64 * PAGE_SZ as u64)
}
@@ -185,7 +178,7 @@ where
ReadBufResult::Found(guard) => break Ok(guard.into()),
ReadBufResult::NotFound(mut write_guard) => {
// Read the page from disk into the buffer
self.fill_buffer(write_guard.deref_mut(), blknum)?;
self.fill_buffer(write_guard.deref_mut(), blknum).await?;
write_guard.mark_valid();

// Swap for read lock
@@ -196,12 +189,6 @@ where
}
}

impl BlockReader for FileBlockReader<File> {
fn block_cursor(&self) -> BlockCursor<'_> {
BlockCursor::new(BlockReaderRef::FileBlockReaderFile(self))
}
}

impl BlockReader for FileBlockReader<VirtualFile> {
fn block_cursor(&self) -> BlockCursor<'_> {
BlockCursor::new(BlockReaderRef::FileBlockReaderVirtual(self))

@@ -9,7 +9,6 @@ use std::cmp::min;
use std::fs::OpenOptions;
use std::io::{self, ErrorKind};
use std::ops::DerefMut;
use std::os::unix::prelude::FileExt;
use std::path::PathBuf;
use std::sync::atomic::AtomicU64;
use tracing::*;
@@ -128,10 +127,15 @@ impl EphemeralFile {
self.off += n;
src_remaining = &src_remaining[n..];
if self.off == PAGE_SZ {
match self.ephemeral_file.file.write_all_at(
&self.ephemeral_file.mutable_tail,
self.blknum as u64 * PAGE_SZ as u64,
) {
match self
.ephemeral_file
.file
.write_all_at(
&self.ephemeral_file.mutable_tail,
self.blknum as u64 * PAGE_SZ as u64,
)
.await
{
Ok(_) => {
// Pre-warm the page cache with what we just wrote.
// This isn't necessary for coherency/correctness, but it's how we've always done it.
@@ -26,7 +26,7 @@
//! recovered from this file. This is tracked in
//! <https://github.com/neondatabase/neon/issues/4418>

use std::io::{self, Read, Write};
use std::io::{self, Write};

use crate::virtual_file::VirtualFile;
use anyhow::Result;
@@ -151,11 +151,12 @@ impl Manifest {
/// Load a manifest. Returns the manifest and a list of operations. If the manifest is corrupted,
/// the bool flag will be set to true and the user is responsible to reconstruct a new manifest and
/// backup the current one.
pub fn load(
mut file: VirtualFile,
pub async fn load(
file: VirtualFile,
) -> Result<(Self, Vec<Operation>, ManifestPartiallyCorrupted), ManifestLoadError> {
let mut buf = vec![];
file.read_to_end(&mut buf).map_err(ManifestLoadError::Io)?;
file.read_exact_at(&mut buf, 0)
.map_err(ManifestLoadError::Io)?;

// Read manifest header
let mut buf = Bytes::from(buf);
@@ -241,8 +242,8 @@ mod tests {

use super::*;

#[test]
fn test_read_manifest() {
#[tokio::test]
async fn test_read_manifest() {
let testdir = crate::config::PageServerConf::test_repo_dir("test_read_manifest");
std::fs::create_dir_all(&testdir).unwrap();
let file = VirtualFile::create(&testdir.join("MANIFEST")).unwrap();
@@ -274,7 +275,7 @@ mod tests {
.truncate(false),
)
.unwrap();
let (mut manifest, operations, corrupted) = Manifest::load(file).unwrap();
let (mut manifest, operations, corrupted) = Manifest::load(file).await.unwrap();
assert!(!corrupted.0);
assert_eq!(operations.len(), 2);
assert_eq!(
@@ -306,7 +307,7 @@ mod tests {
.truncate(false),
)
.unwrap();
let (_manifest, operations, corrupted) = Manifest::load(file).unwrap();
let (_manifest, operations, corrupted) = Manifest::load(file).await.unwrap();
assert!(!corrupted.0);
assert_eq!(operations.len(), 3);
assert_eq!(&operations[0], &Operation::Snapshot(snapshot, Lsn::from(0)));

@@ -9,9 +9,9 @@
//! [`remote_timeline_client`]: super::remote_timeline_client

use std::fs::{File, OpenOptions};
use std::io::{self, Write};
use std::io;

use anyhow::{bail, ensure, Context};
use anyhow::{ensure, Context};
use serde::{de::Error, Deserialize, Serialize, Serializer};
use thiserror::Error;
use tracing::info_span;
@@ -255,7 +255,7 @@ impl Serialize for TimelineMetadata {
}

/// Save timeline metadata to file
pub fn save_metadata(
pub async fn save_metadata(
conf: &'static PageServerConf,
tenant_id: &TenantId,
timeline_id: &TimelineId,
@@ -273,10 +273,7 @@ pub fn save_metadata(

let metadata_bytes = data.to_bytes().context("Failed to get metadata bytes")?;

if file.write(&metadata_bytes)? != metadata_bytes.len() {
bail!("Could not write all the metadata bytes in a single call");
}
file.sync_all()?;
file.write_and_fsync(&metadata_bytes)?;

// fsync the parent directory to ensure the directory entry is durable
if first_save {
@@ -25,6 +25,7 @@ use crate::tenant::{create_tenant_files, CreateTenantFilesMode, Tenant, TenantSt
use crate::{InitializationOrder, IGNORED_TENANT_FILE_NAME};

use utils::fs_ext::PathExt;
use utils::generation::Generation;
use utils::id::{TenantId, TimelineId};

use super::delete::DeleteTenantError;
@@ -202,6 +203,7 @@ pub(crate) fn schedule_local_tenant_processing(
match Tenant::spawn_attach(
conf,
tenant_id,
Generation::none(),
resources.broker_client,
tenants,
remote_storage,
@@ -224,7 +226,15 @@ pub(crate) fn schedule_local_tenant_processing(
} else {
info!("tenant {tenant_id} is assumed to be loadable, starting load operation");
// Start loading the tenant into memory. It will initially be in Loading state.
Tenant::spawn_load(conf, tenant_id, resources, init_order, tenants, ctx)
Tenant::spawn_load(
conf,
tenant_id,
Generation::none(),
resources,
init_order,
tenants,
ctx,
)
};
Ok(tenant)
}
@@ -216,7 +216,7 @@ use utils::backoff::{
};

use std::collections::{HashMap, VecDeque};
use std::path::Path;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, Mutex};

@@ -235,6 +235,7 @@ use crate::task_mgr::shutdown_token;
use crate::tenant::debug_assert_current_span_has_tenant_and_timeline_id;
use crate::tenant::remote_timeline_client::index::LayerFileMetadata;
use crate::tenant::upload_queue::Delete;
use crate::tenant::TIMELINES_SEGMENT_NAME;
use crate::{
config::PageServerConf,
task_mgr,
@@ -252,6 +253,7 @@ use self::index::IndexPart;

use super::storage_layer::LayerFileName;
use super::upload_queue::SetDeletedFlagProgress;
use super::Generation;

// Occasional network issues and such can cause remote operations to fail, and
// that's expected. If a download fails, we log it at info-level, and retry.
@@ -315,6 +317,7 @@ pub struct RemoteTimelineClient {

tenant_id: TenantId,
timeline_id: TimelineId,
generation: Generation,

upload_queue: Mutex<UploadQueue>,

@@ -335,12 +338,14 @@ impl RemoteTimelineClient {
conf: &'static PageServerConf,
tenant_id: TenantId,
timeline_id: TimelineId,
generation: Generation,
) -> RemoteTimelineClient {
RemoteTimelineClient {
conf,
runtime: BACKGROUND_RUNTIME.handle().to_owned(),
tenant_id,
timeline_id,
generation,
storage_impl: remote_storage,
upload_queue: Mutex::new(UploadQueue::Uninitialized),
metrics: Arc::new(RemoteTimelineClientMetrics::new(&tenant_id, &timeline_id)),
@@ -449,10 +454,10 @@ impl RemoteTimelineClient {
);

let index_part = download::download_index_part(
self.conf,
&self.storage_impl,
&self.tenant_id,
&self.timeline_id,
self.generation,
)
.measure_remote_op(
self.tenant_id,
@@ -650,22 +655,41 @@ impl RemoteTimelineClient {
// from latest_files, but not yet scheduled for deletion. Use a closure
// to syntactically forbid ? or bail! calls here.
let no_bail_here = || {
for name in names {
if upload_queue.latest_files.remove(name).is_some() {
upload_queue.latest_files_changes_since_metadata_upload_scheduled += 1;
}
}
// Decorate our list of names with each name's generation, dropping
// makes that are unexpectedly missing from our metadata.
let with_generations: Vec<_> = names
.iter()
.filter_map(|name| {
// Remove from latest_files, learning the file's remote generation in the process
let meta = upload_queue.latest_files.remove(name);

if let Some(meta) = meta {
upload_queue.latest_files_changes_since_metadata_upload_scheduled += 1;
Some((name, meta.generation))
} else {
// This can only happen if we forgot to to schedule the file upload
// before scheduling the delete. Log it because it is a rare/strange
// situation, and in case something is misbehaving, we'd like to know which
// layers experienced this.
info!(
"Deleting layer {name} not found in latest_files list, never uploaded?"
);
None
}
})
.collect();

if upload_queue.latest_files_changes_since_metadata_upload_scheduled > 0 {
self.schedule_index_upload(upload_queue, metadata);
}

// schedule the actual deletions
for name in names {
for (name, generation) in with_generations {
let op = UploadOp::Delete(Delete {
file_kind: RemoteOpFileKind::Layer,
layer_file_name: name.clone(),
scheduled_from_timeline_delete: false,
generation,
});
self.calls_unfinished_metric_begin(&op);
upload_queue.queued_operations.push_back(op);
@@ -761,10 +785,10 @@ impl RemoteTimelineClient {
backoff::retry(
|| {
upload::upload_index_part(
self.conf,
&self.storage_impl,
&self.tenant_id,
&self.timeline_id,
self.generation,
&index_part_with_deleted_at,
)
},
@@ -822,12 +846,14 @@ impl RemoteTimelineClient {
.reserve(stopped.upload_queue_for_deletion.latest_files.len());

// schedule the actual deletions
for name in stopped.upload_queue_for_deletion.latest_files.keys() {
for (name, meta) in &stopped.upload_queue_for_deletion.latest_files {
let op = UploadOp::Delete(Delete {
file_kind: RemoteOpFileKind::Layer,
layer_file_name: name.clone(),
scheduled_from_timeline_delete: true,
generation: meta.generation,
});

self.calls_unfinished_metric_begin(&op);
stopped
.upload_queue_for_deletion
@@ -850,8 +876,7 @@ impl RemoteTimelineClient {

// Do not delete index part yet, it is needed for possible retry. If we remove it first
// and retry will arrive to different pageserver there wont be any traces of it on remote storage
let timeline_path = self.conf.timeline_path(&self.tenant_id, &self.timeline_id);
let timeline_storage_path = self.conf.remote_path(&timeline_path)?;
let timeline_storage_path = remote_timeline_path(&self.tenant_id, &self.timeline_id);

let remaining = backoff::retry(
|| async {
@@ -1055,15 +1080,17 @@ impl RemoteTimelineClient {

let upload_result: anyhow::Result<()> = match &task.op {
UploadOp::UploadLayer(ref layer_file_name, ref layer_metadata) => {
let path = &self
let path = self
.conf
.timeline_path(&self.tenant_id, &self.timeline_id)
.join(layer_file_name.file_name());

upload::upload_timeline_layer(
self.conf,
&self.storage_impl,
path,
&path,
layer_metadata,
self.generation,
)
.measure_remote_op(
self.tenant_id,
@@ -1085,10 +1112,10 @@ impl RemoteTimelineClient {
};

let res = upload::upload_index_part(
self.conf,
&self.storage_impl,
&self.tenant_id,
&self.timeline_id,
self.generation,
index_part,
)
.measure_remote_op(
@@ -1113,7 +1140,7 @@ impl RemoteTimelineClient {
.conf
.timeline_path(&self.tenant_id, &self.timeline_id)
.join(delete.layer_file_name.file_name());
delete::delete_layer(self.conf, &self.storage_impl, path)
delete::delete_layer(self.conf, &self.storage_impl, path, delete.generation)
.measure_remote_op(
self.tenant_id,
self.timeline_id,
@@ -1360,6 +1387,71 @@ impl RemoteTimelineClient {
}
}

pub fn remote_timelines_path(tenant_id: &TenantId) -> RemotePath {
let path = format!("tenants/{tenant_id}/{TIMELINES_SEGMENT_NAME}");
RemotePath::from_string(&path).expect("Failed to construct path")
}

pub fn remote_timeline_path(tenant_id: &TenantId, timeline_id: &TimelineId) -> RemotePath {
remote_timelines_path(tenant_id).join(&PathBuf::from(timeline_id.to_string()))
}

pub fn remote_layer_path(
tenant_id: &TenantId,
timeline_id: &TimelineId,
layer_file_name: &LayerFileName,
layer_meta: &LayerFileMetadata,
) -> RemotePath {
// Generation-aware key format
let path = format!(
"tenants/{tenant_id}/{TIMELINES_SEGMENT_NAME}/{timeline_id}/{0}{1}",
layer_file_name.file_name(),
layer_meta.generation.get_suffix()
);

RemotePath::from_string(&path).expect("Failed to construct path")
}

pub fn remote_index_path(
tenant_id: &TenantId,
timeline_id: &TimelineId,
generation: Generation,
) -> RemotePath {
RemotePath::from_string(&format!(
"tenants/{tenant_id}/{TIMELINES_SEGMENT_NAME}/{timeline_id}/{0}{1}",
IndexPart::FILE_NAME,
generation.get_suffix()
))
.expect("Failed to construct path")
}

/// Files on the remote storage are stored with paths, relative to the workdir.
/// That path includes in itself both tenant and timeline ids, allowing to have a unique remote storage path.
///
/// Errors if the path provided does not start from pageserver's workdir.
pub fn remote_path(
conf: &PageServerConf,
local_path: &Path,
generation: Generation,
) -> anyhow::Result<RemotePath> {
let stripped = local_path
.strip_prefix(&conf.workdir)
.context("Failed to strip workdir prefix")?;

let suffixed = format!(
"{0}{1}",
stripped.to_string_lossy(),
generation.get_suffix()
);

RemotePath::new(&PathBuf::from(suffixed)).with_context(|| {
format!(
"to resolve remote part of path {:?} for base {:?}",
local_path, conf.workdir
)
})
}
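Taken together, the helpers above define the generation-aware remote key layout. A rough sketch of the keys they produce; the IDs and generation value below are invented, and the literal `timelines` stands in for `TIMELINES_SEGMENT_NAME`:

```rust
// Illustrative key shapes only; IDs and the generation value are made up.
fn main() {
    let tenant_id = "3aa8fcc61f6f357410b7de754b1d9001";
    let timeline_id = "de200bd42b49cc1814412c7e592dd6e9";
    let suffix = "-0000002a"; // Generation::new(0x2a).get_suffix()

    // remote_timeline_path(...)
    let timeline_prefix = format!("tenants/{tenant_id}/timelines/{timeline_id}");

    // remote_index_path(...): index_part.json gains the generation suffix.
    let index_key = format!("{timeline_prefix}/index_part.json{suffix}");

    // remote_layer_path(...): a layer file gains the suffix of the generation
    // recorded in its LayerFileMetadata.
    let layer_key = format!("{timeline_prefix}/<layer file name>{suffix}");

    println!("{index_key}\n{layer_key}");
}
```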
#[cfg(test)]
mod tests {
use super::*;
@@ -1367,7 +1459,7 @@ mod tests {
context::RequestContext,
tenant::{
harness::{TenantHarness, TIMELINE_ID},
Tenant, Timeline,
Generation, Tenant, Timeline,
},
DEFAULT_PG_VERSION,
};
@@ -1409,8 +1501,11 @@ mod tests {
assert_eq!(avec, bvec);
}

fn assert_remote_files(expected: &[&str], remote_path: &Path) {
let mut expected: Vec<String> = expected.iter().map(|x| String::from(*x)).collect();
fn assert_remote_files(expected: &[&str], remote_path: &Path, generation: Generation) {
let mut expected: Vec<String> = expected
.iter()
.map(|x| format!("{}{}", x, generation.get_suffix()))
.collect();
expected.sort();

let mut found: Vec<String> = Vec::new();
@@ -1461,6 +1556,8 @@ mod tests {
storage: RemoteStorageKind::LocalFs(remote_fs_dir.clone()),
};

let generation = Generation::new(0xdeadbeef);

let storage = GenericRemoteStorage::from_config(&storage_config).unwrap();

let client = Arc::new(RemoteTimelineClient {
@@ -1468,6 +1565,7 @@ mod tests {
runtime: tokio::runtime::Handle::current(),
tenant_id: harness.tenant_id,
timeline_id: TIMELINE_ID,
generation,
storage_impl: storage,
upload_queue: Mutex::new(UploadQueue::Uninitialized),
metrics: Arc::new(RemoteTimelineClientMetrics::new(
@@ -1526,6 +1624,8 @@ mod tests {
.init_upload_queue_for_empty_remote(&metadata)
.unwrap();

let generation = Generation::new(0xdeadbeef);

// Create a couple of dummy files, schedule upload for them
let layer_file_name_1: LayerFileName = "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap();
let layer_file_name_2: LayerFileName = "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D9-00000000016B5A52".parse().unwrap();
@@ -1545,13 +1645,13 @@ mod tests {
client
.schedule_layer_file_upload(
&layer_file_name_1,
&LayerFileMetadata::new(content_1.len() as u64),
&LayerFileMetadata::new(content_1.len() as u64, generation),
)
.unwrap();
client
.schedule_layer_file_upload(
&layer_file_name_2,
&LayerFileMetadata::new(content_2.len() as u64),
&LayerFileMetadata::new(content_2.len() as u64, generation),
)
.unwrap();

@@ -1615,7 +1715,7 @@ mod tests {
client
.schedule_layer_file_upload(
&layer_file_name_3,
&LayerFileMetadata::new(content_3.len() as u64),
&LayerFileMetadata::new(content_3.len() as u64, generation),
)
.unwrap();
client
@@ -1639,6 +1739,7 @@ mod tests {
"index_part.json",
],
&remote_timeline_dir,
generation,
);

// Finish them
@@ -1651,6 +1752,7 @@ mod tests {
"index_part.json",
],
&remote_timeline_dir,
generation,
);
}

@@ -1703,12 +1805,14 @@ mod tests {

// Test

let generation = Generation::new(0xdeadbeef);

let init = get_bytes_started_stopped();

client
.schedule_layer_file_upload(
&layer_file_name_1,
&LayerFileMetadata::new(content_1.len() as u64),
&LayerFileMetadata::new(content_1.len() as u64, generation),
)
.unwrap();
@@ -5,25 +5,30 @@ use tracing::debug;

use remote_storage::GenericRemoteStorage;

use crate::config::PageServerConf;
use crate::{
config::PageServerConf,
tenant::{remote_timeline_client::remote_path, Generation},
};

pub(super) async fn delete_layer<'a>(
conf: &'static PageServerConf,
storage: &'a GenericRemoteStorage,
local_layer_path: &'a Path,
generation: Generation,
) -> anyhow::Result<()> {
fail::fail_point!("before-delete-layer", |_| {
anyhow::bail!("failpoint before-delete-layer")
});
debug!("Deleting layer from remote storage: {local_layer_path:?}",);

let path_to_delete = conf.remote_path(local_layer_path)?;
let path_to_delete = remote_path(conf, local_layer_path, generation)?;

// We don't want to print an error if the delete failed if the file has
// already been deleted. Thankfully, in this situation S3 already
// does not yield an error. While OS-provided local file system APIs do yield
// errors, we avoid them in the `LocalFs` wrapper.
storage.delete(&path_to_delete).await.with_context(|| {
format!("Failed to delete remote layer from storage at {path_to_delete:?}")
})
storage
.delete(&path_to_delete)
.await
.with_context(|| format!("delete remote layer from storage at {path_to_delete:?}"))
}
|
||||
|
||||
@@ -15,14 +15,16 @@ use tokio_util::sync::CancellationToken;
|
||||
use utils::{backoff, crashsafe};
|
||||
|
||||
use crate::config::PageServerConf;
|
||||
use crate::tenant::remote_timeline_client::{remote_layer_path, remote_timelines_path};
|
||||
use crate::tenant::storage_layer::LayerFileName;
|
||||
use crate::tenant::timeline::span::debug_assert_current_span_has_tenant_and_timeline_id;
|
||||
use crate::tenant::Generation;
|
||||
use remote_storage::{DownloadError, GenericRemoteStorage};
|
||||
use utils::crashsafe::path_with_suffix_extension;
|
||||
use utils::id::{TenantId, TimelineId};
|
||||
|
||||
use super::index::{IndexPart, LayerFileMetadata};
|
||||
use super::{FAILED_DOWNLOAD_WARN_THRESHOLD, FAILED_REMOTE_OP_RETRIES};
|
||||
use super::{remote_index_path, FAILED_DOWNLOAD_WARN_THRESHOLD, FAILED_REMOTE_OP_RETRIES};
|
||||
|
||||
static MAX_DOWNLOAD_DURATION: Duration = Duration::from_secs(120);
|
||||
|
||||
@@ -41,13 +43,11 @@ pub async fn download_layer_file<'a>(
|
||||
) -> Result<u64, DownloadError> {
|
||||
debug_assert_current_span_has_tenant_and_timeline_id();
|
||||
|
||||
let timeline_path = conf.timeline_path(&tenant_id, &timeline_id);
|
||||
let local_path = conf
|
||||
.timeline_path(&tenant_id, &timeline_id)
|
||||
.join(layer_file_name.file_name());
|
||||
|
||||
let local_path = timeline_path.join(layer_file_name.file_name());
|
||||
|
||||
let remote_path = conf
|
||||
.remote_path(&local_path)
|
||||
.map_err(DownloadError::Other)?;
|
||||
let remote_path = remote_layer_path(&tenant_id, &timeline_id, layer_file_name, layer_metadata);
|
||||
|
||||
// Perform a rename inspired by durable_rename from file_utils.c.
|
||||
// The sequence:
|
||||
@@ -64,33 +64,43 @@ pub async fn download_layer_file<'a>(
|
||||
let (mut destination_file, bytes_amount) = download_retry(
|
||||
|| async {
|
||||
// TODO: this doesn't use the cached fd for some reason?
|
||||
let mut destination_file = fs::File::create(&temp_file_path).await.with_context(|| {
|
||||
format!(
|
||||
"create a destination file for layer '{}'",
|
||||
temp_file_path.display()
|
||||
)
|
||||
})
|
||||
.map_err(DownloadError::Other)?;
|
||||
let mut download = storage.download(&remote_path).await.with_context(|| {
|
||||
format!(
|
||||
let mut destination_file = fs::File::create(&temp_file_path)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"create a destination file for layer '{}'",
|
||||
temp_file_path.display()
|
||||
)
|
||||
})
|
||||
.map_err(DownloadError::Other)?;
|
||||
let mut download = storage
|
||||
.download(&remote_path)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"open a download stream for layer with remote storage path '{remote_path:?}'"
|
||||
)
|
||||
})
|
||||
.map_err(DownloadError::Other)?;
|
||||
|
||||
let bytes_amount = tokio::time::timeout(MAX_DOWNLOAD_DURATION, tokio::io::copy(&mut download.download_stream, &mut destination_file))
|
||||
.await
|
||||
.map_err(|e| DownloadError::Other(anyhow::anyhow!("Timed out {:?}", e)))?
|
||||
.with_context(|| {
|
||||
format!("Failed to download layer with remote storage path '{remote_path:?}' into file {temp_file_path:?}")
|
||||
})
|
||||
.map_err(DownloadError::Other)?;
|
||||
|
||||
Ok((destination_file, bytes_amount))
|
||||
let bytes_amount = tokio::time::timeout(
|
||||
MAX_DOWNLOAD_DURATION,
|
||||
tokio::io::copy(&mut download.download_stream, &mut destination_file),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| DownloadError::Other(anyhow::anyhow!("Timed out {:?}", e)))?
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"download layer at remote path '{remote_path:?}' into file {temp_file_path:?}"
|
||||
)
|
||||
})
|
||||
.map_err(DownloadError::Other)?;
|
||||
|
||||
Ok((destination_file, bytes_amount))
|
||||
},
|
||||
&format!("download {remote_path:?}"),
|
||||
).await?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Tokio doc here: https://docs.rs/tokio/1.17.0/tokio/fs/struct.File.html states that:
// A file will not be closed immediately when it goes out of scope if there are any IO operations
@@ -103,12 +113,7 @@ pub async fn download_layer_file<'a>(
destination_file
.flush()
.await
.with_context(|| {
format!(
"failed to flush source file at {}",
temp_file_path.display()
)
})
.with_context(|| format!("flush source file at {}", temp_file_path.display()))
.map_err(DownloadError::Other)?;

let expected = layer_metadata.file_size();
@@ -139,17 +144,12 @@ pub async fn download_layer_file<'a>(

fs::rename(&temp_file_path, &local_path)
.await
.with_context(|| {
format!(
"Could not rename download layer file to {}",
local_path.display(),
)
})
.with_context(|| format!("rename download layer file to {}", local_path.display(),))
.map_err(DownloadError::Other)?;

crashsafe::fsync_async(&local_path)
.await
.with_context(|| format!("Could not fsync layer file {}", local_path.display(),))
.with_context(|| format!("fsync layer file {}", local_path.display(),))
.map_err(DownloadError::Other)?;

tracing::debug!("download complete: {}", local_path.display());
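
Taken together, these hunks implement a crash-safe download: stream into a temporary file, flush it, rename it to the final layer path, then fsync that path. A minimal sketch of the same sequence, not the pageserver's exact code (the temp-file suffix and the fsync of a reopened handle are illustrative simplifications):

    use std::path::Path;
    use tokio::io::{AsyncRead, AsyncWriteExt};

    // Sketch of the temp-file / flush / rename / fsync pattern used by download_layer_file.
    async fn durable_download(mut src: impl AsyncRead + Unpin, final_path: &Path) -> anyhow::Result<()> {
        let tmp = final_path.with_extension("temp_download"); // illustrative suffix only
        let mut file = tokio::fs::File::create(&tmp).await?;
        tokio::io::copy(&mut src, &mut file).await?; // stream the remote object into the temp file
        file.flush().await?;                         // drain userspace buffers
        tokio::fs::rename(&tmp, final_path).await?;  // move into place under the final layer name
        // Make the rename durable; the real code uses crashsafe::fsync_async on the final path.
        tokio::fs::File::open(final_path).await?.sync_all().await?;
        Ok(())
    }
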
@@ -173,21 +173,19 @@ pub fn is_temp_download_file(path: &Path) -> bool {
|
||||
}
|
||||
|
||||
/// List timelines of given tenant in remote storage
|
||||
pub async fn list_remote_timelines<'a>(
|
||||
storage: &'a GenericRemoteStorage,
|
||||
conf: &'static PageServerConf,
|
||||
pub async fn list_remote_timelines(
|
||||
storage: &GenericRemoteStorage,
|
||||
tenant_id: TenantId,
|
||||
) -> anyhow::Result<HashSet<TimelineId>> {
|
||||
let tenant_path = conf.timelines_path(&tenant_id);
|
||||
let tenant_storage_path = conf.remote_path(&tenant_path)?;
|
||||
let remote_path = remote_timelines_path(&tenant_id);
|
||||
|
||||
fail::fail_point!("storage-sync-list-remote-timelines", |_| {
|
||||
anyhow::bail!("storage-sync-list-remote-timelines");
|
||||
});
|
||||
|
||||
let timelines = download_retry(
|
||||
|| storage.list_prefixes(Some(&tenant_storage_path)),
|
||||
&format!("list prefixes for {tenant_path:?}"),
|
||||
|| storage.list_prefixes(Some(&remote_path)),
|
||||
&format!("list prefixes for {tenant_id}"),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -202,9 +200,9 @@ pub async fn list_remote_timelines<'a>(
|
||||
anyhow::anyhow!("failed to get timeline id for remote tenant {tenant_id}")
|
||||
})?;
|
||||
|
||||
let timeline_id: TimelineId = object_name.parse().with_context(|| {
|
||||
format!("failed to parse object name into timeline id '{object_name}'")
|
||||
})?;
|
||||
let timeline_id: TimelineId = object_name
|
||||
.parse()
|
||||
.with_context(|| format!("parse object name into timeline id '{object_name}'"))?;
|
||||
|
||||
// list_prefixes is assumed to return unique names. Ensure this here.
|
||||
// NB: it's safer to bail out than warn-log this because the pageserver
|
||||
@@ -222,21 +220,16 @@ pub async fn list_remote_timelines<'a>(
|
||||
}
|
||||
|
||||
pub(super) async fn download_index_part(
|
||||
conf: &'static PageServerConf,
|
||||
storage: &GenericRemoteStorage,
|
||||
tenant_id: &TenantId,
|
||||
timeline_id: &TimelineId,
|
||||
generation: Generation,
|
||||
) -> Result<IndexPart, DownloadError> {
|
||||
let index_part_path = conf
|
||||
.metadata_path(tenant_id, timeline_id)
|
||||
.with_file_name(IndexPart::FILE_NAME);
|
||||
let part_storage_path = conf
|
||||
.remote_path(&index_part_path)
|
||||
.map_err(DownloadError::BadInput)?;
|
||||
let remote_path = remote_index_path(tenant_id, timeline_id, generation);
|
||||
|
||||
let index_part_bytes = download_retry(
|
||||
|| async {
|
||||
let mut index_part_download = storage.download(&part_storage_path).await?;
|
||||
let mut index_part_download = storage.download(&remote_path).await?;
|
||||
|
||||
let mut index_part_bytes = Vec::new();
|
||||
tokio::io::copy(
|
||||
@@ -244,20 +237,16 @@ pub(super) async fn download_index_part(
|
||||
&mut index_part_bytes,
|
||||
)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("Failed to download an index part into file {index_part_path:?}")
|
||||
})
|
||||
.with_context(|| format!("download index part at {remote_path:?}"))
|
||||
.map_err(DownloadError::Other)?;
|
||||
Ok(index_part_bytes)
|
||||
},
|
||||
&format!("download {part_storage_path:?}"),
|
||||
&format!("download {remote_path:?}"),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let index_part: IndexPart = serde_json::from_slice(&index_part_bytes)
|
||||
.with_context(|| {
|
||||
format!("Failed to deserialize index part file into file {index_part_path:?}")
|
||||
})
|
||||
.with_context(|| format!("download index part file at {remote_path:?}"))
|
||||
.map_err(DownloadError::Other)?;
|
||||
|
||||
Ok(index_part)
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
//! Able to restore itself from the storage index parts, that are located in every timeline's remote directory and contain all data about
|
||||
//! remote timeline layers and its metadata.
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::collections::HashMap;
|
||||
|
||||
use chrono::NaiveDateTime;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -12,6 +12,7 @@ use utils::bin_ser::SerializeError;
|
||||
use crate::tenant::metadata::TimelineMetadata;
|
||||
use crate::tenant::storage_layer::LayerFileName;
|
||||
use crate::tenant::upload_queue::UploadQueueInitialized;
|
||||
use crate::tenant::Generation;
|
||||
|
||||
use utils::lsn::Lsn;
|
||||
|
||||
@@ -20,22 +21,28 @@ use utils::lsn::Lsn;
/// Fields have to be `Option`s because remote [`IndexPart`]s can come from a different version, which
/// might have less or more metadata depending on whether we are upgrading or rolling back an upgrade.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
#[cfg_attr(test, derive(Default))]
//#[cfg_attr(test, derive(Default))]
pub struct LayerFileMetadata {
file_size: u64,

pub(crate) generation: Generation,
}

impl From<&'_ IndexLayerMetadata> for LayerFileMetadata {
fn from(other: &IndexLayerMetadata) -> Self {
LayerFileMetadata {
file_size: other.file_size,
generation: other.generation,
}
}
}

impl LayerFileMetadata {
pub fn new(file_size: u64) -> Self {
LayerFileMetadata { file_size }
pub fn new(file_size: u64, generation: Generation) -> Self {
LayerFileMetadata {
file_size,
generation,
}
}

pub fn file_size(&self) -> u64 {
@@ -62,10 +69,6 @@ pub struct IndexPart {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub deleted_at: Option<NaiveDateTime>,
|
||||
|
||||
/// Legacy field: equal to the keys of `layer_metadata`, only written out for forward compat
|
||||
#[serde(default, skip_deserializing)]
|
||||
timeline_layers: HashSet<LayerFileName>,
|
||||
|
||||
/// Per layer file name metadata, which can be present for a present or missing layer file.
|
||||
///
|
||||
/// Older versions of `IndexPart` will not have this property or have only a part of metadata
|
||||
@@ -91,7 +94,12 @@ impl IndexPart {
/// - 2: added `deleted_at`
/// - 3: no longer deserialize `timeline_layers` (serialized format is the same, but timeline_layers
/// is always generated from the keys of `layer_metadata`)
const LATEST_VERSION: usize = 3;
/// - 4: timeline_layers is fully removed.
const LATEST_VERSION: usize = 4;

// Versions we may see when reading from a bucket.
pub const KNOWN_VERSIONS: &[usize] = &[1, 2, 3, 4];

pub const FILE_NAME: &'static str = "index_part.json";

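Because KNOWN_VERSIONS now covers 1 through 4, a consumer (for example the scrubber) can validate the version field before trusting the rest of a deserialized index. A small sketch of such a check, not part of this diff:

    // Reject index_part.json files written in a format we do not know about.
    fn check_index_version(part: &IndexPart) -> anyhow::Result<()> {
        let v = part.get_version();
        anyhow::ensure!(
            IndexPart::KNOWN_VERSIONS.contains(&v),
            "unknown index_part.json version {v}, expected one of {:?}",
            IndexPart::KNOWN_VERSIONS
        );
        Ok(())
    }
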
pub fn new(
|
||||
@@ -99,24 +107,30 @@ impl IndexPart {
|
||||
disk_consistent_lsn: Lsn,
|
||||
metadata: TimelineMetadata,
|
||||
) -> Self {
|
||||
let mut timeline_layers = HashSet::with_capacity(layers_and_metadata.len());
|
||||
let mut layer_metadata = HashMap::with_capacity(layers_and_metadata.len());
|
||||
|
||||
for (remote_name, metadata) in &layers_and_metadata {
|
||||
timeline_layers.insert(remote_name.to_owned());
|
||||
let metadata = IndexLayerMetadata::from(metadata);
|
||||
layer_metadata.insert(remote_name.to_owned(), metadata);
|
||||
}
|
||||
// Transform LayerFileMetadata into IndexLayerMetadata
|
||||
let layer_metadata = layers_and_metadata
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k, IndexLayerMetadata::from(v)))
|
||||
.collect();
|
||||
|
||||
Self {
|
||||
version: Self::LATEST_VERSION,
|
||||
timeline_layers,
|
||||
layer_metadata,
|
||||
disk_consistent_lsn,
|
||||
metadata,
|
||||
deleted_at: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_version(&self) -> usize {
|
||||
self.version
|
||||
}
|
||||
|
||||
/// If you want this under normal operations, read it from self.metadata:
|
||||
/// this method is just for the scrubber to use when validating an index.
|
||||
pub fn get_disk_consistent_lsn(&self) -> Lsn {
|
||||
self.disk_consistent_lsn
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<&UploadQueueInitialized> for IndexPart {
|
||||
@@ -135,15 +149,20 @@ impl TryFrom<&UploadQueueInitialized> for IndexPart {
|
||||
}
|
||||
|
||||
/// Serialized form of [`LayerFileMetadata`].
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Default)]
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
|
||||
pub struct IndexLayerMetadata {
|
||||
pub(super) file_size: u64,
|
||||
pub file_size: u64,
|
||||
|
||||
#[serde(default = "Generation::none")]
|
||||
#[serde(skip_serializing_if = "Generation::is_none")]
|
||||
pub(super) generation: Generation,
|
||||
}
|
||||
|
||||
impl From<&'_ LayerFileMetadata> for IndexLayerMetadata {
|
||||
fn from(other: &'_ LayerFileMetadata) -> Self {
|
||||
impl From<LayerFileMetadata> for IndexLayerMetadata {
|
||||
fn from(other: LayerFileMetadata) -> Self {
|
||||
IndexLayerMetadata {
|
||||
file_size: other.file_size,
|
||||
generation: other.generation,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -168,15 +187,16 @@ mod tests {
|
||||
let expected = IndexPart {
|
||||
// note this is not verified, could be anything, but exists for humans debugging.. could be the git version instead?
|
||||
version: 1,
|
||||
timeline_layers: HashSet::new(),
|
||||
layer_metadata: HashMap::from([
|
||||
("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), IndexLayerMetadata {
|
||||
file_size: 25600000,
|
||||
generation: Generation::none()
|
||||
}),
|
||||
("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), IndexLayerMetadata {
|
||||
// serde_json should always parse this but this might be a double with jq for
|
||||
// example.
|
||||
file_size: 9007199254741001,
|
||||
generation: Generation::none()
|
||||
})
|
||||
]),
|
||||
disk_consistent_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
|
||||
@@ -205,15 +225,16 @@ mod tests {
|
||||
let expected = IndexPart {
|
||||
// note this is not verified, could be anything, but exists for humans debugging.. could be the git version instead?
|
||||
version: 1,
|
||||
timeline_layers: HashSet::new(),
|
||||
layer_metadata: HashMap::from([
|
||||
("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), IndexLayerMetadata {
|
||||
file_size: 25600000,
|
||||
generation: Generation::none()
|
||||
}),
|
||||
("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), IndexLayerMetadata {
|
||||
// serde_json should always parse this but this might be a double with jq for
|
||||
// example.
|
||||
file_size: 9007199254741001,
|
||||
generation: Generation::none()
|
||||
})
|
||||
]),
|
||||
disk_consistent_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
|
||||
@@ -243,15 +264,16 @@ mod tests {
|
||||
let expected = IndexPart {
|
||||
// note this is not verified, could be anything, but exists for humans debugging.. could be the git version instead?
|
||||
version: 2,
|
||||
timeline_layers: HashSet::new(),
|
||||
layer_metadata: HashMap::from([
|
||||
("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), IndexLayerMetadata {
|
||||
file_size: 25600000,
|
||||
generation: Generation::none()
|
||||
}),
|
||||
("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), IndexLayerMetadata {
|
||||
// serde_json should always parse this but this might be a double with jq for
|
||||
// example.
|
||||
file_size: 9007199254741001,
|
||||
generation: Generation::none()
|
||||
})
|
||||
]),
|
||||
disk_consistent_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
|
||||
@@ -276,7 +298,6 @@ mod tests {
|
||||
|
||||
let expected = IndexPart {
|
||||
version: 1,
|
||||
timeline_layers: HashSet::new(),
|
||||
layer_metadata: HashMap::new(),
|
||||
disk_consistent_lsn: "0/2532648".parse::<Lsn>().unwrap(),
|
||||
metadata: TimelineMetadata::from_bytes(&[
|
||||
@@ -309,4 +330,41 @@ mod tests {
|
||||
|
||||
assert_eq!(empty_layers_parsed, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn v4_indexpart_is_parsed() {
|
||||
let example = r#"{
|
||||
"version":4,
|
||||
"layer_metadata":{
|
||||
"000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9": { "file_size": 25600000 },
|
||||
"000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51": { "file_size": 9007199254741001 }
|
||||
},
|
||||
"disk_consistent_lsn":"0/16960E8",
|
||||
"metadata_bytes":[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
|
||||
"deleted_at": "2023-07-31T09:00:00.123"
|
||||
}"#;
|
||||
|
||||
let expected = IndexPart {
|
||||
version: 4,
|
||||
layer_metadata: HashMap::from([
|
||||
("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), IndexLayerMetadata {
|
||||
file_size: 25600000,
|
||||
generation: Generation::none()
|
||||
}),
|
||||
("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), IndexLayerMetadata {
|
||||
// serde_json should always parse this but this might be a double with jq for
|
||||
// example.
|
||||
file_size: 9007199254741001,
|
||||
generation: Generation::none()
|
||||
})
|
||||
]),
|
||||
disk_consistent_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
|
||||
metadata: TimelineMetadata::from_bytes(&[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]).unwrap(),
|
||||
deleted_at: Some(chrono::NaiveDateTime::parse_from_str(
|
||||
"2023-07-31T09:00:00.123000000", "%Y-%m-%dT%H:%M:%S.%f").unwrap())
|
||||
};
|
||||
|
||||
let part = serde_json::from_str::<IndexPart>(example).unwrap();
|
||||
assert_eq!(part, expected);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,7 +5,11 @@ use fail::fail_point;
|
||||
use std::{io::ErrorKind, path::Path};
|
||||
use tokio::fs;
|
||||
|
||||
use crate::{config::PageServerConf, tenant::remote_timeline_client::index::IndexPart};
|
||||
use super::Generation;
|
||||
use crate::{
|
||||
config::PageServerConf,
|
||||
tenant::remote_timeline_client::{index::IndexPart, remote_index_path, remote_path},
|
||||
};
|
||||
use remote_storage::GenericRemoteStorage;
|
||||
use utils::id::{TenantId, TimelineId};
|
||||
|
||||
@@ -15,10 +19,10 @@ use tracing::info;
|
||||
|
||||
/// Serializes and uploads the given index part data to the remote storage.
|
||||
pub(super) async fn upload_index_part<'a>(
|
||||
conf: &'static PageServerConf,
|
||||
storage: &'a GenericRemoteStorage,
|
||||
tenant_id: &TenantId,
|
||||
timeline_id: &TimelineId,
|
||||
generation: Generation,
|
||||
index_part: &'a IndexPart,
|
||||
) -> anyhow::Result<()> {
|
||||
tracing::trace!("uploading new index part");
|
||||
@@ -27,20 +31,16 @@ pub(super) async fn upload_index_part<'a>(
|
||||
bail!("failpoint before-upload-index")
|
||||
});
|
||||
|
||||
let index_part_bytes = serde_json::to_vec(&index_part)
|
||||
.context("Failed to serialize index part file into bytes")?;
|
||||
let index_part_bytes =
|
||||
serde_json::to_vec(&index_part).context("serialize index part file into bytes")?;
|
||||
let index_part_size = index_part_bytes.len();
|
||||
let index_part_bytes = tokio::io::BufReader::new(std::io::Cursor::new(index_part_bytes));
|
||||
|
||||
let index_part_path = conf
|
||||
.metadata_path(tenant_id, timeline_id)
|
||||
.with_file_name(IndexPart::FILE_NAME);
|
||||
let storage_path = conf.remote_path(&index_part_path)?;
|
||||
|
||||
let remote_path = remote_index_path(tenant_id, timeline_id, generation);
|
||||
storage
|
||||
.upload_storage_object(Box::new(index_part_bytes), index_part_size, &storage_path)
|
||||
.upload_storage_object(Box::new(index_part_bytes), index_part_size, &remote_path)
|
||||
.await
|
||||
.with_context(|| format!("Failed to upload index part for '{tenant_id} / {timeline_id}'"))
|
||||
.with_context(|| format!("upload index part for '{tenant_id} / {timeline_id}'"))
|
||||
}
|
||||
|
||||
/// Attempts to upload given layer files.
|
||||
@@ -52,12 +52,13 @@ pub(super) async fn upload_timeline_layer<'a>(
|
||||
storage: &'a GenericRemoteStorage,
|
||||
source_path: &'a Path,
|
||||
known_metadata: &'a LayerFileMetadata,
|
||||
generation: Generation,
|
||||
) -> anyhow::Result<()> {
|
||||
fail_point!("before-upload-layer", |_| {
|
||||
bail!("failpoint before-upload-layer")
|
||||
});
|
||||
let storage_path = conf.remote_path(source_path)?;
|
||||
|
||||
let storage_path = remote_path(conf, source_path, generation)?;
|
||||
let source_file_res = fs::File::open(&source_path).await;
|
||||
let source_file = match source_file_res {
|
||||
Ok(source_file) => source_file,
|
||||
@@ -70,16 +71,15 @@ pub(super) async fn upload_timeline_layer<'a>(
|
||||
info!(path = %source_path.display(), "File to upload doesn't exist. Likely the file has been deleted and an upload is not required any more.");
|
||||
return Ok(());
|
||||
}
|
||||
Err(e) => Err(e)
|
||||
.with_context(|| format!("Failed to open a source file for layer {source_path:?}"))?,
|
||||
Err(e) => {
|
||||
Err(e).with_context(|| format!("open a source file for layer {source_path:?}"))?
|
||||
}
|
||||
};
|
||||
|
||||
let fs_size = source_file
|
||||
.metadata()
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("Failed to get the source file metadata for layer {source_path:?}")
|
||||
})?
|
||||
.with_context(|| format!("get the source file metadata for layer {source_path:?}"))?
|
||||
.len();
|
||||
|
||||
let metadata_size = known_metadata.file_size();
@@ -87,19 +87,13 @@ pub(super) async fn upload_timeline_layer<'a>(
bail!("File {source_path:?} has its current FS size {fs_size} different from the initially determined {metadata_size}");
}

let fs_size = usize::try_from(fs_size).with_context(|| {
format!("File {source_path:?} size {fs_size} could not be converted to usize")
})?;
let fs_size = usize::try_from(fs_size)
.with_context(|| format!("convert {source_path:?} size {fs_size} to usize"))?;

storage
|
||||
.upload(source_file, fs_size, &storage_path, None)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to upload a layer from local path '{}'",
|
||||
source_path.display()
|
||||
)
|
||||
})?;
|
||||
.with_context(|| format!("upload layer from local path '{}'", source_path.display()))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -31,7 +31,7 @@ use crate::config::PageServerConf;
|
||||
use crate::context::RequestContext;
|
||||
use crate::page_cache::PAGE_SZ;
|
||||
use crate::repository::{Key, Value, KEY_SIZE};
|
||||
use crate::tenant::blob_io::{BlobWriter, WriteBlobWriter};
|
||||
use crate::tenant::blob_io::WriteBlobWriter;
|
||||
use crate::tenant::block_io::{BlockBuf, BlockCursor, BlockLease, BlockReader, FileBlockReader};
|
||||
use crate::tenant::disk_btree::{DiskBtreeBuilder, DiskBtreeReader, VisitDirection};
|
||||
use crate::tenant::storage_layer::{
|
||||
@@ -45,8 +45,8 @@ use pageserver_api::models::{HistoricLayerInfo, LayerAccessKind};
|
||||
use rand::{distributions::Alphanumeric, Rng};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::fs::{self, File};
|
||||
use std::io::SeekFrom;
|
||||
use std::io::{BufWriter, Write};
|
||||
use std::io::{Seek, SeekFrom};
|
||||
use std::ops::Range;
|
||||
use std::os::unix::fs::FileExt;
|
||||
use std::path::{Path, PathBuf};
|
||||
@@ -632,11 +632,12 @@ impl DeltaLayerWriterInner {
|
||||
///
|
||||
/// The values must be appended in key, lsn order.
|
||||
///
|
||||
fn put_value(&mut self, key: Key, lsn: Lsn, val: Value) -> anyhow::Result<()> {
|
||||
async fn put_value(&mut self, key: Key, lsn: Lsn, val: Value) -> anyhow::Result<()> {
|
||||
self.put_value_bytes(key, lsn, &Value::ser(&val)?, val.will_init())
|
||||
.await
|
||||
}
|
||||
|
||||
fn put_value_bytes(
|
||||
async fn put_value_bytes(
|
||||
&mut self,
|
||||
key: Key,
|
||||
lsn: Lsn,
|
||||
@@ -645,7 +646,7 @@ impl DeltaLayerWriterInner {
|
||||
) -> anyhow::Result<()> {
|
||||
assert!(self.lsn_range.start <= lsn);
|
||||
|
||||
let off = self.blob_writer.write_blob(val)?;
|
||||
let off = self.blob_writer.write_blob(val).await?;
|
||||
|
||||
let blob_ref = BlobRef::new(off, will_init);
|
||||
|
||||
@@ -797,11 +798,11 @@ impl DeltaLayerWriter {
|
||||
///
|
||||
/// The values must be appended in key, lsn order.
|
||||
///
|
||||
pub fn put_value(&mut self, key: Key, lsn: Lsn, val: Value) -> anyhow::Result<()> {
|
||||
self.inner.as_mut().unwrap().put_value(key, lsn, val)
|
||||
pub async fn put_value(&mut self, key: Key, lsn: Lsn, val: Value) -> anyhow::Result<()> {
|
||||
self.inner.as_mut().unwrap().put_value(key, lsn, val).await
|
||||
}
|
||||
|
||||
pub fn put_value_bytes(
|
||||
pub async fn put_value_bytes(
|
||||
&mut self,
|
||||
key: Key,
|
||||
lsn: Lsn,
|
||||
@@ -812,6 +813,7 @@ impl DeltaLayerWriter {
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.put_value_bytes(key, lsn, val, will_init)
|
||||
.await
|
||||
}
|
||||
|
||||
pub fn size(&self) -> u64 {
|
||||
|
||||
@@ -212,7 +212,7 @@ pub enum LayerFileName {
|
||||
}
|
||||
|
||||
impl LayerFileName {
|
||||
pub(crate) fn file_name(&self) -> String {
|
||||
pub fn file_name(&self) -> String {
|
||||
self.to_string()
|
||||
}
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ use crate::config::PageServerConf;
|
||||
use crate::context::RequestContext;
|
||||
use crate::page_cache::PAGE_SZ;
|
||||
use crate::repository::{Key, KEY_SIZE};
|
||||
use crate::tenant::blob_io::{BlobWriter, WriteBlobWriter};
|
||||
use crate::tenant::blob_io::WriteBlobWriter;
|
||||
use crate::tenant::block_io::{BlockBuf, BlockReader, FileBlockReader};
|
||||
use crate::tenant::disk_btree::{DiskBtreeBuilder, DiskBtreeReader, VisitDirection};
|
||||
use crate::tenant::storage_layer::{
|
||||
@@ -42,8 +42,8 @@ use pageserver_api::models::{HistoricLayerInfo, LayerAccessKind};
|
||||
use rand::{distributions::Alphanumeric, Rng};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::fs::{self, File};
|
||||
use std::io::SeekFrom;
|
||||
use std::io::Write;
|
||||
use std::io::{Seek, SeekFrom};
|
||||
use std::ops::Range;
|
||||
use std::os::unix::prelude::FileExt;
|
||||
use std::path::{Path, PathBuf};
|
||||
@@ -569,9 +569,9 @@ impl ImageLayerWriterInner {
|
||||
///
|
||||
/// The page versions must be appended in blknum order.
|
||||
///
|
||||
fn put_image(&mut self, key: Key, img: &[u8]) -> anyhow::Result<()> {
|
||||
async fn put_image(&mut self, key: Key, img: &[u8]) -> anyhow::Result<()> {
|
||||
ensure!(self.key_range.contains(&key));
|
||||
let off = self.blob_writer.write_blob(img)?;
|
||||
let off = self.blob_writer.write_blob(img).await?;
|
||||
|
||||
let mut keybuf: [u8; KEY_SIZE] = [0u8; KEY_SIZE];
|
||||
key.write_to_byte_slice(&mut keybuf);
|
||||
@@ -710,8 +710,8 @@ impl ImageLayerWriter {
|
||||
///
|
||||
/// The page versions must be appended in blknum order.
|
||||
///
|
||||
pub fn put_image(&mut self, key: Key, img: &[u8]) -> anyhow::Result<()> {
|
||||
self.inner.as_mut().unwrap().put_image(key, img)
|
||||
pub async fn put_image(&mut self, key: Key, img: &[u8]) -> anyhow::Result<()> {
|
||||
self.inner.as_mut().unwrap().put_image(key, img).await
|
||||
}
|
||||
|
||||
///
|
||||
|
||||
@@ -348,7 +348,9 @@ impl InMemoryLayer {
|
||||
for (lsn, pos) in vec_map.as_slice() {
|
||||
cursor.read_blob_into_buf(*pos, &mut buf).await?;
|
||||
let will_init = Value::des(&buf)?.will_init();
|
||||
delta_layer_writer.put_value_bytes(key, *lsn, &buf, will_init)?;
|
||||
delta_layer_writer
|
||||
.put_value_bytes(key, *lsn, &buf, will_init)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -67,6 +67,7 @@ use postgres_connection::PgConnectionConfig;
|
||||
use postgres_ffi::to_pg_timestamp;
|
||||
use utils::{
|
||||
completion,
|
||||
generation::Generation,
|
||||
id::{TenantId, TimelineId},
|
||||
lsn::{AtomicLsn, Lsn, RecordLsn},
|
||||
seqwait::SeqWait,
|
||||
@@ -152,6 +153,10 @@ pub struct Timeline {
|
||||
pub tenant_id: TenantId,
|
||||
pub timeline_id: TimelineId,
|
||||
|
||||
/// The generation of the tenant that instantiated us: this is used for safety when writing remote objects.
|
||||
/// Never changes for the lifetime of this [`Timeline`] object.
|
||||
generation: Generation,
|
||||
|
||||
pub pg_version: u32,
|
||||
|
||||
/// The tuple has two elements.
|
||||
@@ -1199,7 +1204,7 @@ impl Timeline {
|
||||
Ok(delta) => Some(delta),
|
||||
};
|
||||
|
||||
let layer_metadata = LayerFileMetadata::new(layer_file_size);
|
||||
let layer_metadata = LayerFileMetadata::new(layer_file_size, self.generation);
|
||||
|
||||
let new_remote_layer = Arc::new(match local_layer.filename() {
|
||||
LayerFileName::Image(image_name) => RemoteLayer::new_img(
|
||||
@@ -1377,6 +1382,7 @@ impl Timeline {
|
||||
ancestor: Option<Arc<Timeline>>,
|
||||
timeline_id: TimelineId,
|
||||
tenant_id: TenantId,
|
||||
generation: Generation,
|
||||
walredo_mgr: Arc<dyn WalRedoManager + Send + Sync>,
|
||||
resources: TimelineResources,
|
||||
pg_version: u32,
|
||||
@@ -1406,6 +1412,7 @@ impl Timeline {
|
||||
myself: myself.clone(),
|
||||
timeline_id,
|
||||
tenant_id,
|
||||
generation,
|
||||
pg_version,
|
||||
layers: Arc::new(tokio::sync::RwLock::new(LayerManager::create())),
|
||||
wanted_image_layers: Mutex::new(None),
|
||||
@@ -1615,6 +1622,9 @@ impl Timeline {
|
||||
let (conf, tenant_id, timeline_id) = (self.conf, self.tenant_id, self.timeline_id);
|
||||
let span = tracing::Span::current();
|
||||
|
||||
// Copy to move into the task we're about to spawn
|
||||
let generation = self.generation;
|
||||
|
||||
let (loaded_layers, to_sync, total_physical_size) = tokio::task::spawn_blocking({
|
||||
move || {
|
||||
let _g = span.entered();
|
||||
@@ -1656,8 +1666,12 @@ impl Timeline {
|
||||
);
|
||||
}
|
||||
|
||||
let decided =
|
||||
init::reconcile(discovered_layers, index_part.as_ref(), disk_consistent_lsn);
|
||||
let decided = init::reconcile(
|
||||
discovered_layers,
|
||||
index_part.as_ref(),
|
||||
disk_consistent_lsn,
|
||||
generation,
|
||||
);
|
||||
|
||||
let mut loaded_layers = Vec::new();
|
||||
let mut needs_upload = Vec::new();
|
||||
@@ -2669,7 +2683,7 @@ impl Timeline {
|
||||
(
|
||||
HashMap::from([(
|
||||
layer.filename(),
|
||||
LayerFileMetadata::new(layer.layer_desc().file_size),
|
||||
LayerFileMetadata::new(layer.layer_desc().file_size, self.generation),
|
||||
)]),
|
||||
Some(layer),
|
||||
)
|
||||
@@ -2721,6 +2735,7 @@ impl Timeline {
|
||||
if disk_consistent_lsn != old_disk_consistent_lsn {
|
||||
assert!(disk_consistent_lsn > old_disk_consistent_lsn);
|
||||
self.update_metadata_file(disk_consistent_lsn, layer_paths_to_upload)
|
||||
.await
|
||||
.context("update_metadata_file")?;
|
||||
// Also update the in-memory copy
|
||||
self.disk_consistent_lsn.store(disk_consistent_lsn);
|
||||
@@ -2729,7 +2744,7 @@ impl Timeline {
|
||||
}
|
||||
|
||||
/// Update metadata file
|
||||
fn update_metadata_file(
|
||||
async fn update_metadata_file(
|
||||
&self,
|
||||
disk_consistent_lsn: Lsn,
|
||||
layer_paths_to_upload: HashMap<LayerFileName, LayerFileMetadata>,
|
||||
@@ -2777,6 +2792,7 @@ impl Timeline {
|
||||
&metadata,
|
||||
false,
|
||||
)
|
||||
.await
|
||||
.context("save_metadata")?;
|
||||
|
||||
if let Some(remote_client) = &self.remote_client {
|
||||
@@ -3016,7 +3032,7 @@ impl Timeline {
|
||||
}
|
||||
}
|
||||
};
|
||||
image_layer_writer.put_image(key, &img)?;
|
||||
image_layer_writer.put_image(key, &img).await?;
|
||||
key = key.next();
|
||||
}
|
||||
}
|
||||
@@ -3065,7 +3081,10 @@ impl Timeline {
|
||||
.metadata()
|
||||
.with_context(|| format!("reading metadata of layer file {}", path.file_name()))?;
|
||||
|
||||
layer_paths_to_upload.insert(path, LayerFileMetadata::new(metadata.len()));
|
||||
layer_paths_to_upload.insert(
|
||||
path,
|
||||
LayerFileMetadata::new(metadata.len(), self.generation),
|
||||
);
|
||||
|
||||
self.metrics
|
||||
.resident_physical_size_gauge
|
||||
@@ -3599,7 +3618,7 @@ impl Timeline {
|
||||
)))
|
||||
});
|
||||
|
||||
writer.as_mut().unwrap().put_value(key, lsn, value)?;
|
||||
writer.as_mut().unwrap().put_value(key, lsn, value).await?;
|
||||
prev_key = Some(key);
|
||||
}
|
||||
if let Some(writer) = writer {
|
||||
@@ -3740,7 +3759,7 @@ impl Timeline {
|
||||
if let Some(remote_client) = &self.remote_client {
|
||||
remote_client.schedule_layer_file_upload(
|
||||
&l.filename(),
|
||||
&LayerFileMetadata::new(metadata.len()),
|
||||
&LayerFileMetadata::new(metadata.len(), self.generation),
|
||||
)?;
|
||||
}
|
||||
|
||||
@@ -3749,7 +3768,10 @@ impl Timeline {
|
||||
.resident_physical_size_gauge
|
||||
.add(metadata.len());
|
||||
|
||||
new_layer_paths.insert(new_delta_path, LayerFileMetadata::new(metadata.len()));
|
||||
new_layer_paths.insert(
|
||||
new_delta_path,
|
||||
LayerFileMetadata::new(metadata.len(), self.generation),
|
||||
);
|
||||
l.access_stats().record_residence_event(
|
||||
LayerResidenceStatus::Resident,
|
||||
LayerResidenceEventReason::LayerCreate,
|
||||
@@ -4102,7 +4124,8 @@ impl Timeline {
|
||||
if !layers_to_remove.is_empty() {
|
||||
// Persist the new GC cutoff value in the metadata file, before
|
||||
// we actually remove anything.
|
||||
self.update_metadata_file(self.disk_consistent_lsn.load(), HashMap::new())?;
|
||||
self.update_metadata_file(self.disk_consistent_lsn.load(), HashMap::new())
|
||||
.await?;
|
||||
|
||||
// Actually delete the layers from disk and remove them from the map.
|
||||
// (couldn't do this in the loop above, because you cannot modify a collection
|
||||
|
||||
@@ -7,6 +7,7 @@ use crate::{
|
||||
index::{IndexPart, LayerFileMetadata},
|
||||
},
|
||||
storage_layer::LayerFileName,
|
||||
Generation,
|
||||
},
|
||||
METADATA_FILE_NAME,
|
||||
};
|
||||
@@ -104,6 +105,7 @@ pub(super) fn reconcile(
|
||||
discovered: Vec<(LayerFileName, u64)>,
|
||||
index_part: Option<&IndexPart>,
|
||||
disk_consistent_lsn: Lsn,
|
||||
generation: Generation,
|
||||
) -> Vec<(LayerFileName, Result<Decision, FutureLayer>)> {
|
||||
use Decision::*;
|
||||
|
||||
@@ -112,7 +114,15 @@ pub(super) fn reconcile(

let mut discovered = discovered
.into_iter()
.map(|(name, file_size)| (name, (Some(LayerFileMetadata::new(file_size)), None)))
.map(|(name, file_size)| {
(
name,
// The generation here will be corrected to match IndexPart in the merge below, unless
// it is not in IndexPart, in which case using our current generation makes sense
// because it will be uploaded in this generation.
(Some(LayerFileMetadata::new(file_size, generation)), None),
)
})
.collect::<Collected>();

// merge any index_part information, when available
@@ -137,7 +147,11 @@ pub(super) fn reconcile(
|
||||
Err(FutureLayer { local })
|
||||
} else {
|
||||
Ok(match (local, remote) {
|
||||
(Some(local), Some(remote)) if local != remote => UseRemote { local, remote },
|
||||
(Some(local), Some(remote)) if local != remote => {
|
||||
assert_eq!(local.generation, remote.generation);
|
||||
|
||||
UseRemote { local, remote }
|
||||
}
|
||||
(Some(x), Some(_)) => UseLocal(x),
|
||||
(None, Some(x)) => Evicted(x),
|
||||
(Some(x), None) => NeedsUpload(x),
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use crate::metrics::RemoteOpFileKind;
|
||||
|
||||
use super::storage_layer::LayerFileName;
|
||||
use super::Generation;
|
||||
use crate::tenant::metadata::TimelineMetadata;
|
||||
use crate::tenant::remote_timeline_client::index::IndexPart;
|
||||
use crate::tenant::remote_timeline_client::index::LayerFileMetadata;
|
||||
@@ -205,6 +206,7 @@ pub(crate) struct Delete {
|
||||
pub(crate) file_kind: RemoteOpFileKind,
|
||||
pub(crate) layer_file_name: LayerFileName,
|
||||
pub(crate) scheduled_from_timeline_delete: bool,
|
||||
pub(crate) generation: Generation,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -228,17 +230,21 @@ impl std::fmt::Display for UploadOp {
|
||||
UploadOp::UploadLayer(path, metadata) => {
|
||||
write!(
|
||||
f,
|
||||
"UploadLayer({}, size={:?})",
|
||||
"UploadLayer({}, size={:?}, gen={:?})",
|
||||
path.file_name(),
|
||||
metadata.file_size()
|
||||
metadata.file_size(),
|
||||
metadata.generation,
|
||||
)
|
||||
}
|
||||
UploadOp::UploadMetadata(_, lsn) => write!(f, "UploadMetadata(lsn: {})", lsn),
|
||||
UploadOp::UploadMetadata(_, lsn) => {
|
||||
write!(f, "UploadMetadata(lsn: {})", lsn)
|
||||
}
|
||||
UploadOp::Delete(delete) => write!(
|
||||
f,
|
||||
"Delete(path: {}, scheduled_from_timeline_delete: {})",
|
||||
"Delete(path: {}, scheduled_from_timeline_delete: {}, gen: {:?})",
|
||||
delete.layer_file_name.file_name(),
|
||||
delete.scheduled_from_timeline_delete
|
||||
delete.scheduled_from_timeline_delete,
|
||||
delete.generation
|
||||
),
|
||||
UploadOp::Barrier(_) => write!(f, "Barrier"),
|
||||
}
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
use crate::metrics::{STORAGE_IO_SIZE, STORAGE_IO_TIME};
|
||||
use once_cell::sync::OnceCell;
|
||||
use std::fs::{self, File, OpenOptions};
|
||||
use std::io::{Error, ErrorKind, Read, Seek, SeekFrom, Write};
|
||||
use std::io::{Error, ErrorKind, Seek, SeekFrom, Write};
|
||||
use std::os::unix::fs::FileExt;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
||||
@@ -321,54 +321,8 @@ impl VirtualFile {
|
||||
drop(self);
|
||||
std::fs::remove_file(path).expect("failed to remove the virtual file");
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for VirtualFile {
|
||||
/// If a VirtualFile is dropped, close the underlying file if it was open.
|
||||
fn drop(&mut self) {
|
||||
let handle = self.handle.get_mut().unwrap();
|
||||
|
||||
// We could check with a read-lock first, to avoid waiting on an
|
||||
// unrelated I/O.
|
||||
let slot = &get_open_files().slots[handle.index];
|
||||
let mut slot_guard = slot.inner.write().unwrap();
|
||||
if slot_guard.tag == handle.tag {
|
||||
slot.recently_used.store(false, Ordering::Relaxed);
|
||||
// there is also operation "close-by-replace" for closes done on eviction for
|
||||
// comparison.
|
||||
STORAGE_IO_TIME
|
||||
.with_label_values(&["close"])
|
||||
.observe_closure_duration(|| drop(slot_guard.file.take()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Read for VirtualFile {
|
||||
fn read(&mut self, buf: &mut [u8]) -> Result<usize, Error> {
|
||||
let pos = self.pos;
|
||||
let n = self.read_at(buf, pos)?;
|
||||
self.pos += n as u64;
|
||||
Ok(n)
|
||||
}
|
||||
}
|
||||
|
||||
impl Write for VirtualFile {
|
||||
fn write(&mut self, buf: &[u8]) -> Result<usize, std::io::Error> {
|
||||
let pos = self.pos;
|
||||
let n = self.write_at(buf, pos)?;
|
||||
self.pos += n as u64;
|
||||
Ok(n)
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> Result<(), std::io::Error> {
|
||||
// flush is no-op for File (at least on unix), so we don't need to do
|
||||
// anything here either.
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Seek for VirtualFile {
|
||||
fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
|
||||
pub fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
|
||||
match pos {
|
||||
SeekFrom::Start(offset) => {
|
||||
self.pos = offset;
|
||||
@@ -392,10 +346,64 @@ impl Seek for VirtualFile {
|
||||
}
|
||||
Ok(self.pos)
|
||||
}
|
||||
}
|
||||
|
||||
impl FileExt for VirtualFile {
|
||||
fn read_at(&self, buf: &mut [u8], offset: u64) -> Result<usize, Error> {
|
||||
// Copied from https://doc.rust-lang.org/1.72.0/src/std/os/unix/fs.rs.html#117-135
|
||||
pub fn read_exact_at(&self, mut buf: &mut [u8], mut offset: u64) -> Result<(), Error> {
|
||||
while !buf.is_empty() {
|
||||
match self.read_at(buf, offset) {
|
||||
Ok(0) => {
|
||||
return Err(Error::new(
|
||||
std::io::ErrorKind::UnexpectedEof,
|
||||
"failed to fill whole buffer",
|
||||
))
|
||||
}
|
||||
Ok(n) => {
|
||||
buf = &mut buf[n..];
|
||||
offset += n as u64;
|
||||
}
|
||||
Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Copied from https://doc.rust-lang.org/1.72.0/src/std/os/unix/fs.rs.html#219-235
|
||||
pub async fn write_all_at(&self, mut buf: &[u8], mut offset: u64) -> Result<(), Error> {
|
||||
while !buf.is_empty() {
|
||||
match self.write_at(buf, offset) {
|
||||
Ok(0) => {
|
||||
return Err(Error::new(
|
||||
std::io::ErrorKind::WriteZero,
|
||||
"failed to write whole buffer",
|
||||
));
|
||||
}
|
||||
Ok(n) => {
|
||||
buf = &buf[n..];
|
||||
offset += n as u64;
|
||||
}
|
||||
Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Write the given buffer (which has to be below the kernel's internal page size) and fsync
///
/// This ensures some level of atomicity (not a good one, but it's the best we have).
pub fn write_and_fsync(&mut self, buf: &[u8]) -> Result<(), Error> {
if self.write(buf)? != buf.len() {
return Err(Error::new(
std::io::ErrorKind::Other,
"Could not write all the bytes in a single call",
));
}
self.sync_all()?;
Ok(())
}

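The helper above is meant for small, single-shot writes: if the buffer cannot be written in one call it errors out rather than looping, and it fsyncs immediately afterwards. A hedged usage sketch (the caller and buffer are illustrative, not taken from this diff):

    // Persist a small metadata blob with one write followed by fsync.
    fn persist_blob(file: &mut VirtualFile, bytes: &[u8]) -> Result<(), std::io::Error> {
        // `bytes` is assumed to be well under the kernel page size, per the doc comment above.
        file.write_and_fsync(bytes)
    }
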
pub fn read_at(&self, buf: &mut [u8], offset: u64) -> Result<usize, Error> {
|
||||
let result = self.with_file("read", |file| file.read_at(buf, offset))?;
|
||||
if let Ok(size) = result {
|
||||
STORAGE_IO_SIZE
|
||||
@@ -405,7 +413,7 @@ impl FileExt for VirtualFile {
|
||||
result
|
||||
}
|
||||
|
||||
fn write_at(&self, buf: &[u8], offset: u64) -> Result<usize, Error> {
|
||||
pub fn write_at(&self, buf: &[u8], offset: u64) -> Result<usize, Error> {
|
||||
let result = self.with_file("write", |file| file.write_at(buf, offset))?;
|
||||
if let Ok(size) = result {
|
||||
STORAGE_IO_SIZE
|
||||
@@ -416,6 +424,41 @@ impl FileExt for VirtualFile {
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for VirtualFile {
|
||||
/// If a VirtualFile is dropped, close the underlying file if it was open.
|
||||
fn drop(&mut self) {
|
||||
let handle = self.handle.get_mut().unwrap();
|
||||
|
||||
// We could check with a read-lock first, to avoid waiting on an
|
||||
// unrelated I/O.
|
||||
let slot = &get_open_files().slots[handle.index];
|
||||
let mut slot_guard = slot.inner.write().unwrap();
|
||||
if slot_guard.tag == handle.tag {
|
||||
slot.recently_used.store(false, Ordering::Relaxed);
|
||||
// there is also operation "close-by-replace" for closes done on eviction for
|
||||
// comparison.
|
||||
STORAGE_IO_TIME
|
||||
.with_label_values(&["close"])
|
||||
.observe_closure_duration(|| drop(slot_guard.file.take()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Write for VirtualFile {
|
||||
fn write(&mut self, buf: &[u8]) -> Result<usize, std::io::Error> {
|
||||
let pos = self.pos;
|
||||
let n = self.write_at(buf, pos)?;
|
||||
self.pos += n as u64;
|
||||
Ok(n)
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> Result<(), std::io::Error> {
|
||||
// flush is no-op for File (at least on unix), so we don't need to do
|
||||
// anything here either.
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl OpenFiles {
|
||||
fn new(num_slots: usize) -> OpenFiles {
|
||||
let mut slots = Box::new(Vec::with_capacity(num_slots));
|
||||
@@ -472,30 +515,60 @@ mod tests {
|
||||
use std::sync::Arc;
|
||||
use std::thread;
|
||||
|
||||
// Helper function to slurp contents of a file, starting at the current position,
|
||||
// into a string
|
||||
fn read_string<FD>(vfile: &mut FD) -> Result<String, Error>
|
||||
where
|
||||
FD: Read,
|
||||
{
|
||||
let mut buf = String::new();
|
||||
vfile.read_to_string(&mut buf)?;
|
||||
Ok(buf)
|
||||
enum MaybeVirtualFile {
|
||||
VirtualFile(VirtualFile),
|
||||
File(File),
|
||||
}
|
||||
|
||||
// Helper function to slurp a portion of a file into a string
|
||||
fn read_string_at<FD>(vfile: &mut FD, pos: u64, len: usize) -> Result<String, Error>
|
||||
where
|
||||
FD: FileExt,
|
||||
{
|
||||
let mut buf = Vec::new();
|
||||
buf.resize(len, 0);
|
||||
vfile.read_exact_at(&mut buf, pos)?;
|
||||
Ok(String::from_utf8(buf).unwrap())
|
||||
impl MaybeVirtualFile {
|
||||
fn read_exact_at(&self, buf: &mut [u8], offset: u64) -> Result<(), Error> {
|
||||
match self {
|
||||
MaybeVirtualFile::VirtualFile(file) => file.read_exact_at(buf, offset),
|
||||
MaybeVirtualFile::File(file) => file.read_exact_at(buf, offset),
|
||||
}
|
||||
}
|
||||
async fn write_all_at(&self, buf: &[u8], offset: u64) -> Result<(), Error> {
|
||||
match self {
|
||||
MaybeVirtualFile::VirtualFile(file) => file.write_all_at(buf, offset).await,
|
||||
MaybeVirtualFile::File(file) => file.write_all_at(buf, offset),
|
||||
}
|
||||
}
|
||||
fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
|
||||
match self {
|
||||
MaybeVirtualFile::VirtualFile(file) => file.seek(pos),
|
||||
MaybeVirtualFile::File(file) => file.seek(pos),
|
||||
}
|
||||
}
|
||||
async fn write_all(&mut self, buf: &[u8]) -> Result<(), Error> {
|
||||
match self {
|
||||
MaybeVirtualFile::VirtualFile(file) => file.write_all(buf),
|
||||
MaybeVirtualFile::File(file) => file.write_all(buf),
|
||||
}
|
||||
}
|
||||
|
||||
// Helper function to slurp contents of a file, starting at the current position,
|
||||
// into a string
|
||||
async fn read_string(&mut self) -> Result<String, Error> {
|
||||
use std::io::Read;
|
||||
let mut buf = String::new();
|
||||
match self {
|
||||
MaybeVirtualFile::VirtualFile(file) => file.read_to_string(&mut buf)?,
|
||||
MaybeVirtualFile::File(file) => file.read_to_string(&mut buf)?,
|
||||
}
|
||||
Ok(buf)
|
||||
}
|
||||
|
||||
// Helper function to slurp a portion of a file into a string
|
||||
async fn read_string_at(&mut self, pos: u64, len: usize) -> Result<String, Error> {
|
||||
let mut buf = Vec::new();
|
||||
buf.resize(len, 0);
|
||||
self.read_exact_at(&mut buf, pos)?;
|
||||
Ok(String::from_utf8(buf).unwrap())
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_virtual_files() -> Result<(), Error> {
|
||||
#[tokio::test]
|
||||
async fn test_virtual_files() -> Result<(), Error> {
|
||||
// The real work is done in the test_files() helper function. This
|
||||
// allows us to run the same set of tests against a native File, and
|
||||
// VirtualFile. We trust the native Files and wouldn't need to test them,
|
||||
@@ -504,21 +577,23 @@ mod tests {
|
||||
// native files, you will run out of file descriptors if the ulimit
|
||||
// is low enough.)
|
||||
test_files("virtual_files", |path, open_options| {
|
||||
VirtualFile::open_with_options(path, open_options)
|
||||
let vf = VirtualFile::open_with_options(path, open_options)?;
|
||||
Ok(MaybeVirtualFile::VirtualFile(vf))
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_physical_files() -> Result<(), Error> {
|
||||
#[tokio::test]
|
||||
async fn test_physical_files() -> Result<(), Error> {
|
||||
test_files("physical_files", |path, open_options| {
|
||||
open_options.open(path)
|
||||
Ok(MaybeVirtualFile::File(open_options.open(path)?))
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
fn test_files<OF, FD>(testname: &str, openfunc: OF) -> Result<(), Error>
|
||||
async fn test_files<OF>(testname: &str, openfunc: OF) -> Result<(), Error>
|
||||
where
|
||||
FD: Read + Write + Seek + FileExt,
|
||||
OF: Fn(&Path, &OpenOptions) -> Result<FD, std::io::Error>,
|
||||
OF: Fn(&Path, &OpenOptions) -> Result<MaybeVirtualFile, std::io::Error>,
|
||||
{
|
||||
let testdir = crate::config::PageServerConf::test_repo_dir(testname);
|
||||
std::fs::create_dir_all(&testdir)?;
|
||||
@@ -528,36 +603,36 @@ mod tests {
|
||||
&path_a,
|
||||
OpenOptions::new().write(true).create(true).truncate(true),
|
||||
)?;
|
||||
file_a.write_all(b"foobar")?;
|
||||
file_a.write_all(b"foobar").await?;
|
||||
|
||||
// cannot read from a file opened in write-only mode
|
||||
assert!(read_string(&mut file_a).is_err());
|
||||
assert!(file_a.read_string().await.is_err());
|
||||
|
||||
// Close the file and re-open for reading
|
||||
let mut file_a = openfunc(&path_a, OpenOptions::new().read(true))?;
|
||||
|
||||
// cannot write to a file opened in read-only mode
|
||||
assert!(file_a.write(b"bar").is_err());
|
||||
assert!(file_a.write_all(b"bar").await.is_err());
|
||||
|
||||
// Try simple read
|
||||
assert_eq!("foobar", read_string(&mut file_a)?);
|
||||
assert_eq!("foobar", file_a.read_string().await?);
|
||||
|
||||
// It's positioned at the EOF now.
|
||||
assert_eq!("", read_string(&mut file_a)?);
|
||||
assert_eq!("", file_a.read_string().await?);
|
||||
|
||||
// Test seeks.
|
||||
assert_eq!(file_a.seek(SeekFrom::Start(1))?, 1);
|
||||
assert_eq!("oobar", read_string(&mut file_a)?);
|
||||
assert_eq!("oobar", file_a.read_string().await?);
|
||||
|
||||
assert_eq!(file_a.seek(SeekFrom::End(-2))?, 4);
|
||||
assert_eq!("ar", read_string(&mut file_a)?);
|
||||
assert_eq!("ar", file_a.read_string().await?);
|
||||
|
||||
assert_eq!(file_a.seek(SeekFrom::Start(1))?, 1);
|
||||
assert_eq!(file_a.seek(SeekFrom::Current(2))?, 3);
|
||||
assert_eq!("bar", read_string(&mut file_a)?);
|
||||
assert_eq!("bar", file_a.read_string().await?);
|
||||
|
||||
assert_eq!(file_a.seek(SeekFrom::Current(-5))?, 1);
|
||||
assert_eq!("oobar", read_string(&mut file_a)?);
|
||||
assert_eq!("oobar", file_a.read_string().await?);
|
||||
|
||||
// Test erroneous seeks to before byte 0
|
||||
assert!(file_a.seek(SeekFrom::End(-7)).is_err());
|
||||
@@ -565,7 +640,7 @@ mod tests {
|
||||
assert!(file_a.seek(SeekFrom::Current(-2)).is_err());
|
||||
|
||||
// the erroneous seek should have left the position unchanged
|
||||
assert_eq!("oobar", read_string(&mut file_a)?);
|
||||
assert_eq!("oobar", file_a.read_string().await?);
|
||||
|
||||
// Create another test file, and try FileExt functions on it.
|
||||
let path_b = testdir.join("file_b");
|
||||
@@ -577,10 +652,10 @@ mod tests {
|
||||
.create(true)
|
||||
.truncate(true),
|
||||
)?;
|
||||
file_b.write_all_at(b"BAR", 3)?;
|
||||
file_b.write_all_at(b"FOO", 0)?;
|
||||
file_b.write_all_at(b"BAR", 3).await?;
|
||||
file_b.write_all_at(b"FOO", 0).await?;
|
||||
|
||||
assert_eq!(read_string_at(&mut file_b, 2, 3)?, "OBA");
|
||||
assert_eq!(file_b.read_string_at(2, 3).await?, "OBA");
|
||||
|
||||
// Open a lot of files, enough to cause some evictions. (Or to be precise,
|
||||
// open the same file many times. The effect is the same.)
|
||||
@@ -591,7 +666,7 @@ mod tests {
|
||||
let mut vfiles = Vec::new();
|
||||
for _ in 0..100 {
|
||||
let mut vfile = openfunc(&path_b, OpenOptions::new().read(true))?;
|
||||
assert_eq!("FOOBAR", read_string(&mut vfile)?);
|
||||
assert_eq!("FOOBAR", vfile.read_string().await?);
|
||||
vfiles.push(vfile);
|
||||
}
|
||||
|
||||
@@ -600,13 +675,13 @@ mod tests {
|
||||
|
||||
// The underlying file descriptor for 'file_a' should be closed now. Try to read
|
||||
// from it again. We left the file positioned at offset 1 above.
|
||||
assert_eq!("oobar", read_string(&mut file_a)?);
|
||||
assert_eq!("oobar", file_a.read_string().await?);
|
||||
|
||||
// Check that all the other FDs still work too. Use them in random order for
|
||||
// good measure.
|
||||
vfiles.as_mut_slice().shuffle(&mut thread_rng());
|
||||
for vfile in vfiles.iter_mut() {
|
||||
assert_eq!("OOBAR", read_string_at(vfile, 1, 5)?);
|
||||
assert_eq!("OOBAR", vfile.read_string_at(1, 5).await?);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -12,13 +12,19 @@ pub struct PasswordHackPayload {

impl PasswordHackPayload {
pub fn parse(bytes: &[u8]) -> Option<Self> {
// The format is `project=<utf-8>;<password-bytes>`.
let mut iter = bytes.splitn_str(2, ";");
let endpoint = iter.next()?.to_str().ok()?;
let endpoint = parse_endpoint_param(endpoint)?.to_owned();
let password = iter.next()?.to_owned();
// The format is `project=<utf-8>;<password-bytes>` or `project=<utf-8>$<password-bytes>`.
let separators = [";", "$"];
for sep in separators {
if let Some((endpoint, password)) = bytes.split_once_str(sep) {
let endpoint = endpoint.to_str().ok()?;
return Some(Self {
endpoint: parse_endpoint_param(endpoint)?.to_owned(),
password: password.to_owned(),
});
}
}

Some(Self { endpoint, password })
None
}
}

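Concretely, both separators are now accepted, with `;` tried first, and everything after the separator is kept verbatim as the password bytes. A short sketch mirroring the tests below:

    fn password_hack_examples() {
        // Semicolon separator: the rest of the payload, including further ';', is the password.
        let p = PasswordHackPayload::parse(b"project=foobar;pass;word").expect("parse");
        assert_eq!(p.endpoint, "foobar");
        assert_eq!(p.password, b"pass;word");

        // New dollar separator behaves the same way.
        let p = PasswordHackPayload::parse(b"project=foobar$pass$word").expect("parse");
        assert_eq!(p.endpoint, "foobar");
        assert_eq!(p.password, b"pass$word");
    }
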
@@ -91,4 +97,23 @@ mod tests {
|
||||
assert_eq!(payload.endpoint, "foobar");
|
||||
assert_eq!(payload.password, b"pass;word");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_password_hack_payload_dollar() {
|
||||
let bytes = b"";
|
||||
assert!(PasswordHackPayload::parse(bytes).is_none());
|
||||
|
||||
let bytes = b"endpoint=";
|
||||
assert!(PasswordHackPayload::parse(bytes).is_none());
|
||||
|
||||
let bytes = b"endpoint=$";
|
||||
let payload = PasswordHackPayload::parse(bytes).expect("parsing failed");
|
||||
assert_eq!(payload.endpoint, "");
|
||||
assert_eq!(payload.password, b"");
|
||||
|
||||
let bytes = b"endpoint=foobar$pass$word";
|
||||
let payload = PasswordHackPayload::parse(bytes).expect("parsing failed");
|
||||
assert_eq!(payload.endpoint, "foobar");
|
||||
assert_eq!(payload.password, b"pass$word");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -63,8 +63,8 @@ pub mod errors {
format!("{REQUEST_FAILED}: endpoint is disabled")
}
http::StatusCode::LOCKED => {
// Status 423: project might be in maintenance mode (or bad state).
format!("{REQUEST_FAILED}: endpoint is temporary unavailable")
// Status 423: project might be in maintenance mode (or bad state), or quotas exceeded.
format!("{REQUEST_FAILED}: endpoint is temporarily unavailable. Check your quotas and/or contact our support")
}
_ => REQUEST_FAILED.to_owned(),
},
@@ -81,9 +81,15 @@ pub mod errors {
|
||||
// retry some temporary failures because the compute was in a bad state
|
||||
// (bad request can be returned when the endpoint was in transition)
|
||||
Self::Console {
|
||||
status: http::StatusCode::BAD_REQUEST | http::StatusCode::LOCKED,
|
||||
status: http::StatusCode::BAD_REQUEST,
|
||||
..
|
||||
} => true,
|
||||
// locked can be returned when the endpoint was in transition
|
||||
// or when quotas are exceeded. don't retry when quotas are exceeded
|
||||
Self::Console {
|
||||
status: http::StatusCode::LOCKED,
|
||||
ref text,
|
||||
} => !text.contains("quota"),
|
||||
// retry server errors
|
||||
Self::Console { status, .. } if status.is_server_error() => true,
|
||||
_ => false,
|
||||
|
||||
@@ -16,12 +16,21 @@ use tracing::{error, info, info_span, warn, Instrument};
|
||||
pub struct Api {
|
||||
endpoint: http::Endpoint,
|
||||
caches: &'static ApiCaches,
|
||||
jwt: String,
|
||||
}
|
||||
|
||||
impl Api {
|
||||
/// Construct an API object containing the auth parameters.
|
||||
pub fn new(endpoint: http::Endpoint, caches: &'static ApiCaches) -> Self {
|
||||
Self { endpoint, caches }
|
||||
let jwt: String = match std::env::var("NEON_PROXY_TO_CONTROLPLANE_TOKEN") {
|
||||
Ok(v) => v,
|
||||
Err(_) => "".to_string(),
|
||||
};
|
||||
Self {
|
||||
endpoint,
|
||||
caches,
|
||||
jwt,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn url(&self) -> &str {
|
||||
@@ -39,6 +48,7 @@ impl Api {
|
||||
.endpoint
|
||||
.get("proxy_get_role_secret")
|
||||
.header("X-Request-ID", &request_id)
|
||||
.header("Authorization", &self.jwt)
|
||||
.query(&[("session_id", extra.session_id)])
|
||||
.query(&[
|
||||
("application_name", extra.application_name),
|
||||
@@ -83,6 +93,7 @@ impl Api {
|
||||
.endpoint
|
||||
.get("proxy_wake_compute")
|
||||
.header("X-Request-ID", &request_id)
|
||||
.header("Authorization", &self.jwt)
|
||||
.query(&[("session_id", extra.session_id)])
|
||||
.query(&[
|
||||
("application_name", extra.application_name),
|
||||
|
||||
@@ -2,6 +2,7 @@ use crate::{
|
||||
cancellation::CancelMap,
|
||||
config::ProxyConfig,
|
||||
error::io_error,
|
||||
protocol2::{ProxyProtocolAccept, WithClientIp},
|
||||
proxy::{handle_client, ClientMode},
|
||||
};
|
||||
use bytes::{Buf, Bytes};
|
||||
@@ -292,6 +293,9 @@ pub async fn task_main(
|
||||
|
||||
let mut addr_incoming = AddrIncoming::from_listener(ws_listener)?;
|
||||
let _ = addr_incoming.set_nodelay(true);
|
||||
let addr_incoming = ProxyProtocolAccept {
|
||||
incoming: addr_incoming,
|
||||
};
|
||||
|
||||
let tls_listener = TlsListener::new(tls_acceptor, addr_incoming).filter(|conn| {
|
||||
if let Err(err) = conn {
|
||||
@@ -302,9 +306,11 @@ pub async fn task_main(
|
||||
}
|
||||
});
|
||||
|
||||
let make_svc =
|
||||
hyper::service::make_service_fn(|stream: &tokio_rustls::server::TlsStream<AddrStream>| {
|
||||
let sni_name = stream.get_ref().1.sni_hostname().map(|s| s.to_string());
|
||||
let make_svc = hyper::service::make_service_fn(
|
||||
|stream: &tokio_rustls::server::TlsStream<WithClientIp<AddrStream>>| {
|
||||
let (io, tls) = stream.get_ref();
|
||||
let peer_addr = io.client_addr().unwrap_or(io.inner.remote_addr());
|
||||
let sni_name = tls.server_name().map(|s| s.to_string());
|
||||
let conn_pool = conn_pool.clone();
|
||||
|
||||
async move {
|
||||
@@ -319,13 +325,15 @@ pub async fn task_main(
|
||||
ws_handler(req, config, conn_pool, cancel_map, session_id, sni_name)
|
||||
.instrument(info_span!(
|
||||
"ws-client",
|
||||
session = %session_id
|
||||
session = %session_id,
|
||||
%peer_addr,
|
||||
))
|
||||
.await
|
||||
}
|
||||
}))
|
||||
}
|
||||
});
|
||||
},
|
||||
);
|
||||
|
||||
hyper::Server::builder(accept::from_stream(tls_listener))
|
||||
.serve(make_svc)
|
||||
|
||||
@@ -16,6 +16,7 @@ pub mod http;
|
||||
pub mod logging;
|
||||
pub mod metrics;
|
||||
pub mod parse;
|
||||
pub mod protocol2;
|
||||
pub mod proxy;
|
||||
pub mod sasl;
|
||||
pub mod scram;
|
||||
|
||||
proxy/src/protocol2.rs (new file, 479 lines)
@@ -0,0 +1,479 @@
|
||||
//! Proxy Protocol V2 implementation
|
||||
|
||||
use std::{
|
||||
future::poll_fn,
|
||||
future::Future,
|
||||
io,
|
||||
net::SocketAddr,
|
||||
pin::{pin, Pin},
|
||||
task::{ready, Context, Poll},
|
||||
};
|
||||
|
||||
use bytes::{Buf, BytesMut};
|
||||
use hyper::server::conn::{AddrIncoming, AddrStream};
|
||||
use pin_project_lite::pin_project;
|
||||
use tls_listener::AsyncAccept;
|
||||
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, ReadBuf};
|
||||
|
||||
pub struct ProxyProtocolAccept {
|
||||
pub incoming: AddrIncoming,
|
||||
}
|
||||
|
||||
pin_project! {
|
||||
pub struct WithClientIp<T> {
|
||||
#[pin]
|
||||
pub inner: T,
|
||||
buf: BytesMut,
|
||||
tlv_bytes: u16,
|
||||
state: ProxyParse,
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, Debug)]
|
||||
enum ProxyParse {
|
||||
NotStarted,
|
||||
|
||||
Finished(SocketAddr),
|
||||
None,
|
||||
}
|
||||
|
||||
impl<T: AsyncWrite> AsyncWrite for WithClientIp<T> {
|
||||
#[inline]
|
||||
fn poll_write(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &[u8],
|
||||
) -> Poll<Result<usize, io::Error>> {
|
||||
self.project().inner.poll_write(cx, buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
|
||||
self.project().inner.poll_flush(cx)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
|
||||
self.project().inner.poll_shutdown(cx)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_write_vectored(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
bufs: &[io::IoSlice<'_>],
|
||||
) -> Poll<Result<usize, io::Error>> {
|
||||
self.project().inner.poll_write_vectored(cx, bufs)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
self.inner.is_write_vectored()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> WithClientIp<T> {
|
||||
pub fn new(inner: T) -> Self {
|
||||
WithClientIp {
|
||||
inner,
|
||||
buf: BytesMut::with_capacity(128),
|
||||
tlv_bytes: 0,
|
||||
state: ProxyParse::NotStarted,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn client_addr(&self) -> Option<SocketAddr> {
|
||||
match self.state {
|
||||
ProxyParse::Finished(socket) => Some(socket),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AsyncRead + Unpin> WithClientIp<T> {
|
||||
pub async fn wait_for_addr(&mut self) -> io::Result<Option<SocketAddr>> {
|
||||
match self.state {
|
||||
ProxyParse::NotStarted => {
|
||||
let mut pin = Pin::new(&mut *self);
|
||||
let addr = poll_fn(|cx| pin.as_mut().poll_client_ip(cx)).await?;
|
||||
match addr {
|
||||
Some(addr) => self.state = ProxyParse::Finished(addr),
|
||||
None => self.state = ProxyParse::None,
|
||||
}
|
||||
Ok(addr)
|
||||
}
|
||||
ProxyParse::Finished(addr) => Ok(Some(addr)),
|
||||
ProxyParse::None => Ok(None),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Proxy Protocol Version 2 Header
|
||||
const HEADER: [u8; 12] = [
|
||||
0x0D, 0x0A, 0x0D, 0x0A, 0x00, 0x0D, 0x0A, 0x51, 0x55, 0x49, 0x54, 0x0A,
|
||||
];
|
||||
|
||||
impl<T: AsyncRead> WithClientIp<T> {
|
||||
/// implementation of <https://www.haproxy.org/download/2.4/doc/proxy-protocol.txt>
|
||||
/// Version 2 (Binary Format)
|
||||
fn poll_client_ip(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<io::Result<Option<SocketAddr>>> {
|
||||
// The binary header format starts with a constant 12 bytes block containing the protocol signature :
|
||||
// \x0D \x0A \x0D \x0A \x00 \x0D \x0A \x51 \x55 \x49 \x54 \x0A
|
||||
while self.buf.len() < 16 {
|
||||
let mut this = self.as_mut().project();
|
||||
let bytes_read = pin!(this.inner.read_buf(this.buf)).poll(cx)?;
|
||||
|
||||
// exit for bad header
|
||||
let len = usize::min(self.buf.len(), HEADER.len());
|
||||
if self.buf[..len] != HEADER[..len] {
|
||||
return Poll::Ready(Ok(None));
|
||||
}
|
||||
|
||||
// if no more bytes available then exit
|
||||
if ready!(bytes_read) == 0 {
|
||||
return Poll::Ready(Ok(None));
|
||||
};
|
||||
}
|
||||
|
||||
// The next byte (the 13th one) is the protocol version and command.
|
||||
// The highest four bits contains the version. As of this specification, it must
|
||||
// always be sent as \x2 and the receiver must only accept this value.
|
||||
let vc = self.buf[12];
|
||||
let version = vc >> 4;
|
||||
let command = vc & 0b1111;
|
||||
if version != 2 {
|
||||
return Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"invalid proxy protocol version. expected version 2",
|
||||
)));
|
||||
}
|
||||
match command {
|
||||
// the connection was established on purpose by the proxy
|
||||
// without being relayed. The connection endpoints are the sender and the
|
||||
// receiver. Such connections exist when the proxy sends health-checks to the
|
||||
// server. The receiver must accept this connection as valid and must use the
|
||||
// real connection endpoints and discard the protocol block including the
|
||||
// family which is ignored.
|
||||
0 => {}
|
||||
// the connection was established on behalf of another node,
|
||||
// and reflects the original connection endpoints. The receiver must then use
|
||||
// the information provided in the protocol block to get the original address.
|
||||
1 => {}
|
||||
// other values are unassigned and must not be emitted by senders. Receivers
|
||||
// must drop connections presenting unexpected values here.
|
||||
_ => {
|
||||
return Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"invalid proxy protocol command. expected local (0) or proxy (1)",
|
||||
)))
|
||||
}
|
||||
};
|
||||
|
||||
// The 14th byte contains the transport protocol and address family. The highest 4
|
||||
// bits contain the address family, the lowest 4 bits contain the protocol.
|
||||
let ft = self.buf[13];
|
||||
let address_length = match ft {
|
||||
// - \x11 : TCP over IPv4 : the forwarded connection uses TCP over the AF_INET
|
||||
// protocol family. Address length is 2*4 + 2*2 = 12 bytes.
|
||||
// - \x12 : UDP over IPv4 : the forwarded connection uses UDP over the AF_INET
|
||||
// protocol family. Address length is 2*4 + 2*2 = 12 bytes.
|
||||
0x11 | 0x12 => 12,
|
||||
// - \x21 : TCP over IPv6 : the forwarded connection uses TCP over the AF_INET6
|
||||
// protocol family. Address length is 2*16 + 2*2 = 36 bytes.
|
||||
// - \x22 : UDP over IPv6 : the forwarded connection uses UDP over the AF_INET6
|
||||
// protocol family. Address length is 2*16 + 2*2 = 36 bytes.
|
||||
0x21 | 0x22 => 36,
|
||||
// unspecified or unix stream. ignore the addresses
|
||||
_ => 0,
|
||||
};
|
||||
|
||||
// The 15th and 16th bytes is the address length in bytes in network endian order.
|
||||
// It is used so that the receiver knows how many address bytes to skip even when
|
||||
// it does not implement the presented protocol. Thus the length of the protocol
|
||||
// header in bytes is always exactly 16 + this value. When a sender presents a
|
||||
// LOCAL connection, it should not present any address so it sets this field to
|
||||
// zero. Receivers MUST always consider this field to skip the appropriate number
|
||||
// of bytes and must not assume zero is presented for LOCAL connections. When a
|
||||
// receiver accepts an incoming connection showing an UNSPEC address family or
|
||||
// protocol, it may or may not decide to log the address information if present.
|
||||
let remaining_length = u16::from_be_bytes(self.buf[14..16].try_into().unwrap());
|
||||
if remaining_length < address_length {
|
||||
return Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"invalid proxy protocol length. not enough to fit requested IP addresses",
|
||||
)));
|
||||
}
|
||||
|
||||
while self.buf.len() < 16 + address_length as usize {
|
||||
let mut this = self.as_mut().project();
|
||||
if ready!(pin!(this.inner.read_buf(this.buf)).poll(cx)?) == 0 {
|
||||
return Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::UnexpectedEof,
|
||||
"stream closed while waiting for proxy protocol addresses",
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
||||
let this = self.as_mut().project();
|
||||
|
||||
// we are sure this is a proxy protocol v2 entry and we have read all the bytes we need
|
||||
// discard the header we have parsed
|
||||
this.buf.advance(16);
|
||||
|
||||
// Starting from the 17th byte, addresses are presented in network byte order.
|
||||
// The address order is always the same :
|
||||
// - source layer 3 address in network byte order
|
||||
// - destination layer 3 address in network byte order
|
||||
// - source layer 4 address if any, in network byte order (port)
|
||||
// - destination layer 4 address if any, in network byte order (port)
|
||||
let addresses = this.buf.split_to(address_length as usize);
|
||||
let socket = match address_length {
|
||||
12 => {
|
||||
let src_addr: [u8; 4] = addresses[0..4].try_into().unwrap();
|
||||
let src_port = u16::from_be_bytes(addresses[8..10].try_into().unwrap());
|
||||
Some(SocketAddr::from((src_addr, src_port)))
|
||||
}
|
||||
36 => {
|
||||
let src_addr: [u8; 16] = addresses[0..16].try_into().unwrap();
|
||||
let src_port = u16::from_be_bytes(addresses[32..34].try_into().unwrap());
|
||||
Some(SocketAddr::from((src_addr, src_port)))
|
||||
}
|
||||
_ => None,
|
||||
};
|
||||
|
||||
*this.tlv_bytes = remaining_length - address_length;
|
||||
self.as_mut().skip_tlv_inner();
|
||||
|
||||
Poll::Ready(Ok(socket))
|
||||
}
|
||||
|
||||
#[cold]
|
||||
fn read_ip(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
let ip = ready!(self.as_mut().poll_client_ip(cx)?);
|
||||
match ip {
|
||||
Some(x) => *self.as_mut().project().state = ProxyParse::Finished(x),
|
||||
None => *self.as_mut().project().state = ProxyParse::None,
|
||||
}
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
#[cold]
|
||||
fn skip_tlv(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
let mut this = self.as_mut().project();
|
||||
// we know that this.buf is empty
|
||||
debug_assert_eq!(this.buf.len(), 0);
|
||||
|
||||
this.buf.reserve((*this.tlv_bytes).clamp(0, 1024) as usize);
|
||||
ready!(pin!(this.inner.read_buf(this.buf)).poll(cx)?);
|
||||
self.skip_tlv_inner();
|
||||
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn skip_tlv_inner(self: Pin<&mut Self>) {
|
||||
let tlv_bytes_read = match u16::try_from(self.buf.len()) {
|
||||
// we read more than u16::MAX therefore we must have read the full tlv_bytes
|
||||
Err(_) => self.tlv_bytes,
|
||||
// we might not have read the full tlv bytes yet
|
||||
Ok(n) => u16::min(n, self.tlv_bytes),
|
||||
};
|
||||
let this = self.project();
|
||||
*this.tlv_bytes -= tlv_bytes_read;
|
||||
this.buf.advance(tlv_bytes_read as usize);
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AsyncRead> AsyncRead for WithClientIp<T> {
|
||||
#[inline]
|
||||
fn poll_read(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &mut ReadBuf<'_>,
|
||||
) -> Poll<io::Result<()>> {
|
||||
// I'm assuming these 3 comparisons will be easy to branch predict.
|
||||
// especially with the cold attributes
|
||||
// which should make this read wrapper almost invisible
|
||||
|
||||
if let ProxyParse::NotStarted = self.state {
|
||||
ready!(self.as_mut().read_ip(cx)?);
|
||||
}
|
||||
|
||||
while self.tlv_bytes > 0 {
|
||||
ready!(self.as_mut().skip_tlv(cx)?)
|
||||
}
|
||||
|
||||
let this = self.project();
|
||||
if this.buf.is_empty() {
|
||||
this.inner.poll_read(cx, buf)
|
||||
} else {
|
||||
// we know that tlv_bytes is 0
|
||||
debug_assert_eq!(*this.tlv_bytes, 0);
|
||||
|
||||
let write = usize::min(this.buf.len(), buf.remaining());
|
||||
let slice = this.buf.split_to(write).freeze();
|
||||
buf.put_slice(&slice);
|
||||
|
||||
// reset the allocation so it can be freed
|
||||
if this.buf.is_empty() {
|
||||
*this.buf = BytesMut::new();
|
||||
}
|
||||
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl AsyncAccept for ProxyProtocolAccept {
|
||||
type Connection = WithClientIp<AddrStream>;
|
||||
|
||||
type Error = io::Error;
|
||||
|
||||
fn poll_accept(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Self::Connection, Self::Error>>> {
|
||||
let conn = ready!(Pin::new(&mut self.incoming).poll_accept(cx)?);
|
||||
let Some(conn) = conn else {
|
||||
return Poll::Ready(None);
|
||||
};
|
||||
|
||||
Poll::Ready(Some(Ok(WithClientIp::new(conn))))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::pin::pin;
|
||||
|
||||
use tokio::io::AsyncReadExt;
|
||||
|
||||
use crate::protocol2::{ProxyParse, WithClientIp};
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_ipv4() {
|
||||
let header = super::HEADER
|
||||
// Proxy command, IPV4 | TCP
|
||||
.chain([(2 << 4) | 1, (1 << 4) | 1].as_slice())
|
||||
// 12 + 3 bytes
|
||||
.chain([0, 15].as_slice())
|
||||
// src ip
|
||||
.chain([127, 0, 0, 1].as_slice())
|
||||
// dst ip
|
||||
.chain([192, 168, 0, 1].as_slice())
|
||||
// src port
|
||||
.chain([255, 255].as_slice())
|
||||
// dst port
|
||||
.chain([1, 1].as_slice())
|
||||
// TLV
|
||||
.chain([1, 2, 3].as_slice());
|
||||
|
||||
let extra_data = [0x55; 256];
|
||||
|
||||
let mut read = pin!(WithClientIp::new(header.chain(extra_data.as_slice())));
|
||||
|
||||
let mut bytes = vec![];
|
||||
read.read_to_end(&mut bytes).await.unwrap();
|
||||
|
||||
assert_eq!(bytes, extra_data);
|
||||
assert_eq!(
|
||||
read.state,
|
||||
ProxyParse::Finished(([127, 0, 0, 1], 65535).into())
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_ipv6() {
|
||||
let header = super::HEADER
|
||||
// Proxy command, IPV6 | UDP
|
||||
.chain([(2 << 4) | 1, (2 << 4) | 2].as_slice())
|
||||
// 36 + 3 bytes
|
||||
.chain([0, 39].as_slice())
|
||||
// src ip
|
||||
.chain([15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0].as_slice())
|
||||
// dst ip
|
||||
.chain([0, 15, 1, 14, 2, 13, 3, 12, 4, 11, 5, 10, 6, 9, 7, 8].as_slice())
|
||||
// src port
|
||||
.chain([1, 1].as_slice())
|
||||
// dst port
|
||||
.chain([255, 255].as_slice())
|
||||
// TLV
|
||||
.chain([1, 2, 3].as_slice());
|
||||
|
||||
let extra_data = [0x55; 256];
|
||||
|
||||
let mut read = pin!(WithClientIp::new(header.chain(extra_data.as_slice())));
|
||||
|
||||
let mut bytes = vec![];
|
||||
read.read_to_end(&mut bytes).await.unwrap();
|
||||
|
||||
assert_eq!(bytes, extra_data);
|
||||
assert_eq!(
|
||||
read.state,
|
||||
ProxyParse::Finished(
|
||||
([15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0], 257).into()
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_invalid() {
|
||||
let data = [0x55; 256];
|
||||
|
||||
let mut read = pin!(WithClientIp::new(data.as_slice()));
|
||||
|
||||
let mut bytes = vec![];
|
||||
read.read_to_end(&mut bytes).await.unwrap();
|
||||
assert_eq!(bytes, data);
|
||||
assert_eq!(read.state, ProxyParse::None);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_short() {
|
||||
let data = [0x55; 10];
|
||||
|
||||
let mut read = pin!(WithClientIp::new(data.as_slice()));
|
||||
|
||||
let mut bytes = vec![];
|
||||
read.read_to_end(&mut bytes).await.unwrap();
|
||||
assert_eq!(bytes, data);
|
||||
assert_eq!(read.state, ProxyParse::None);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_large_tlv() {
|
||||
let tlv = vec![0x55; 32768];
|
||||
let len = (12 + tlv.len() as u16).to_be_bytes();
|
||||
|
||||
let header = super::HEADER
|
||||
// Proxy command, Inet << 4 | Stream
|
||||
.chain([(2 << 4) | 1, (1 << 4) | 1].as_slice())
|
||||
// 12 + 3 bytes
|
||||
.chain(len.as_slice())
|
||||
// src ip
|
||||
.chain([55, 56, 57, 58].as_slice())
|
||||
// dst ip
|
||||
.chain([192, 168, 0, 1].as_slice())
|
||||
// src port
|
||||
.chain([255, 255].as_slice())
|
||||
// dst port
|
||||
.chain([1, 1].as_slice())
|
||||
// TLV
|
||||
.chain(tlv.as_slice());
|
||||
|
||||
let extra_data = [0xaa; 256];
|
||||
|
||||
let mut read = pin!(WithClientIp::new(header.chain(extra_data.as_slice())));
|
||||
|
||||
let mut bytes = vec![];
|
||||
read.read_to_end(&mut bytes).await.unwrap();
|
||||
|
||||
assert_eq!(bytes, extra_data);
|
||||
assert_eq!(
|
||||
read.state,
|
||||
ProxyParse::Finished(([55, 56, 57, 58], 65535).into())
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -7,6 +7,7 @@ use crate::{
|
||||
compute::{self, PostgresConnection},
|
||||
config::{ProxyConfig, TlsConfig},
|
||||
console::{self, errors::WakeComputeError, messages::MetricsAuxInfo, Api},
|
||||
protocol2::WithClientIp,
|
||||
stream::{PqStream, Stream},
|
||||
};
|
||||
use anyhow::{bail, Context};
|
||||
@@ -100,7 +101,7 @@ pub async fn task_main(
|
||||
loop {
|
||||
tokio::select! {
|
||||
accept_result = listener.accept() => {
|
||||
let (socket, peer_addr) = accept_result?;
|
||||
let (socket, _) = accept_result?;
|
||||
|
||||
let session_id = uuid::Uuid::new_v4();
|
||||
let cancel_map = Arc::clone(&cancel_map);
|
||||
@@ -108,13 +109,19 @@ pub async fn task_main(
|
||||
async move {
|
||||
info!("accepted postgres client connection");
|
||||
|
||||
let mut socket = WithClientIp::new(socket);
|
||||
if let Some(ip) = socket.wait_for_addr().await? {
|
||||
tracing::Span::current().record("peer_addr", &tracing::field::display(ip));
|
||||
}
|
||||
|
||||
socket
|
||||
.inner
|
||||
.set_nodelay(true)
|
||||
.context("failed to set socket option")?;
|
||||
|
||||
handle_client(config, &cancel_map, session_id, socket, ClientMode::Tcp).await
|
||||
}
|
||||
.instrument(info_span!("handle_client", ?session_id, %peer_addr))
|
||||
.instrument(info_span!("handle_client", ?session_id, peer_addr = tracing::field::Empty))
|
||||
.unwrap_or_else(move |e| {
|
||||
// Acknowledge that the task has finished with an error.
|
||||
error!(?session_id, "per-client task finished with an error: {e:#}");
|
||||
|
||||
@@ -137,6 +137,7 @@ async fn dummy_proxy(
|
||||
auth: impl TestAuth + Send,
|
||||
) -> anyhow::Result<()> {
|
||||
let cancel_map = CancelMap::default();
|
||||
let client = WithClientIp::new(client);
|
||||
let (mut stream, _params) = handshake(client, tls.as_ref(), &cancel_map)
|
||||
.await?
|
||||
.context("handshake failed")?;
|
||||
|
||||
@@ -141,7 +141,7 @@ impl<S> Stream<S> {
|
||||
pub fn sni_hostname(&self) -> Option<&str> {
|
||||
match self {
|
||||
Stream::Raw { .. } => None,
|
||||
Stream::Tls { tls } => tls.get_ref().1.sni_hostname(),
|
||||
Stream::Tls { tls } => tls.get_ref().1.server_name(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
s3_scrubber/Cargo.toml (new file, 39 lines)
@@ -0,0 +1,39 @@
[package]
name = "s3_scrubber"
version = "0.1.0"
edition.workspace = true
license.workspace = true

[dependencies]
aws-sdk-s3.workspace = true
aws-smithy-http.workspace = true
aws-types.workspace = true
either.workspace = true
tokio-rustls.workspace = true
anyhow.workspace = true
hex.workspace = true
thiserror.workspace = true
rand.workspace = true
bytes.workspace = true
bincode.workspace = true
crc32c.workspace = true
serde.workspace = true
serde_json.workspace = true
serde_with.workspace = true
workspace_hack.workspace = true
utils.workspace = true

tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
chrono = { workspace = true, default-features = false, features = ["clock", "serde"] }
reqwest = { workspace = true, default-features = false, features = ["rustls-tls", "json"] }
aws-config = { workspace = true, default-features = false, features = ["rustls", "credentials-sso"] }

pageserver = { path = "../pageserver" }

tracing.workspace = true
tracing-subscriber.workspace = true
clap.workspace = true

atty = "0.2"
tracing-appender = "0.2"
s3_scrubber/README.md (new file, 93 lines)
@@ -0,0 +1,93 @@
# Neon S3 scrubber

This tool directly accesses the S3 buckets used by the Neon `pageserver`
and `safekeeper`, and does housekeeping such as cleaning up objects for tenants & timelines that no longer exist.

## Usage

### Generic Parameters

#### S3

Run `aws sso login --profile dev` to get SSO access to the bucket to clean, then look up the SSO_ACCOUNT_ID for your profile (`cat ~/.aws/config` may help).

- `SSO_ACCOUNT_ID`: Credentials ID to use for accessing S3 buckets
- `REGION`: The region where the bucket is located.
- `BUCKET`: Bucket name

#### Console API

_This section is only relevant if using a command that requires access to Neon's internal control plane_

- `CLOUD_ADMIN_API_URL`: The base URL to use for checking tenant/timeline existence via the Cloud API, e.g. `https://<admin host>/admin`
- `CLOUD_ADMIN_API_TOKEN`: The token to provide when querying the admin API. Get one on the corresponding console page, e.g. `https://<admin host>/app/settings/api-keys`
### Commands

#### `tidy`

Iterate over the S3 buckets for storage nodes, checking their contents and removing data not present in the console. Node S3 data that is not removed is then further checked for discrepancies and, sometimes, validated.

Unless the global `--delete` argument is provided, this command only dry-runs and logs
what it would have deleted.

```
tidy --node-kind=<safekeeper|pageserver> [--depth=<tenant|timeline>] [--skip-validation]
```

- `--node-kind`: whether to inspect the safekeeper or pageserver bucket prefix
- `--depth`: whether to only search for deletable tenants, or also search for
deletable timelines within active tenants. Default: `tenant`
- `--skip-validation`: skip additional post-deletion checks. Default: `false`
For the selected S3 path, the tool lists the given S3 bucket for either tenants only or for both tenants and timelines. For every entry found, the console API is queried: any entity that is deleted or missing in the API is scheduled for deletion from S3.

If validation is enabled, only the timelines of non-deleted tenants are checked.
For pageserver, each timeline's index_part.json on S3 is also checked for various discrepancies. No files are removed even if there are "extra" S3 files not present in index_part.json: due to the way the pageserver updates remote storage, it is better to do such removals manually, stopping the corresponding tenant first.

Command examples:

`env SSO_ACCOUNT_ID=369495373322 REGION=eu-west-1 BUCKET=neon-dev-storage-eu-west-1 CLOUD_ADMIN_API_TOKEN=${NEON_CLOUD_ADMIN_API_STAGING_KEY} CLOUD_ADMIN_API_URL=[url] cargo run --release -- tidy --node-kind=safekeeper`

`env SSO_ACCOUNT_ID=369495373322 REGION=us-east-2 BUCKET=neon-staging-storage-us-east-2 CLOUD_ADMIN_API_TOKEN=${NEON_CLOUD_ADMIN_API_STAGING_KEY} CLOUD_ADMIN_API_URL=[url] cargo run --release -- tidy --node-kind=pageserver --depth=timeline`

When the dry-run stats look satisfactory, add `--delete` before the `tidy` subcommand (e.g. `cargo run --release -- --delete tidy ...`) to
disable dry run and run the binary with deletion enabled.

See these lines (and the lines around them) in the logs for the final stats:

- `Finished listing the bucket for tenants`
- `Finished active tenant and timeline validation`
- `Total tenant deletion stats`
- `Total timeline deletion stats`
## Current implementation details

- The tool does not have any persistent state currently: instead, it creates very verbose logs, with every S3 delete request logged, every tenant/timeline id check, etc.
Worse, any panic or early-errored task might force the tool to exit without printing the final summary; all affected ids will still be in the logs, though. The tool has retries inside it, so it is error-resistant to some extent, and recent runs showed no traces of errors/panics.

- Instead of checking non-deleted tenants' timelines immediately, the tool spawns separate tasks (futures) for that, which complicates the logic and slows down the process; this should be fixed and done in one "task". A minimal sketch of how the delete-batch producer behind this is driven is shown after this list.

- The tool uses only publicly available remote resources (S3, console) and does not access the pageserver/safekeeper nodes themselves.
Yet its S3 setup should be prepared for running on any pageserver/safekeeper node, using the node's S3 credentials, so the node API access logic could be implemented relatively simply on top.
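The snippet below is a minimal, illustrative sketch of how that delete-batch producer could be driven, based only on the `DeleteBatchProducer` API added in `s3_scrubber/src/delete_batch_producer.rs` later in this diff; it is not the tool's actual entry point, and the construction of the admin client, S3 client, root target and traversing depth is assumed to happen elsewhere.

```
use std::sync::Arc;

use aws_sdk_s3::Client;

use crate::cloud_admin_api::CloudAdminApiClient;
use crate::delete_batch_producer::DeleteBatchProducer;
use crate::{RootTarget, TraversingDepth};

/// Sketch: start the producer, drain its batches, then collect the final stats.
async fn drain_delete_batches(
    admin_client: Arc<CloudAdminApiClient>,
    s3_client: Arc<Client>,
    s3_root_target: RootTarget,
    traversing_depth: TraversingDepth,
) -> anyhow::Result<()> {
    let producer =
        DeleteBatchProducer::start(admin_client, s3_client, s3_root_target, traversing_depth);
    let batch_receiver = producer.subscribe();

    // Consume batches as they are produced; the real tool would turn each batch into
    // S3 delete_objects requests (or only log them when running in dry-run mode).
    while let Some(batch) = batch_receiver.lock().await.recv().await {
        tracing::info!(
            "received a batch with {} tenants and {} timelines",
            batch.tenants.len(),
            batch.timelines.len()
        );
    }

    // The channel closes once the listing tasks are done; join the producer for stats.
    let stats = producer.join().await?;
    tracing::info!(
        "checked {} tenants and {} timelines",
        stats.tenants_checked(),
        stats.timelines_checked()
    );
    Ok(())
}
```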
## Cleanup procedure

### Pageserver preparations

If the S3 state is altered manually first, the pageserver's in-memory state will contain stale data about S3, and tenants/timelines may get recreated on S3 (by any layer upload triggered by compaction, a pageserver restart, etc.). So before proceeding, tenants/timelines that are already deleted in the console must be removed from the pageservers.

First, group the pageservers by bucket: `https://<admin host>/admin/pageservers` can be used to list all nodes of an environment, and `cat /storage/pageserver/data/pageserver.toml` on every node shows the bucket name and region it uses.

Per bucket, for every related pageserver id, find the deleted tenants:

`curl -X POST "https://<admin_host>/admin/check_pageserver/{id}" -H "Accept: application/json" -H "Authorization: Bearer ${NEON_CLOUD_ADMIN_API_STAGING_KEY}" | jq`

Add `?check_timelines=true` to also find deleted timelines, but note that this check runs a separate query for every live tenant, so it can take a long time and may time out for big pageservers (an example request is shown below).
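For illustration, the same request with the timeline check enabled would look like this (same host, id and token placeholders as above):

`curl -X POST "https://<admin_host>/admin/check_pageserver/{id}?check_timelines=true" -H "Accept: application/json" -H "Authorization: Bearer ${NEON_CLOUD_ADMIN_API_STAGING_KEY}" | jq`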
Note that some tenants/timelines could be marked as deleted in the console while the console might still query the node later to fully remove them: wait for some time before concluding that an "extra" tenant/timeline is not going away by itself.

When all IDs are collected, manually go to every pageserver and detach/delete the tenant/timeline.
In the future, the cleanup tool may access pageservers directly, but for now it only has access to the console and S3.
s3_scrubber/src/checks.rs (new file, 438 lines)
@@ -0,0 +1,438 @@
|
||||
use std::collections::{hash_map, HashMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Context;
|
||||
use aws_sdk_s3::Client;
|
||||
use tokio::io::AsyncReadExt;
|
||||
use tokio::task::JoinSet;
|
||||
use tracing::{error, info, info_span, warn, Instrument};
|
||||
|
||||
use crate::cloud_admin_api::{BranchData, CloudAdminApiClient, ProjectId};
|
||||
use crate::delete_batch_producer::DeleteProducerStats;
|
||||
use crate::{list_objects_with_retries, RootTarget, MAX_RETRIES};
|
||||
use pageserver::tenant::storage_layer::LayerFileName;
|
||||
use pageserver::tenant::IndexPart;
|
||||
use utils::id::TenantTimelineId;
|
||||
|
||||
pub async fn validate_pageserver_active_tenant_and_timelines(
|
||||
s3_client: Arc<Client>,
|
||||
s3_root: RootTarget,
|
||||
admin_client: Arc<CloudAdminApiClient>,
|
||||
batch_producer_stats: DeleteProducerStats,
|
||||
) -> anyhow::Result<BranchCheckStats> {
|
||||
let Some(timeline_stats) = batch_producer_stats.timeline_stats else {
|
||||
info!("No tenant-only checks, exiting");
|
||||
return Ok(BranchCheckStats::default());
|
||||
};
|
||||
|
||||
let s3_active_projects = batch_producer_stats
|
||||
.tenant_stats
|
||||
.active_entries
|
||||
.into_iter()
|
||||
.map(|project| (project.id.clone(), project))
|
||||
.collect::<HashMap<_, _>>();
|
||||
info!("Validating {} active tenants", s3_active_projects.len());
|
||||
|
||||
let mut s3_active_branches_per_project = HashMap::<ProjectId, Vec<BranchData>>::new();
|
||||
let mut s3_blob_data = HashMap::<TenantTimelineId, S3TimelineBlobData>::new();
|
||||
for active_branch in timeline_stats.active_entries {
|
||||
let active_project_id = active_branch.project_id.clone();
|
||||
let active_branch_id = active_branch.id.clone();
|
||||
let active_timeline_id = active_branch.timeline_id;
|
||||
|
||||
s3_active_branches_per_project
|
||||
.entry(active_project_id.clone())
|
||||
.or_default()
|
||||
.push(active_branch);
|
||||
|
||||
let Some(active_project) = s3_active_projects.get(&active_project_id) else {
|
||||
error!("Branch {:?} for project {:?} has no such project in the active projects", active_branch_id, active_project_id);
|
||||
continue;
|
||||
};
|
||||
|
||||
let id = TenantTimelineId::new(active_project.tenant, active_timeline_id);
|
||||
s3_blob_data.insert(
|
||||
id,
|
||||
list_timeline_blobs(&s3_client, id, &s3_root)
|
||||
.await
|
||||
.with_context(|| format!("List timeline {id} blobs"))?,
|
||||
);
|
||||
}
|
||||
|
||||
let mut branch_checks = JoinSet::new();
|
||||
for (_, s3_active_project) in s3_active_projects {
|
||||
let project_id = &s3_active_project.id;
|
||||
let tenant_id = s3_active_project.tenant;
|
||||
|
||||
let mut console_active_branches =
|
||||
branches_for_project_with_retries(&admin_client, project_id)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("Client API branches for project {project_id:?} retrieval")
|
||||
})?
|
||||
.into_iter()
|
||||
.map(|branch| (branch.id.clone(), branch))
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
let active_branches = s3_active_branches_per_project
|
||||
.remove(project_id)
|
||||
.unwrap_or_default();
|
||||
info!(
|
||||
"Spawning tasks for {} tenant {} active timelines",
|
||||
active_branches.len(),
|
||||
tenant_id
|
||||
);
|
||||
for s3_active_branch in active_branches {
|
||||
let console_branch = console_active_branches.remove(&s3_active_branch.id);
|
||||
let timeline_id = s3_active_branch.timeline_id;
|
||||
let id = TenantTimelineId::new(tenant_id, timeline_id);
|
||||
let s3_data = s3_blob_data.remove(&id);
|
||||
let s3_root = s3_root.clone();
|
||||
branch_checks.spawn(
|
||||
async move {
|
||||
let check_errors = branch_cleanup_and_check_errors(
|
||||
id,
|
||||
&s3_root,
|
||||
&s3_active_branch,
|
||||
console_branch,
|
||||
s3_data,
|
||||
)
|
||||
.await;
|
||||
(id, check_errors)
|
||||
}
|
||||
.instrument(info_span!("check_timeline", id = %id)),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
let mut total_stats = BranchCheckStats::default();
|
||||
while let Some((id, branch_check_errors)) = branch_checks
|
||||
.join_next()
|
||||
.await
|
||||
.transpose()
|
||||
.context("branch check task join")?
|
||||
{
|
||||
total_stats.add(id, branch_check_errors);
|
||||
}
|
||||
Ok(total_stats)
|
||||
}
|
||||
|
||||
async fn branches_for_project_with_retries(
|
||||
admin_client: &CloudAdminApiClient,
|
||||
project_id: &ProjectId,
|
||||
) -> anyhow::Result<Vec<BranchData>> {
|
||||
for _ in 0..MAX_RETRIES {
|
||||
match admin_client.branches_for_project(project_id, false).await {
|
||||
Ok(branches) => return Ok(branches),
|
||||
Err(e) => {
|
||||
error!("admin list branches for project {project_id:?} query failed: {e}");
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
anyhow::bail!("Failed to list branches for project {project_id:?} {MAX_RETRIES} times")
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct BranchCheckStats {
|
||||
pub timelines_with_errors: HashMap<TenantTimelineId, Vec<String>>,
|
||||
pub normal_timelines: HashSet<TenantTimelineId>,
|
||||
}
|
||||
|
||||
impl BranchCheckStats {
|
||||
pub fn add(&mut self, id: TenantTimelineId, check_errors: Vec<String>) {
|
||||
if check_errors.is_empty() {
|
||||
if !self.normal_timelines.insert(id) {
|
||||
panic!("Checking branch with timeline {id} more than once")
|
||||
}
|
||||
} else {
|
||||
match self.timelines_with_errors.entry(id) {
|
||||
hash_map::Entry::Occupied(_) => {
|
||||
panic!("Checking branch with timeline {id} more than once")
|
||||
}
|
||||
hash_map::Entry::Vacant(v) => {
|
||||
v.insert(check_errors);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn branch_cleanup_and_check_errors(
|
||||
id: TenantTimelineId,
|
||||
s3_root: &RootTarget,
|
||||
s3_active_branch: &BranchData,
|
||||
console_branch: Option<BranchData>,
|
||||
s3_data: Option<S3TimelineBlobData>,
|
||||
) -> Vec<String> {
|
||||
info!(
|
||||
"Checking timeline for branch branch {:?}/{:?}",
|
||||
s3_active_branch.project_id, s3_active_branch.id
|
||||
);
|
||||
let mut branch_check_errors = Vec::new();
|
||||
|
||||
match console_branch {
|
||||
Some(console_active_branch) => {
|
||||
if console_active_branch.deleted {
|
||||
branch_check_errors.push(format!("Timeline has deleted branch data in the console (id = {:?}, project_id = {:?}), recheck whether if it got removed during the check",
|
||||
s3_active_branch.id, s3_active_branch.project_id))
|
||||
}
|
||||
},
|
||||
None => branch_check_errors.push(format!("Timeline has no branch data in the console (id = {:?}, project_id = {:?}), recheck whether if it got removed during the check",
|
||||
s3_active_branch.id, s3_active_branch.project_id))
|
||||
}
|
||||
|
||||
let mut keys_to_remove = Vec::new();
|
||||
|
||||
match s3_data {
|
||||
Some(s3_data) => {
|
||||
keys_to_remove.extend(s3_data.keys_to_remove);
|
||||
|
||||
match s3_data.blob_data {
|
||||
BlobDataParseResult::Parsed {
|
||||
index_part,
|
||||
mut s3_layers,
|
||||
} => {
|
||||
if !IndexPart::KNOWN_VERSIONS.contains(&index_part.get_version()) {
|
||||
branch_check_errors.push(format!(
|
||||
"index_part.json version: {}",
|
||||
index_part.get_version()
|
||||
))
|
||||
}
|
||||
|
||||
if index_part.metadata.disk_consistent_lsn()
|
||||
!= index_part.get_disk_consistent_lsn()
|
||||
{
|
||||
branch_check_errors.push(format!(
|
||||
"Mismatching disk_consistent_lsn in TimelineMetadata ({}) and in the index_part ({})",
|
||||
index_part.metadata.disk_consistent_lsn(),
|
||||
index_part.get_disk_consistent_lsn(),
|
||||
|
||||
))
|
||||
}
|
||||
|
||||
if index_part.layer_metadata.is_empty() {
|
||||
// not an error, can happen for branches with zero writes, but notice that
|
||||
info!("index_part.json has no layers");
|
||||
}
|
||||
|
||||
for (layer, metadata) in index_part.layer_metadata {
|
||||
if metadata.file_size == 0 {
|
||||
branch_check_errors.push(format!(
|
||||
"index_part.json contains a layer {} that has 0 size in its layer metadata", layer.file_name(),
|
||||
))
|
||||
}
|
||||
|
||||
if !s3_layers.remove(&layer) {
|
||||
branch_check_errors.push(format!(
|
||||
"index_part.json contains a layer {} that is not present in S3",
|
||||
layer.file_name(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
if !s3_layers.is_empty() {
|
||||
branch_check_errors.push(format!(
|
||||
"index_part.json does not contain layers from S3: {:?}",
|
||||
s3_layers
|
||||
.iter()
|
||||
.map(|layer_name| layer_name.file_name())
|
||||
.collect::<Vec<_>>(),
|
||||
));
|
||||
keys_to_remove.extend(s3_layers.iter().map(|layer_name| {
|
||||
let mut key = s3_root.timeline_root(id).prefix_in_bucket;
|
||||
let delimiter = s3_root.delimiter();
|
||||
if !key.ends_with(delimiter) {
|
||||
key.push_str(delimiter);
|
||||
}
|
||||
key.push_str(&layer_name.file_name());
|
||||
key
|
||||
}));
|
||||
}
|
||||
}
|
||||
BlobDataParseResult::Incorrect(parse_errors) => branch_check_errors.extend(
|
||||
parse_errors
|
||||
.into_iter()
|
||||
.map(|error| format!("parse error: {error}")),
|
||||
),
|
||||
}
|
||||
}
|
||||
None => branch_check_errors.push("Timeline has no data on S3 at all".to_string()),
|
||||
}
|
||||
|
||||
if branch_check_errors.is_empty() {
|
||||
info!("No check errors found");
|
||||
} else {
|
||||
warn!("Found check errors: {branch_check_errors:?}");
|
||||
}
|
||||
|
||||
if !keys_to_remove.is_empty() {
|
||||
error!("The following keys should be removed from S3: {keys_to_remove:?}")
|
||||
}
|
||||
|
||||
branch_check_errors
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct S3TimelineBlobData {
|
||||
blob_data: BlobDataParseResult,
|
||||
keys_to_remove: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum BlobDataParseResult {
|
||||
Parsed {
|
||||
index_part: IndexPart,
|
||||
s3_layers: HashSet<LayerFileName>,
|
||||
},
|
||||
Incorrect(Vec<String>),
|
||||
}
|
||||
|
||||
async fn list_timeline_blobs(
|
||||
s3_client: &Client,
|
||||
id: TenantTimelineId,
|
||||
s3_root: &RootTarget,
|
||||
) -> anyhow::Result<S3TimelineBlobData> {
|
||||
let mut s3_layers = HashSet::new();
|
||||
let mut index_part_object = None;
|
||||
|
||||
let timeline_dir_target = s3_root.timeline_root(id);
|
||||
let mut continuation_token = None;
|
||||
|
||||
let mut errors = Vec::new();
|
||||
let mut keys_to_remove = Vec::new();
|
||||
|
||||
loop {
|
||||
let fetch_response =
|
||||
list_objects_with_retries(s3_client, &timeline_dir_target, continuation_token.clone())
|
||||
.await?;
|
||||
|
||||
let subdirectories = fetch_response.common_prefixes().unwrap_or_default();
|
||||
if !subdirectories.is_empty() {
|
||||
errors.push(format!(
|
||||
"S3 list response should not contain any subdirectories, but got {subdirectories:?}"
|
||||
));
|
||||
}
|
||||
|
||||
for (object, key) in fetch_response
|
||||
.contents()
|
||||
.unwrap_or_default()
|
||||
.iter()
|
||||
.filter_map(|object| Some((object, object.key()?)))
|
||||
{
|
||||
let blob_name = key.strip_prefix(&timeline_dir_target.prefix_in_bucket);
|
||||
match blob_name {
|
||||
Some("index_part.json") => index_part_object = Some(object.clone()),
|
||||
Some(maybe_layer_name) => match maybe_layer_name.parse::<LayerFileName>() {
|
||||
Ok(new_layer) => {
|
||||
s3_layers.insert(new_layer);
|
||||
}
|
||||
Err(e) => {
|
||||
errors.push(
|
||||
format!("S3 list response got an object with key {key} that is not a layer name: {e}"),
|
||||
);
|
||||
keys_to_remove.push(key.to_string());
|
||||
}
|
||||
},
|
||||
None => {
|
||||
errors.push(format!("S3 list response got an object with odd key {key}"));
|
||||
keys_to_remove.push(key.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
match fetch_response.next_continuation_token {
|
||||
Some(new_token) => continuation_token = Some(new_token),
|
||||
None => break,
|
||||
}
|
||||
}
|
||||
|
||||
if index_part_object.is_none() {
|
||||
errors.push("S3 list response got no index_part.json file".to_string());
|
||||
}
|
||||
|
||||
if let Some(index_part_object_key) = index_part_object.as_ref().and_then(|object| object.key())
|
||||
{
|
||||
let index_part_bytes = download_object_with_retries(
|
||||
s3_client,
|
||||
&timeline_dir_target.bucket_name,
|
||||
index_part_object_key,
|
||||
)
|
||||
.await
|
||||
.context("index_part.json download")?;
|
||||
|
||||
match serde_json::from_slice(&index_part_bytes) {
|
||||
Ok(index_part) => {
|
||||
return Ok(S3TimelineBlobData {
|
||||
blob_data: BlobDataParseResult::Parsed {
|
||||
index_part,
|
||||
s3_layers,
|
||||
},
|
||||
keys_to_remove,
|
||||
})
|
||||
}
|
||||
Err(index_parse_error) => errors.push(format!(
|
||||
"index_part.json body parsing error: {index_parse_error}"
|
||||
)),
|
||||
}
|
||||
} else {
|
||||
errors.push(format!(
|
||||
"Index part object {index_part_object:?} has no key"
|
||||
));
|
||||
}
|
||||
|
||||
if errors.is_empty() {
|
||||
errors.push(
|
||||
"Unexpected: no errors did not lead to a successfully parsed blob return".to_string(),
|
||||
);
|
||||
}
|
||||
|
||||
Ok(S3TimelineBlobData {
|
||||
blob_data: BlobDataParseResult::Incorrect(errors),
|
||||
keys_to_remove,
|
||||
})
|
||||
}
|
||||
|
||||
async fn download_object_with_retries(
|
||||
s3_client: &Client,
|
||||
bucket_name: &str,
|
||||
key: &str,
|
||||
) -> anyhow::Result<Vec<u8>> {
|
||||
for _ in 0..MAX_RETRIES {
|
||||
let mut body_buf = Vec::new();
|
||||
let response_stream = match s3_client
|
||||
.get_object()
|
||||
.bucket(bucket_name)
|
||||
.key(key)
|
||||
.send()
|
||||
.await
|
||||
{
|
||||
Ok(response) => response,
|
||||
Err(e) => {
|
||||
error!("Failed to download object for key {key}: {e}");
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
match response_stream
|
||||
.body
|
||||
.into_async_read()
|
||||
.read_to_end(&mut body_buf)
|
||||
.await
|
||||
{
|
||||
Ok(bytes_read) => {
|
||||
info!("Downloaded {bytes_read} bytes for object object with key {key}");
|
||||
return Ok(body_buf);
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to stream object body for key {key}: {e}");
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
anyhow::bail!("Failed to download objects with key {key} {MAX_RETRIES} times")
|
||||
}
|
||||
s3_scrubber/src/cloud_admin_api.rs (new file, 418 lines)
@@ -0,0 +1,418 @@
|
||||
#![allow(unused)]
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use reqwest::{header, Client, Url};
|
||||
use tokio::sync::Semaphore;
|
||||
|
||||
use utils::id::{TenantId, TimelineId};
|
||||
use utils::lsn::Lsn;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Error {
|
||||
context: String,
|
||||
kind: ErrorKind,
|
||||
}
|
||||
|
||||
impl Error {
|
||||
fn new(context: String, kind: ErrorKind) -> Self {
|
||||
Self { context, kind }
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Error {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match &self.kind {
|
||||
ErrorKind::RequestSend(e) => write!(
|
||||
f,
|
||||
"Failed to send a request. Context: {}, error: {}",
|
||||
self.context, e
|
||||
),
|
||||
ErrorKind::BodyRead(e) => {
|
||||
write!(
|
||||
f,
|
||||
"Failed to read a request body. Context: {}, error: {}",
|
||||
self.context, e
|
||||
)
|
||||
}
|
||||
ErrorKind::UnexpectedState => write!(f, "Unexpected state: {}", self.context),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, serde::Deserialize, Hash, PartialEq, Eq)]
|
||||
#[serde(transparent)]
|
||||
pub struct ProjectId(pub String);
|
||||
|
||||
#[derive(Clone, Debug, serde::Deserialize, Hash, PartialEq, Eq)]
|
||||
#[serde(transparent)]
|
||||
pub struct BranchId(pub String);
|
||||
|
||||
impl std::error::Error for Error {}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum ErrorKind {
|
||||
RequestSend(reqwest::Error),
|
||||
BodyRead(reqwest::Error),
|
||||
UnexpectedState,
|
||||
}
|
||||
|
||||
pub struct CloudAdminApiClient {
|
||||
request_limiter: Semaphore,
|
||||
token: String,
|
||||
base_url: Url,
|
||||
http_client: Client,
|
||||
}
|
||||
|
||||
#[derive(Debug, serde::Deserialize)]
|
||||
struct AdminApiResponse<T> {
|
||||
data: T,
|
||||
total: Option<usize>,
|
||||
}
|
||||
|
||||
#[derive(Debug, serde::Deserialize)]
|
||||
pub struct PageserverData {
|
||||
pub id: u64,
|
||||
pub created_at: DateTime<Utc>,
|
||||
pub updated_at: DateTime<Utc>,
|
||||
pub region_id: String,
|
||||
pub version: i64,
|
||||
pub instance_id: String,
|
||||
pub port: u16,
|
||||
pub http_host: String,
|
||||
pub http_port: u16,
|
||||
pub active: bool,
|
||||
pub projects_count: usize,
|
||||
pub availability_zone_id: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, serde::Deserialize)]
|
||||
pub struct SafekeeperData {
|
||||
pub id: u64,
|
||||
pub created_at: DateTime<Utc>,
|
||||
pub updated_at: DateTime<Utc>,
|
||||
pub region_id: String,
|
||||
pub version: i64,
|
||||
pub instance_id: String,
|
||||
pub active: bool,
|
||||
pub host: String,
|
||||
pub port: u16,
|
||||
pub projects_count: usize,
|
||||
pub availability_zone_id: String,
|
||||
}
|
||||
|
||||
#[serde_with::serde_as]
|
||||
#[derive(Debug, Clone, serde::Deserialize)]
|
||||
pub struct ProjectData {
|
||||
pub id: ProjectId,
|
||||
pub name: String,
|
||||
pub region_id: String,
|
||||
pub platform_id: String,
|
||||
pub user_id: String,
|
||||
pub pageserver_id: u64,
|
||||
#[serde_as(as = "serde_with::DisplayFromStr")]
|
||||
pub tenant: TenantId,
|
||||
pub safekeepers: Vec<SafekeeperData>,
|
||||
pub deleted: bool,
|
||||
pub created_at: DateTime<Utc>,
|
||||
pub updated_at: DateTime<Utc>,
|
||||
pub pg_version: u32,
|
||||
pub max_project_size: u64,
|
||||
pub remote_storage_size: u64,
|
||||
pub resident_size: u64,
|
||||
pub synthetic_storage_size: u64,
|
||||
pub compute_time: u64,
|
||||
pub data_transfer: u64,
|
||||
pub data_storage: u64,
|
||||
pub maintenance_set: Option<String>,
|
||||
}
|
||||
|
||||
#[serde_with::serde_as]
|
||||
#[derive(Debug, serde::Deserialize)]
|
||||
pub struct BranchData {
|
||||
pub id: BranchId,
|
||||
pub created_at: DateTime<Utc>,
|
||||
pub updated_at: DateTime<Utc>,
|
||||
pub name: String,
|
||||
pub project_id: ProjectId,
|
||||
#[serde_as(as = "serde_with::DisplayFromStr")]
|
||||
pub timeline_id: TimelineId,
|
||||
#[serde(default)]
|
||||
pub parent_id: Option<BranchId>,
|
||||
#[serde(default)]
|
||||
#[serde_as(as = "Option<serde_with::DisplayFromStr>")]
|
||||
pub parent_lsn: Option<Lsn>,
|
||||
pub default: bool,
|
||||
pub deleted: bool,
|
||||
pub logical_size: Option<u64>,
|
||||
pub physical_size: Option<u64>,
|
||||
pub written_size: Option<u64>,
|
||||
}
|
||||
|
||||
impl CloudAdminApiClient {
|
||||
pub fn new(token: String, base_url: Url) -> Self {
|
||||
Self {
|
||||
token,
|
||||
base_url,
|
||||
request_limiter: Semaphore::new(200),
|
||||
http_client: Client::new(), // TODO timeout configs at least
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn find_tenant_project(
|
||||
&self,
|
||||
tenant_id: TenantId,
|
||||
) -> Result<Option<ProjectData>, Error> {
|
||||
let _permit = self
|
||||
.request_limiter
|
||||
.acquire()
|
||||
.await
|
||||
.expect("Semaphore is not closed");
|
||||
|
||||
let response = self
|
||||
.http_client
|
||||
.get(self.append_url("/projects"))
|
||||
.query(&[
|
||||
("tenant_id", tenant_id.to_string()),
|
||||
("show_deleted", "true".to_string()),
|
||||
])
|
||||
.header(header::ACCEPT, "application/json")
|
||||
.bearer_auth(&self.token)
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| {
|
||||
Error::new(
|
||||
"Find project for tenant".to_string(),
|
||||
ErrorKind::RequestSend(e),
|
||||
)
|
||||
})?;
|
||||
|
||||
let response: AdminApiResponse<Vec<ProjectData>> = response.json().await.map_err(|e| {
|
||||
Error::new(
|
||||
"Find project for tenant".to_string(),
|
||||
ErrorKind::BodyRead(e),
|
||||
)
|
||||
})?;
|
||||
match response.data.len() {
|
||||
0 => Ok(None),
|
||||
1 => Ok(Some(
|
||||
response
|
||||
.data
|
||||
.into_iter()
|
||||
.next()
|
||||
.expect("Should have exactly one element"),
|
||||
)),
|
||||
too_many => Err(Error::new(
|
||||
format!("Find project for tenant returned {too_many} projects instead of 0 or 1"),
|
||||
ErrorKind::UnexpectedState,
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn find_timeline_branch(
|
||||
&self,
|
||||
timeline_id: TimelineId,
|
||||
) -> Result<Option<BranchData>, Error> {
|
||||
let _permit = self
|
||||
.request_limiter
|
||||
.acquire()
|
||||
.await
|
||||
.expect("Semaphore is not closed");
|
||||
|
||||
let response = self
|
||||
.http_client
|
||||
.get(self.append_url("/branches"))
|
||||
.query(&[
|
||||
("timeline_id", timeline_id.to_string()),
|
||||
("show_deleted", "true".to_string()),
|
||||
])
|
||||
.header(header::ACCEPT, "application/json")
|
||||
.bearer_auth(&self.token)
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| {
|
||||
Error::new(
|
||||
"Find branch for timeline".to_string(),
|
||||
ErrorKind::RequestSend(e),
|
||||
)
|
||||
})?;
|
||||
|
||||
let response: AdminApiResponse<Vec<BranchData>> = response.json().await.map_err(|e| {
|
||||
Error::new(
|
||||
"Find branch for timeline".to_string(),
|
||||
ErrorKind::BodyRead(e),
|
||||
)
|
||||
})?;
|
||||
match response.data.len() {
|
||||
0 => Ok(None),
|
||||
1 => Ok(Some(
|
||||
response
|
||||
.data
|
||||
.into_iter()
|
||||
.next()
|
||||
.expect("Should have exactly one element"),
|
||||
)),
|
||||
too_many => Err(Error::new(
|
||||
format!("Find branch for timeline returned {too_many} branches instead of 0 or 1"),
|
||||
ErrorKind::UnexpectedState,
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn list_pageservers(&self) -> Result<Vec<PageserverData>, Error> {
|
||||
let _permit = self
|
||||
.request_limiter
|
||||
.acquire()
|
||||
.await
|
||||
.expect("Semaphore is not closed");
|
||||
|
||||
let response = self
|
||||
.http_client
|
||||
.get(self.append_url("/pageservers"))
|
||||
.header(header::ACCEPT, "application/json")
|
||||
.bearer_auth(&self.token)
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| Error::new("List pageservers".to_string(), ErrorKind::RequestSend(e)))?;
|
||||
|
||||
let response: AdminApiResponse<Vec<PageserverData>> = response
|
||||
.json()
|
||||
.await
|
||||
.map_err(|e| Error::new("List pageservers".to_string(), ErrorKind::BodyRead(e)))?;
|
||||
|
||||
Ok(response.data)
|
||||
}
|
||||
|
||||
pub async fn list_safekeepers(&self) -> Result<Vec<SafekeeperData>, Error> {
|
||||
let _permit = self
|
||||
.request_limiter
|
||||
.acquire()
|
||||
.await
|
||||
.expect("Semaphore is not closed");
|
||||
|
||||
let response = self
|
||||
.http_client
|
||||
.get(self.append_url("/safekeepers"))
|
||||
.header(header::ACCEPT, "application/json")
|
||||
.bearer_auth(&self.token)
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| Error::new("List safekeepers".to_string(), ErrorKind::RequestSend(e)))?;
|
||||
|
||||
let response: AdminApiResponse<Vec<SafekeeperData>> = response
|
||||
.json()
|
||||
.await
|
||||
.map_err(|e| Error::new("List safekeepers".to_string(), ErrorKind::BodyRead(e)))?;
|
||||
|
||||
Ok(response.data)
|
||||
}
|
||||
|
||||
pub async fn projects_for_pageserver(
|
||||
&self,
|
||||
pageserver_id: u64,
|
||||
show_deleted: bool,
|
||||
) -> Result<Vec<ProjectData>, Error> {
|
||||
let _permit = self
|
||||
.request_limiter
|
||||
.acquire()
|
||||
.await
|
||||
.expect("Semaphore is not closed");
|
||||
|
||||
let response = self
|
||||
.http_client
|
||||
.get(self.append_url("/projects"))
|
||||
.query(&[
|
||||
("pageserver_id", &pageserver_id.to_string()),
|
||||
("show_deleted", &show_deleted.to_string()),
|
||||
])
|
||||
.header(header::ACCEPT, "application/json")
|
||||
.bearer_auth(&self.token)
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| Error::new("Project for tenant".to_string(), ErrorKind::RequestSend(e)))?;
|
||||
|
||||
let response: AdminApiResponse<Vec<ProjectData>> = response
|
||||
.json()
|
||||
.await
|
||||
.map_err(|e| Error::new("Project for tenant".to_string(), ErrorKind::BodyRead(e)))?;
|
||||
|
||||
Ok(response.data)
|
||||
}
|
||||
|
||||
pub async fn project_for_tenant(
|
||||
&self,
|
||||
tenant_id: TenantId,
|
||||
show_deleted: bool,
|
||||
) -> Result<Option<ProjectData>, Error> {
|
||||
let _permit = self
|
||||
.request_limiter
|
||||
.acquire()
|
||||
.await
|
||||
.expect("Semaphore is not closed");
|
||||
|
||||
let response = self
|
||||
.http_client
|
||||
.get(self.append_url("/projects"))
|
||||
.query(&[
|
||||
("search", &tenant_id.to_string()),
|
||||
("show_deleted", &show_deleted.to_string()),
|
||||
])
|
||||
.header(header::ACCEPT, "application/json")
|
||||
.bearer_auth(&self.token)
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| Error::new("Project for tenant".to_string(), ErrorKind::RequestSend(e)))?;
|
||||
|
||||
let response: AdminApiResponse<Vec<ProjectData>> = response
|
||||
.json()
|
||||
.await
|
||||
.map_err(|e| Error::new("Project for tenant".to_string(), ErrorKind::BodyRead(e)))?;
|
||||
|
||||
match response.data.as_slice() {
|
||||
[] => Ok(None),
|
||||
[_single] => Ok(Some(response.data.into_iter().next().unwrap())),
|
||||
multiple => Err(Error::new(
|
||||
format!("Got more than one project for tenant {tenant_id} : {multiple:?}"),
|
||||
ErrorKind::UnexpectedState,
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn branches_for_project(
|
||||
&self,
|
||||
project_id: &ProjectId,
|
||||
show_deleted: bool,
|
||||
) -> Result<Vec<BranchData>, Error> {
|
||||
let _permit = self
|
||||
.request_limiter
|
||||
.acquire()
|
||||
.await
|
||||
.expect("Semaphore is not closed");
|
||||
|
||||
let response = self
|
||||
.http_client
|
||||
.get(self.append_url("/branches"))
|
||||
.query(&[
|
||||
("project_id", &project_id.0),
|
||||
("show_deleted", &show_deleted.to_string()),
|
||||
])
|
||||
.header(header::ACCEPT, "application/json")
|
||||
.bearer_auth(&self.token)
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| Error::new("Project for tenant".to_string(), ErrorKind::RequestSend(e)))?;
|
||||
|
||||
let response: AdminApiResponse<Vec<BranchData>> = response
|
||||
.json()
|
||||
.await
|
||||
.map_err(|e| Error::new("Project for tenant".to_string(), ErrorKind::BodyRead(e)))?;
|
||||
|
||||
Ok(response.data)
|
||||
}
|
||||
|
||||
fn append_url(&self, subpath: &str) -> Url {
|
||||
// TODO fugly, but `.join` does not work when called
|
||||
(self.base_url.to_string() + subpath)
|
||||
.parse()
|
||||
.unwrap_or_else(|e| panic!("Could not append {subpath} to base url: {e}"))
|
||||
}
|
||||
}
|
||||
s3_scrubber/src/delete_batch_producer.rs (new file, 354 lines)
@@ -0,0 +1,354 @@
|
||||
mod tenant_batch;
|
||||
mod timeline_batch;
|
||||
|
||||
use std::future::Future;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Context;
|
||||
use aws_sdk_s3::Client;
|
||||
use either::Either;
|
||||
use tokio::sync::mpsc::UnboundedReceiver;
|
||||
use tokio::sync::Mutex;
|
||||
use tokio::task::{JoinHandle, JoinSet};
|
||||
use tracing::{error, info, info_span, Instrument};
|
||||
|
||||
use crate::cloud_admin_api::{BranchData, CloudAdminApiClient, ProjectData};
|
||||
use crate::{list_objects_with_retries, RootTarget, S3Target, TraversingDepth, MAX_RETRIES};
|
||||
use utils::id::{TenantId, TenantTimelineId};
|
||||
|
||||
/// A typical tenant to remove contains 1 layer and 1 index_part.json blob.
|
||||
/// Also, there are some non-standard tenants to remove, having more layers.
|
||||
/// delete_objects request allows up to 1000 keys, so be on a safe side and allow most
|
||||
/// batch processing tasks to do 1 delete objects request only.
|
||||
///
|
||||
/// Every batch item will be additionally S3 LS'ed later, so keep the batch size
|
||||
/// even lower to allow multiple concurrent tasks do the LS requests.
|
||||
const BATCH_SIZE: usize = 100;
|
||||
|
||||
pub struct DeleteBatchProducer {
|
||||
delete_tenants_sender_task: JoinHandle<anyhow::Result<ProcessedS3List<TenantId, ProjectData>>>,
|
||||
delete_timelines_sender_task:
|
||||
JoinHandle<anyhow::Result<ProcessedS3List<TenantTimelineId, BranchData>>>,
|
||||
delete_batch_creator_task: JoinHandle<()>,
|
||||
delete_batch_receiver: Arc<Mutex<UnboundedReceiver<DeleteBatch>>>,
|
||||
}
|
||||
|
||||
pub struct DeleteProducerStats {
|
||||
pub tenant_stats: ProcessedS3List<TenantId, ProjectData>,
|
||||
pub timeline_stats: Option<ProcessedS3List<TenantTimelineId, BranchData>>,
|
||||
}
|
||||
|
||||
impl DeleteProducerStats {
|
||||
pub fn tenants_checked(&self) -> usize {
|
||||
self.tenant_stats.entries_total
|
||||
}
|
||||
|
||||
pub fn active_tenants(&self) -> usize {
|
||||
self.tenant_stats.active_entries.len()
|
||||
}
|
||||
|
||||
pub fn timelines_checked(&self) -> usize {
|
||||
self.timeline_stats
|
||||
.as_ref()
|
||||
.map(|stats| stats.entries_total)
|
||||
.unwrap_or(0)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub struct DeleteBatch {
|
||||
pub tenants: Vec<TenantId>,
|
||||
pub timelines: Vec<TenantTimelineId>,
|
||||
}
|
||||
|
||||
impl DeleteBatch {
|
||||
pub fn merge(&mut self, other: Self) {
|
||||
self.tenants.extend(other.tenants);
|
||||
self.timelines.extend(other.timelines);
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.tenants.len() + self.timelines.len()
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.len() == 0
|
||||
}
|
||||
}
|
||||
|
||||
impl DeleteBatchProducer {
|
||||
pub fn start(
|
||||
admin_client: Arc<CloudAdminApiClient>,
|
||||
s3_client: Arc<Client>,
|
||||
s3_root_target: RootTarget,
|
||||
traversing_depth: TraversingDepth,
|
||||
) -> Self {
|
||||
let (delete_elements_sender, mut delete_elements_receiver) =
|
||||
tokio::sync::mpsc::unbounded_channel();
|
||||
let delete_elements_sender = Arc::new(delete_elements_sender);
|
||||
let admin_client = Arc::new(admin_client);
|
||||
|
||||
let (projects_to_check_sender, mut projects_to_check_receiver) =
|
||||
tokio::sync::mpsc::unbounded_channel();
|
||||
let delete_tenants_root_target = s3_root_target.clone();
|
||||
let delete_tenants_client = Arc::clone(&s3_client);
|
||||
let delete_tenants_admin_client = Arc::clone(&admin_client);
|
||||
let delete_sender = Arc::clone(&delete_elements_sender);
|
||||
let delete_tenants_sender_task = tokio::spawn(
|
||||
async move {
|
||||
tenant_batch::schedule_cleanup_deleted_tenants(
|
||||
&delete_tenants_root_target,
|
||||
&delete_tenants_client,
|
||||
&delete_tenants_admin_client,
|
||||
projects_to_check_sender,
|
||||
delete_sender,
|
||||
traversing_depth,
|
||||
)
|
||||
.await
|
||||
}
|
||||
.instrument(info_span!("delete_tenants_sender")),
|
||||
);
|
||||
let delete_timelines_sender_task = tokio::spawn(async move {
|
||||
timeline_batch::schedule_cleanup_deleted_timelines(
|
||||
&s3_root_target,
|
||||
&s3_client,
|
||||
&admin_client,
|
||||
&mut projects_to_check_receiver,
|
||||
delete_elements_sender,
|
||||
)
|
||||
.in_current_span()
|
||||
.await
|
||||
});
|
||||
|
||||
let (delete_batch_sender, delete_batch_receiver) = tokio::sync::mpsc::unbounded_channel();
|
||||
let delete_batch_creator_task = tokio::spawn(
|
||||
async move {
|
||||
'outer: loop {
|
||||
let mut delete_batch = DeleteBatch::default();
|
||||
while delete_batch.len() < BATCH_SIZE {
|
||||
match delete_elements_receiver.recv().await {
|
||||
Some(new_task) => match new_task {
|
||||
Either::Left(tenant_id) => delete_batch.tenants.push(tenant_id),
|
||||
Either::Right(timeline_id) => {
|
||||
delete_batch.timelines.push(timeline_id)
|
||||
}
|
||||
},
|
||||
None => {
|
||||
info!("Task finished: sender dropped");
|
||||
delete_batch_sender.send(delete_batch).ok();
|
||||
break 'outer;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !delete_batch.is_empty() {
|
||||
delete_batch_sender.send(delete_batch).ok();
|
||||
}
|
||||
}
|
||||
}
|
||||
.instrument(info_span!("delete batch creator")),
|
||||
);
|
||||
|
||||
Self {
|
||||
delete_tenants_sender_task,
|
||||
delete_timelines_sender_task,
|
||||
delete_batch_creator_task,
|
||||
delete_batch_receiver: Arc::new(Mutex::new(delete_batch_receiver)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn subscribe(&self) -> Arc<Mutex<UnboundedReceiver<DeleteBatch>>> {
|
||||
self.delete_batch_receiver.clone()
|
||||
}
|
||||
|
||||
pub async fn join(self) -> anyhow::Result<DeleteProducerStats> {
|
||||
let (delete_tenants_task_result, delete_timelines_task_result, batch_task_result) = tokio::join!(
|
||||
self.delete_tenants_sender_task,
|
||||
self.delete_timelines_sender_task,
|
||||
self.delete_batch_creator_task,
|
||||
);
|
||||
|
||||
let tenant_stats = match delete_tenants_task_result {
|
||||
Ok(Ok(stats)) => stats,
|
||||
Ok(Err(tenant_deletion_error)) => return Err(tenant_deletion_error),
|
||||
Err(join_error) => {
|
||||
anyhow::bail!("Failed to join the delete tenant producing task: {join_error}")
|
||||
}
|
||||
};
|
||||
|
||||
let timeline_stats = match delete_timelines_task_result {
|
||||
Ok(Ok(stats)) => Some(stats),
|
||||
Ok(Err(timeline_deletion_error)) => return Err(timeline_deletion_error),
|
||||
Err(join_error) => {
|
||||
anyhow::bail!("Failed to join the delete timeline producing task: {join_error}")
|
||||
}
|
||||
};
|
||||
|
||||
match batch_task_result {
|
||||
Ok(()) => (),
|
||||
Err(join_error) => anyhow::bail!("Failed to join the batch forming task: {join_error}"),
|
||||
};
|
||||
|
||||
Ok(DeleteProducerStats {
|
||||
tenant_stats,
|
||||
timeline_stats,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ProcessedS3List<I, A> {
|
||||
pub entries_total: usize,
|
||||
pub entries_to_delete: Vec<I>,
|
||||
pub active_entries: Vec<A>,
|
||||
}
|
||||
|
||||
impl<I, A> Default for ProcessedS3List<I, A> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
entries_total: 0,
|
||||
entries_to_delete: Vec::new(),
|
||||
active_entries: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<I, A> ProcessedS3List<I, A> {
|
||||
fn merge(&mut self, other: Self) {
|
||||
self.entries_total += other.entries_total;
|
||||
self.entries_to_delete.extend(other.entries_to_delete);
|
||||
self.active_entries.extend(other.active_entries);
|
||||
}
|
||||
|
||||
fn change_ids<NewI>(self, transform: impl Fn(I) -> NewI) -> ProcessedS3List<NewI, A> {
|
||||
ProcessedS3List {
|
||||
entries_total: self.entries_total,
|
||||
entries_to_delete: self.entries_to_delete.into_iter().map(transform).collect(),
|
||||
active_entries: self.active_entries,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn process_s3_target_recursively<F, Fut, I, E, A>(
|
||||
s3_client: &Client,
|
||||
target: &S3Target,
|
||||
find_active_and_deleted_entries: F,
|
||||
) -> anyhow::Result<ProcessedS3List<I, A>>
|
||||
where
|
||||
I: FromStr<Err = E> + Send + Sync,
|
||||
E: Send + Sync + std::error::Error + 'static,
|
||||
F: FnOnce(Vec<I>) -> Fut + Clone,
|
||||
Fut: Future<Output = anyhow::Result<ProcessedS3List<I, A>>>,
|
||||
{
|
||||
let mut continuation_token = None;
|
||||
let mut total_entries = ProcessedS3List::default();
|
||||
|
||||
loop {
|
||||
let fetch_response =
|
||||
list_objects_with_retries(s3_client, target, continuation_token.clone()).await?;
|
||||
|
||||
let new_entry_ids = fetch_response
|
||||
.common_prefixes()
|
||||
.unwrap_or_default()
|
||||
.iter()
|
||||
.filter_map(|prefix| prefix.prefix())
|
||||
.filter_map(|prefix| -> Option<&str> {
|
||||
prefix
|
||||
.strip_prefix(&target.prefix_in_bucket)?
|
||||
.strip_suffix('/')
|
||||
})
|
||||
.map(|entry_id_str| {
|
||||
entry_id_str
|
||||
.parse()
|
||||
.with_context(|| format!("Incorrect entry id str: {entry_id_str}"))
|
||||
})
|
||||
.collect::<anyhow::Result<Vec<I>>>()
|
||||
.context("list and parse bucket's entry ids")?;
|
||||
|
||||
total_entries.merge(
|
||||
(find_active_and_deleted_entries.clone())(new_entry_ids)
|
||||
.await
|
||||
.context("filter active and deleted entry ids")?,
|
||||
);
|
||||
|
||||
match fetch_response.next_continuation_token {
|
||||
Some(new_token) => continuation_token = Some(new_token),
|
||||
None => break,
|
||||
}
|
||||
}
|
||||
|
||||
Ok(total_entries)
|
||||
}
|
||||
|
||||
enum FetchResult<A> {
|
||||
Found(A),
|
||||
Deleted,
|
||||
Absent,
|
||||
}
|
||||
|
||||
async fn split_to_active_and_deleted_entries<I, A, F, Fut>(
|
||||
new_entry_ids: Vec<I>,
|
||||
find_active_entry: F,
|
||||
) -> anyhow::Result<ProcessedS3List<I, A>>
|
||||
where
|
||||
I: std::fmt::Display + Send + Sync + 'static + Copy,
|
||||
A: Send + 'static,
|
||||
F: FnOnce(I) -> Fut + Send + Sync + 'static + Clone,
|
||||
Fut: Future<Output = anyhow::Result<FetchResult<A>>> + Send,
|
||||
{
|
||||
let entries_total = new_entry_ids.len();
|
||||
let mut check_tasks = JoinSet::new();
|
||||
let mut active_entries = Vec::with_capacity(entries_total);
|
||||
let mut entries_to_delete = Vec::with_capacity(entries_total);
|
||||
|
||||
for new_entry_id in new_entry_ids {
|
||||
let check_closure = find_active_entry.clone();
|
||||
check_tasks.spawn(
|
||||
async move {
|
||||
(
|
||||
new_entry_id,
|
||||
async {
|
||||
for _ in 0..MAX_RETRIES {
|
||||
let closure_clone = check_closure.clone();
|
||||
match closure_clone(new_entry_id).await {
|
||||
Ok(active_entry) => return Ok(active_entry),
|
||||
Err(e) => {
|
||||
error!("find active entry admin API call failed: {e}");
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
anyhow::bail!("Failed to check entry {new_entry_id} {MAX_RETRIES} times")
|
||||
}
|
||||
.await,
|
||||
)
|
||||
}
|
||||
.instrument(info_span!("filter_active_entries")),
|
||||
);
|
||||
}
|
||||
|
||||
while let Some(task_result) = check_tasks.join_next().await {
|
||||
let (entry_id, entry_data_fetch_result) = task_result.context("task join")?;
|
||||
match entry_data_fetch_result.context("entry data fetch")? {
|
||||
FetchResult::Found(active_entry) => {
|
||||
info!("Entry {entry_id} is alive, cannot delete");
|
||||
active_entries.push(active_entry);
|
||||
}
|
||||
FetchResult::Deleted => {
|
||||
info!("Entry {entry_id} deleted in the admin data, can safely delete");
|
||||
entries_to_delete.push(entry_id);
|
||||
}
|
||||
FetchResult::Absent => {
|
||||
info!("Entry {entry_id} absent in the admin data, can safely delete");
|
||||
entries_to_delete.push(entry_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(ProcessedS3List {
|
||||
entries_total,
|
||||
entries_to_delete,
|
||||
active_entries,
|
||||
})
|
||||
}
|
||||
87
s3_scrubber/src/delete_batch_producer/tenant_batch.rs
Normal file
@@ -0,0 +1,87 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Context;
|
||||
use aws_sdk_s3::Client;
|
||||
use either::Either;
|
||||
use tokio::sync::mpsc::UnboundedSender;
|
||||
use tracing::info;
|
||||
|
||||
use crate::cloud_admin_api::{CloudAdminApiClient, ProjectData};
|
||||
use crate::delete_batch_producer::FetchResult;
|
||||
use crate::{RootTarget, TraversingDepth};
|
||||
use utils::id::{TenantId, TenantTimelineId};
|
||||
|
||||
use super::ProcessedS3List;
|
||||
|
||||
pub async fn schedule_cleanup_deleted_tenants(
|
||||
s3_root_target: &RootTarget,
|
||||
s3_client: &Arc<Client>,
|
||||
admin_client: &Arc<CloudAdminApiClient>,
|
||||
projects_to_check_sender: UnboundedSender<ProjectData>,
|
||||
delete_sender: Arc<UnboundedSender<Either<TenantId, TenantTimelineId>>>,
|
||||
traversing_depth: TraversingDepth,
|
||||
) -> anyhow::Result<ProcessedS3List<TenantId, ProjectData>> {
|
||||
info!(
|
||||
"Starting to list the bucket from root {}",
|
||||
s3_root_target.bucket_name()
|
||||
);
|
||||
s3_client
|
||||
.head_bucket()
|
||||
.bucket(s3_root_target.bucket_name())
|
||||
.send()
|
||||
.await
|
||||
.with_context(|| format!("bucket {} was not found", s3_root_target.bucket_name()))?;
|
||||
|
||||
let check_client = Arc::clone(admin_client);
|
||||
let tenant_stats = super::process_s3_target_recursively(
|
||||
s3_client,
|
||||
s3_root_target.tenants_root(),
|
||||
|s3_tenants| async move {
|
||||
let another_client = Arc::clone(&check_client);
|
||||
super::split_to_active_and_deleted_entries(s3_tenants, move |tenant_id| async move {
|
||||
let project_data = another_client
|
||||
.find_tenant_project(tenant_id)
|
||||
.await
|
||||
.with_context(|| format!("Tenant {tenant_id} project admin check"))?;
|
||||
|
||||
Ok(if let Some(console_project) = project_data {
|
||||
if console_project.deleted {
|
||||
delete_sender.send(Either::Left(tenant_id)).ok();
|
||||
FetchResult::Deleted
|
||||
} else {
|
||||
if traversing_depth == TraversingDepth::Timeline {
|
||||
projects_to_check_sender.send(console_project.clone()).ok();
|
||||
}
|
||||
FetchResult::Found(console_project)
|
||||
}
|
||||
} else {
|
||||
delete_sender.send(Either::Left(tenant_id)).ok();
|
||||
FetchResult::Absent
|
||||
})
|
||||
})
|
||||
.await
|
||||
},
|
||||
)
|
||||
.await
|
||||
.context("tenant batch processing")?;
|
||||
|
||||
info!(
|
||||
"Among {} tenants, found {} tenants to delete and {} active ones",
|
||||
tenant_stats.entries_total,
|
||||
tenant_stats.entries_to_delete.len(),
|
||||
tenant_stats.active_entries.len(),
|
||||
);
|
||||
|
||||
let tenant_stats = match traversing_depth {
|
||||
TraversingDepth::Tenant => {
|
||||
info!("Finished listing the bucket for tenants only");
|
||||
tenant_stats
|
||||
}
|
||||
TraversingDepth::Timeline => {
|
||||
info!("Finished listing the bucket for tenants and sent {} active tenants to check for timelines", tenant_stats.active_entries.len());
|
||||
tenant_stats
|
||||
}
|
||||
};
|
||||
|
||||
Ok(tenant_stats)
|
||||
}
|
||||
102
s3_scrubber/src/delete_batch_producer/timeline_batch.rs
Normal file
@@ -0,0 +1,102 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Context;
|
||||
use aws_sdk_s3::Client;
|
||||
use either::Either;
|
||||
use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
|
||||
use tracing::{info, info_span, Instrument};
|
||||
|
||||
use crate::cloud_admin_api::{BranchData, CloudAdminApiClient, ProjectData};
|
||||
use crate::delete_batch_producer::{FetchResult, ProcessedS3List};
|
||||
use crate::RootTarget;
|
||||
use utils::id::{TenantId, TenantTimelineId};
|
||||
|
||||
pub async fn schedule_cleanup_deleted_timelines(
|
||||
s3_root_target: &RootTarget,
|
||||
s3_client: &Arc<Client>,
|
||||
admin_client: &Arc<CloudAdminApiClient>,
|
||||
projects_to_check_receiver: &mut UnboundedReceiver<ProjectData>,
|
||||
delete_elements_sender: Arc<UnboundedSender<Either<TenantId, TenantTimelineId>>>,
|
||||
) -> anyhow::Result<ProcessedS3List<TenantTimelineId, BranchData>> {
|
||||
info!(
|
||||
"Starting to list the bucket from root {}",
|
||||
s3_root_target.bucket_name()
|
||||
);
|
||||
s3_client
|
||||
.head_bucket()
|
||||
.bucket(s3_root_target.bucket_name())
|
||||
.send()
|
||||
.await
|
||||
.with_context(|| format!("bucket {} was not found", s3_root_target.bucket_name()))?;
|
||||
|
||||
let mut timeline_stats = ProcessedS3List::default();
|
||||
while let Some(project_to_check) = projects_to_check_receiver.recv().await {
|
||||
let check_client = Arc::clone(admin_client);
|
||||
|
||||
let check_s3_client = Arc::clone(s3_client);
|
||||
|
||||
let check_delete_sender = Arc::clone(&delete_elements_sender);
|
||||
|
||||
let check_root = s3_root_target.clone();
|
||||
|
||||
let new_stats = async move {
|
||||
let tenant_id_to_check = project_to_check.tenant;
|
||||
let check_target = check_root.timelines_root(tenant_id_to_check);
|
||||
let stats = super::process_s3_target_recursively(
|
||||
&check_s3_client,
|
||||
&check_target,
|
||||
|s3_timelines| async move {
|
||||
let another_client = check_client.clone();
|
||||
super::split_to_active_and_deleted_entries(
|
||||
s3_timelines,
|
||||
move |timeline_id| async move {
|
||||
let console_branch = another_client
|
||||
.find_timeline_branch(timeline_id)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
anyhow::anyhow!(
|
||||
"Timeline {timeline_id} branch admin check: {e}"
|
||||
)
|
||||
})?;
|
||||
|
||||
let id = TenantTimelineId::new(tenant_id_to_check, timeline_id);
|
||||
Ok(match console_branch {
|
||||
Some(console_branch) => {
|
||||
if console_branch.deleted {
|
||||
check_delete_sender.send(Either::Right(id)).ok();
|
||||
FetchResult::Deleted
|
||||
} else {
|
||||
FetchResult::Found(console_branch)
|
||||
}
|
||||
}
|
||||
None => {
|
||||
check_delete_sender.send(Either::Right(id)).ok();
|
||||
FetchResult::Absent
|
||||
}
|
||||
})
|
||||
},
|
||||
)
|
||||
.await
|
||||
},
|
||||
)
|
||||
.await
|
||||
.with_context(|| format!("tenant {tenant_id_to_check} timeline batch processing"))?
|
||||
.change_ids(|timeline_id| TenantTimelineId::new(tenant_id_to_check, timeline_id));
|
||||
|
||||
Ok::<_, anyhow::Error>(stats)
|
||||
}
|
||||
.instrument(info_span!("delete_timelines_sender", tenant = %project_to_check.tenant))
|
||||
.await?;
|
||||
|
||||
timeline_stats.merge(new_stats);
|
||||
}
|
||||
|
||||
info!(
|
||||
"Among {} timelines, found {} timelines to delete and {} active ones",
|
||||
timeline_stats.entries_total,
|
||||
timeline_stats.entries_to_delete.len(),
|
||||
timeline_stats.active_entries.len(),
|
||||
);
|
||||
|
||||
Ok(timeline_stats)
|
||||
}
|
||||
204
s3_scrubber/src/lib.rs
Normal file
@@ -0,0 +1,204 @@
|
||||
pub mod checks;
|
||||
pub mod cloud_admin_api;
|
||||
pub mod delete_batch_producer;
|
||||
mod s3_deletion;
|
||||
|
||||
use std::env;
|
||||
use std::fmt::Display;
|
||||
use std::time::Duration;
|
||||
|
||||
use aws_config::environment::EnvironmentVariableCredentialsProvider;
|
||||
use aws_config::imds::credentials::ImdsCredentialsProvider;
|
||||
use aws_config::meta::credentials::CredentialsProviderChain;
|
||||
use aws_config::sso::SsoCredentialsProvider;
|
||||
use aws_sdk_s3::config::Region;
|
||||
use aws_sdk_s3::{Client, Config};
|
||||
|
||||
pub use s3_deletion::S3Deleter;
|
||||
use tracing::error;
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
use tracing_subscriber::{fmt, prelude::*, EnvFilter};
|
||||
use utils::id::{TenantId, TenantTimelineId};
|
||||
|
||||
const MAX_RETRIES: usize = 20;
|
||||
const CLOUD_ADMIN_API_TOKEN_ENV_VAR: &str = "CLOUD_ADMIN_API_TOKEN";
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct S3Target {
|
||||
pub bucket_name: String,
|
||||
pub prefix_in_bucket: String,
|
||||
pub delimiter: String,
|
||||
}
|
||||
|
||||
#[derive(clap::ValueEnum, Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum TraversingDepth {
|
||||
Tenant,
|
||||
Timeline,
|
||||
}
|
||||
|
||||
impl Display for TraversingDepth {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str(match self {
|
||||
Self::Tenant => "tenant",
|
||||
Self::Timeline => "timeline",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl S3Target {
|
||||
pub fn with_sub_segment(&self, new_segment: &str) -> Self {
|
||||
let mut new_self = self.clone();
|
||||
let _ = new_self.prefix_in_bucket.pop();
|
||||
new_self.prefix_in_bucket =
|
||||
[&new_self.prefix_in_bucket, new_segment, ""].join(&new_self.delimiter);
|
||||
new_self
|
||||
}
|
||||
}
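For illustration (assumed values, drawn from the prefixes built in main.rs below): with prefix_in_bucket = "pageserver/v1/tenants/" and delimiter = "/", with_sub_segment("<tenant_id>") pops the trailing "/" and re-joins, yielding "pageserver/v1/tenants/<tenant_id>/".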
|
||||
|
||||
#[derive(Clone)]
|
||||
pub enum RootTarget {
|
||||
Pageserver(S3Target),
|
||||
Safekeeper(S3Target),
|
||||
}
|
||||
|
||||
impl RootTarget {
|
||||
pub fn tenants_root(&self) -> &S3Target {
|
||||
match self {
|
||||
Self::Pageserver(root) => root,
|
||||
Self::Safekeeper(root) => root,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn tenant_root(&self, tenant_id: TenantId) -> S3Target {
|
||||
self.tenants_root().with_sub_segment(&tenant_id.to_string())
|
||||
}
|
||||
|
||||
pub fn timelines_root(&self, tenant_id: TenantId) -> S3Target {
|
||||
match self {
|
||||
Self::Pageserver(_) => self.tenant_root(tenant_id).with_sub_segment("timelines"),
|
||||
Self::Safekeeper(_) => self.tenant_root(tenant_id),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn timeline_root(&self, id: TenantTimelineId) -> S3Target {
|
||||
self.timelines_root(id.tenant_id)
|
||||
.with_sub_segment(&id.timeline_id.to_string())
|
||||
}
|
||||
|
||||
pub fn bucket_name(&self) -> &str {
|
||||
match self {
|
||||
Self::Pageserver(root) => &root.bucket_name,
|
||||
Self::Safekeeper(root) => &root.bucket_name,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn delimiter(&self) -> &str {
|
||||
match self {
|
||||
Self::Pageserver(root) => &root.delimiter,
|
||||
Self::Safekeeper(root) => &root.delimiter,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_cloud_admin_api_token_or_exit() -> String {
|
||||
match env::var(CLOUD_ADMIN_API_TOKEN_ENV_VAR) {
|
||||
Ok(token) => token,
|
||||
Err(env::VarError::NotPresent) => {
|
||||
error!("{CLOUD_ADMIN_API_TOKEN_ENV_VAR} env variable is not present");
|
||||
std::process::exit(1);
|
||||
}
|
||||
Err(env::VarError::NotUnicode(not_unicode_string)) => {
|
||||
error!("{CLOUD_ADMIN_API_TOKEN_ENV_VAR} env variable's value is not a valid unicode string: {not_unicode_string:?}");
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn init_logging(binary_name: &str, dry_run: bool, node_kind: &str) -> WorkerGuard {
|
||||
let file_name = if dry_run {
|
||||
format!(
|
||||
"{}_{}_{}__dry.log",
|
||||
binary_name,
|
||||
node_kind,
|
||||
chrono::Utc::now().format("%Y_%m_%d__%H_%M_%S")
|
||||
)
|
||||
} else {
|
||||
format!(
|
||||
"{}_{}_{}.log",
|
||||
binary_name,
|
||||
node_kind,
|
||||
chrono::Utc::now().format("%Y_%m_%d__%H_%M_%S")
|
||||
)
|
||||
};
|
||||
|
||||
let (file_writer, guard) =
|
||||
tracing_appender::non_blocking(tracing_appender::rolling::never("./logs/", file_name));
|
||||
|
||||
let file_logs = fmt::Layer::new()
|
||||
.with_target(false)
|
||||
.with_ansi(false)
|
||||
.with_writer(file_writer);
|
||||
let stdout_logs = fmt::Layer::new()
|
||||
.with_target(false)
|
||||
.with_ansi(atty::is(atty::Stream::Stdout))
|
||||
.with_writer(std::io::stdout);
|
||||
tracing_subscriber::registry()
|
||||
.with(EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")))
|
||||
.with(file_logs)
|
||||
.with(stdout_logs)
|
||||
.init();
|
||||
|
||||
guard
|
||||
}
|
||||
|
||||
pub fn init_s3_client(account_id: String, bucket_region: Region) -> Client {
|
||||
let credentials_provider = {
|
||||
// uses "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"
|
||||
CredentialsProviderChain::first_try("env", EnvironmentVariableCredentialsProvider::new())
|
||||
// uses sso
|
||||
.or_else(
|
||||
"sso",
|
||||
SsoCredentialsProvider::builder()
|
||||
.account_id(account_id)
|
||||
.role_name("PowerUserAccess")
|
||||
.start_url("https://neondb.awsapps.com/start")
|
||||
.region(Region::from_static("eu-central-1"))
|
||||
.build(),
|
||||
)
|
||||
// uses imds v2
|
||||
.or_else("imds", ImdsCredentialsProvider::builder().build())
|
||||
};
|
||||
|
||||
let config = Config::builder()
|
||||
.region(bucket_region)
|
||||
.credentials_provider(credentials_provider)
|
||||
.build();
|
||||
|
||||
Client::from_conf(config)
|
||||
}
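A minimal usage sketch for the helper above; the account id is a placeholder and the region value is only an example:

// Hypothetical call site: builds a client that tries env credentials, then SSO, then IMDSv2.
let s3_client = init_s3_client("123456789012".to_string(), Region::new("eu-central-1"));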
|
||||
|
||||
async fn list_objects_with_retries(
|
||||
s3_client: &Client,
|
||||
s3_target: &S3Target,
|
||||
continuation_token: Option<String>,
|
||||
) -> anyhow::Result<aws_sdk_s3::operation::list_objects_v2::ListObjectsV2Output> {
|
||||
for _ in 0..MAX_RETRIES {
|
||||
match s3_client
|
||||
.list_objects_v2()
|
||||
.bucket(&s3_target.bucket_name)
|
||||
.prefix(&s3_target.prefix_in_bucket)
|
||||
.delimiter(&s3_target.delimiter)
|
||||
.set_continuation_token(continuation_token.clone())
|
||||
.send()
|
||||
.await
|
||||
{
|
||||
Ok(response) => return Ok(response),
|
||||
Err(e) => {
|
||||
error!("list_objects_v2 query failed: {e}");
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
anyhow::bail!("Failed to list objects {MAX_RETRIES} times")
|
||||
}
|
||||
268
s3_scrubber/src/main.rs
Normal file
@@ -0,0 +1,268 @@
|
||||
use std::collections::HashMap;
|
||||
use std::env;
|
||||
use std::fmt::Display;
|
||||
use std::num::NonZeroUsize;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Context;
|
||||
use aws_sdk_s3::config::Region;
|
||||
use reqwest::Url;
|
||||
use s3_scrubber::cloud_admin_api::CloudAdminApiClient;
|
||||
use s3_scrubber::delete_batch_producer::DeleteBatchProducer;
|
||||
use s3_scrubber::{
|
||||
checks, get_cloud_admin_api_token_or_exit, init_logging, init_s3_client, RootTarget, S3Deleter,
|
||||
S3Target, TraversingDepth,
|
||||
};
|
||||
use tracing::{info, info_span, warn};
|
||||
|
||||
use clap::{Parser, Subcommand, ValueEnum};
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(author, version, about, long_about = None)]
|
||||
#[command(arg_required_else_help(true))]
|
||||
struct Cli {
|
||||
#[command(subcommand)]
|
||||
command: Command,
|
||||
|
||||
#[arg(short, long, default_value_t = false)]
|
||||
delete: bool,
|
||||
}
|
||||
|
||||
#[derive(ValueEnum, Clone, Copy, Eq, PartialEq)]
|
||||
enum NodeKind {
|
||||
Safekeeper,
|
||||
Pageserver,
|
||||
}
|
||||
|
||||
impl NodeKind {
|
||||
fn as_str(&self) -> &'static str {
|
||||
match self {
|
||||
Self::Safekeeper => "safekeeper",
|
||||
Self::Pageserver => "pageserver",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for NodeKind {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str(self.as_str())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Subcommand)]
|
||||
enum Command {
|
||||
Tidy {
|
||||
#[arg(short, long)]
|
||||
node_kind: NodeKind,
|
||||
#[arg(short, long, default_value_t=TraversingDepth::Tenant)]
|
||||
depth: TraversingDepth,
|
||||
#[arg(short, long, default_value_t = false)]
|
||||
skip_validation: bool,
|
||||
},
|
||||
}
|
||||
|
||||
struct BucketConfig {
|
||||
region: String,
|
||||
bucket: String,
|
||||
sso_account_id: String,
|
||||
}
|
||||
|
||||
impl Display for BucketConfig {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}/{}/{}", self.sso_account_id, self.region, self.bucket)
|
||||
}
|
||||
}
|
||||
|
||||
impl BucketConfig {
|
||||
fn from_env() -> anyhow::Result<Self> {
|
||||
let sso_account_id =
|
||||
env::var("SSO_ACCOUNT_ID").context("'SSO_ACCOUNT_ID' param retrieval")?;
|
||||
let region = env::var("REGION").context("'REGION' param retrieval")?;
|
||||
let bucket = env::var("BUCKET").context("'BUCKET' param retrieval")?;
|
||||
|
||||
Ok(Self {
|
||||
region,
|
||||
bucket,
|
||||
sso_account_id,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
struct ConsoleConfig {
|
||||
admin_api_url: Url,
|
||||
}
|
||||
|
||||
impl ConsoleConfig {
|
||||
fn from_env() -> anyhow::Result<Self> {
|
||||
let admin_api_url: Url = env::var("CLOUD_ADMIN_API_URL")
|
||||
.context("'CLOUD_ADMIN_API_URL' param retrieval")?
|
||||
.parse()
|
||||
.context("'CLOUD_ADMIN_API_URL' param parsing")?;
|
||||
|
||||
Ok(Self { admin_api_url })
|
||||
}
|
||||
}
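Both config structs are populated purely from the environment; a hypothetical invocation environment could look like this (all values are placeholders):

// SSO_ACCOUNT_ID=123456789012
// REGION=eu-central-1
// BUCKET=some-pageserver-bucket
// CLOUD_ADMIN_API_URL=https://console.example.com/admin
// CLOUD_ADMIN_API_TOKEN=<secret>  (read separately by get_cloud_admin_api_token_or_exit)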
|
||||
|
||||
async fn tidy(
|
||||
cli: &Cli,
|
||||
bucket_config: BucketConfig,
|
||||
console_config: ConsoleConfig,
|
||||
node_kind: NodeKind,
|
||||
depth: TraversingDepth,
|
||||
skip_validation: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
let binary_name = env::args()
|
||||
.next()
|
||||
.context("binary name in not the first argument")?;
|
||||
|
||||
let dry_run = !cli.delete;
|
||||
let _guard = init_logging(&binary_name, dry_run, node_kind.as_str());
|
||||
let _main_span = info_span!("tidy", binary = %binary_name, %dry_run).entered();
|
||||
|
||||
if dry_run {
|
||||
info!("Dry run, not removing items for real");
|
||||
} else {
|
||||
warn!("Dry run disabled, removing bucket items for real");
|
||||
}
|
||||
|
||||
info!("skip_validation={skip_validation}");
|
||||
|
||||
info!("Starting extra S3 removal in {bucket_config} for node kind '{node_kind}', traversing depth: {depth:?}");
|
||||
|
||||
info!("Starting extra tenant S3 removal in {bucket_config} for node kind '{node_kind}'");
|
||||
let cloud_admin_api_client = Arc::new(CloudAdminApiClient::new(
|
||||
get_cloud_admin_api_token_or_exit(),
|
||||
console_config.admin_api_url,
|
||||
));
|
||||
|
||||
let bucket_region = Region::new(bucket_config.region);
|
||||
let delimiter = "/".to_string();
|
||||
let s3_client = Arc::new(init_s3_client(bucket_config.sso_account_id, bucket_region));
|
||||
let s3_root = match node_kind {
|
||||
NodeKind::Pageserver => RootTarget::Pageserver(S3Target {
|
||||
bucket_name: bucket_config.bucket,
|
||||
prefix_in_bucket: ["pageserver", "v1", "tenants", ""].join(&delimiter),
|
||||
delimiter,
|
||||
}),
|
||||
NodeKind::Safekeeper => RootTarget::Safekeeper(S3Target {
|
||||
bucket_name: bucket_config.bucket,
|
||||
prefix_in_bucket: ["safekeeper", "v1", "wal", ""].join(&delimiter),
|
||||
delimiter,
|
||||
}),
|
||||
};
|
||||
|
||||
let delete_batch_producer = DeleteBatchProducer::start(
|
||||
Arc::clone(&cloud_admin_api_client),
|
||||
Arc::clone(&s3_client),
|
||||
s3_root.clone(),
|
||||
depth,
|
||||
);
|
||||
|
||||
let s3_deleter = S3Deleter::new(
|
||||
dry_run,
|
||||
NonZeroUsize::new(15).unwrap(),
|
||||
Arc::clone(&s3_client),
|
||||
delete_batch_producer.subscribe(),
|
||||
s3_root.clone(),
|
||||
);
|
||||
|
||||
let (deleter_task_result, batch_producer_task_result) =
|
||||
tokio::join!(s3_deleter.remove_all(), delete_batch_producer.join());
|
||||
|
||||
let deletion_stats = deleter_task_result.context("s3 deletion")?;
|
||||
info!(
|
||||
"Deleted {} tenants ({} keys) and {} timelines ({} keys) total. Dry run: {}",
|
||||
deletion_stats.deleted_tenant_keys.len(),
|
||||
deletion_stats.deleted_tenant_keys.values().sum::<usize>(),
|
||||
deletion_stats.deleted_timeline_keys.len(),
|
||||
deletion_stats.deleted_timeline_keys.values().sum::<usize>(),
|
||||
dry_run,
|
||||
);
|
||||
info!(
|
||||
"Total tenant deletion stats: {:?}",
|
||||
deletion_stats
|
||||
.deleted_tenant_keys
|
||||
.into_iter()
|
||||
.map(|(id, key)| (id.to_string(), key))
|
||||
.collect::<HashMap<_, _>>()
|
||||
);
|
||||
info!(
|
||||
"Total timeline deletion stats: {:?}",
|
||||
deletion_stats
|
||||
.deleted_timeline_keys
|
||||
.into_iter()
|
||||
.map(|(id, key)| (id.to_string(), key))
|
||||
.collect::<HashMap<_, _>>()
|
||||
);
|
||||
|
||||
let batch_producer_stats = batch_producer_task_result.context("delete batch producer join")?;
|
||||
info!(
|
||||
"Total bucket tenants listed: {}; for {} active tenants, timelines checked: {}",
|
||||
batch_producer_stats.tenants_checked(),
|
||||
batch_producer_stats.active_tenants(),
|
||||
batch_producer_stats.timelines_checked()
|
||||
);
|
||||
|
||||
if node_kind != NodeKind::Pageserver {
|
||||
info!("node_kind != pageserver, finish without performing validation step");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if skip_validation {
|
||||
info!("--skip-validation is set, exiting");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
info!("validating active tenants and timelines for pageserver S3 data");
|
||||
|
||||
// TODO kb real stats for validation + better stats for every place: add and print `min`, `max`, `mean` values at least
|
||||
let validation_stats = checks::validate_pageserver_active_tenant_and_timelines(
|
||||
s3_client,
|
||||
s3_root,
|
||||
cloud_admin_api_client,
|
||||
batch_producer_stats,
|
||||
)
|
||||
.await
|
||||
.context("active tenant and timeline validation")?;
|
||||
info!("Finished active tenant and timeline validation, correct timelines: {}, timeline validation errors: {}",
|
||||
validation_stats.normal_timelines.len(), validation_stats.timelines_with_errors.len());
|
||||
if !validation_stats.timelines_with_errors.is_empty() {
|
||||
warn!(
|
||||
"Validation errors: {:#?}",
|
||||
validation_stats
|
||||
.timelines_with_errors
|
||||
.into_iter()
|
||||
.map(|(id, errors)| (id.to_string(), format!("{errors:?}")))
|
||||
.collect::<HashMap<_, _>>()
|
||||
);
|
||||
}
|
||||
|
||||
info!("Done");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let cli = Cli::parse();
|
||||
|
||||
let bucket_config = BucketConfig::from_env()?;
|
||||
|
||||
match &cli.command {
|
||||
Command::Tidy {
|
||||
node_kind,
|
||||
depth,
|
||||
skip_validation,
|
||||
} => {
|
||||
let console_config = ConsoleConfig::from_env()?;
|
||||
tidy(
|
||||
&cli,
|
||||
bucket_config,
|
||||
console_config,
|
||||
*node_kind,
|
||||
*depth,
|
||||
*skip_validation,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
434
s3_scrubber/src/s3_deletion.rs
Normal file
@@ -0,0 +1,434 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::num::NonZeroUsize;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Context;
|
||||
use aws_sdk_s3::types::{Delete, ObjectIdentifier};
|
||||
use aws_sdk_s3::Client;
|
||||
use tokio::sync::mpsc::error::TryRecvError;
|
||||
use tokio::sync::mpsc::UnboundedReceiver;
|
||||
use tokio::sync::Mutex;
|
||||
use tokio::task::JoinSet;
|
||||
use tracing::{debug, error, info, info_span, Instrument};
|
||||
|
||||
use crate::delete_batch_producer::DeleteBatch;
|
||||
use crate::{list_objects_with_retries, RootTarget, S3Target, TenantId, MAX_RETRIES};
|
||||
use utils::id::TenantTimelineId;
|
||||
|
||||
pub struct S3Deleter {
|
||||
dry_run: bool,
|
||||
concurrent_tasks_count: NonZeroUsize,
|
||||
delete_batch_receiver: Arc<Mutex<UnboundedReceiver<DeleteBatch>>>,
|
||||
s3_client: Arc<Client>,
|
||||
s3_target: RootTarget,
|
||||
}
|
||||
|
||||
impl S3Deleter {
|
||||
pub fn new(
|
||||
dry_run: bool,
|
||||
concurrent_tasks_count: NonZeroUsize,
|
||||
s3_client: Arc<Client>,
|
||||
delete_batch_receiver: Arc<Mutex<UnboundedReceiver<DeleteBatch>>>,
|
||||
s3_target: RootTarget,
|
||||
) -> Self {
|
||||
Self {
|
||||
dry_run,
|
||||
concurrent_tasks_count,
|
||||
delete_batch_receiver,
|
||||
s3_client,
|
||||
s3_target,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn remove_all(self) -> anyhow::Result<DeletionStats> {
|
||||
let mut deletion_tasks = JoinSet::new();
|
||||
for id in 0..self.concurrent_tasks_count.get() {
|
||||
let closure_client = Arc::clone(&self.s3_client);
|
||||
let closure_s3_target = self.s3_target.clone();
|
||||
let closure_batch_receiver = Arc::clone(&self.delete_batch_receiver);
|
||||
let dry_run = self.dry_run;
|
||||
deletion_tasks.spawn(
|
||||
async move {
|
||||
info!("Task started");
|
||||
(
|
||||
id,
|
||||
async move {
|
||||
let mut task_stats = DeletionStats::default();
|
||||
loop {
|
||||
let mut guard = closure_batch_receiver.lock().await;
|
||||
let receiver_result = guard.try_recv();
|
||||
drop(guard);
|
||||
match receiver_result {
|
||||
Ok(batch) => {
|
||||
let stats = delete_batch(
|
||||
&closure_client,
|
||||
&closure_s3_target,
|
||||
batch,
|
||||
dry_run,
|
||||
)
|
||||
.await
|
||||
.context("batch deletion")?;
|
||||
debug!(
|
||||
"Batch processed, number of objects deleted per tenant in the batch is: {}, per timeline — {}",
|
||||
stats.deleted_tenant_keys.len(),
|
||||
stats.deleted_timeline_keys.len(),
|
||||
);
|
||||
task_stats.merge(stats);
|
||||
}
|
||||
Err(TryRecvError::Empty) => {
|
||||
debug!("No tasks yet, waiting");
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
continue;
|
||||
}
|
||||
Err(TryRecvError::Disconnected) => {
|
||||
info!("Task finished: sender dropped");
|
||||
return Ok(task_stats);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
.in_current_span()
|
||||
.await,
|
||||
)
|
||||
}
|
||||
.instrument(info_span!("deletion_task", %id)),
|
||||
);
|
||||
}
|
||||
|
||||
let mut total_stats = DeletionStats::default();
|
||||
while let Some(task_result) = deletion_tasks.join_next().await {
|
||||
match task_result {
|
||||
Ok((id, Ok(task_stats))) => {
|
||||
info!("Task {id} completed");
|
||||
total_stats.merge(task_stats);
|
||||
}
|
||||
Ok((id, Err(e))) => {
|
||||
error!("Task {id} failed: {e:#}");
|
||||
return Err(e);
|
||||
}
|
||||
Err(join_error) => anyhow::bail!("Failed to join on a task: {join_error:?}"),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(total_stats)
|
||||
}
|
||||
}
|
||||
|
||||
/// S3 delete_objects allows up to 1000 keys to be passed in a single request.
/// Yet if you pass too many keys, S3 has apparently been seen to return OK while
/// actually deleting nothing, so keep the number lower.
const MAX_ITEMS_TO_DELETE: usize = 200;
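As a worked example of that cap (illustrative numbers only): a batch expanding to 1050 object keys is sent as 5 delete_objects requests of 200 keys plus a final request of 50 keys, instead of a single oversized request.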
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct DeletionStats {
|
||||
pub deleted_tenant_keys: BTreeMap<TenantId, usize>,
|
||||
pub deleted_timeline_keys: BTreeMap<TenantTimelineId, usize>,
|
||||
}
|
||||
|
||||
impl DeletionStats {
|
||||
fn merge(&mut self, other: Self) {
|
||||
self.deleted_tenant_keys.extend(other.deleted_tenant_keys);
|
||||
self.deleted_timeline_keys
|
||||
.extend(other.deleted_timeline_keys);
|
||||
}
|
||||
}
|
||||
|
||||
async fn delete_batch(
|
||||
s3_client: &Client,
|
||||
s3_target: &RootTarget,
|
||||
batch: DeleteBatch,
|
||||
dry_run: bool,
|
||||
) -> anyhow::Result<DeletionStats> {
|
||||
let (deleted_tenant_keys, deleted_timeline_keys) = tokio::join!(
|
||||
delete_tenants_batch(batch.tenants, s3_target, s3_client, dry_run),
|
||||
delete_timelines_batch(batch.timelines, s3_target, s3_client, dry_run),
|
||||
);
|
||||
|
||||
Ok(DeletionStats {
|
||||
deleted_tenant_keys: deleted_tenant_keys.context("tenant batch deletion")?,
|
||||
deleted_timeline_keys: deleted_timeline_keys.context("timeline batch deletion")?,
|
||||
})
|
||||
}
|
||||
|
||||
async fn delete_tenants_batch(
|
||||
batched_tenants: Vec<TenantId>,
|
||||
s3_target: &RootTarget,
|
||||
s3_client: &Client,
|
||||
dry_run: bool,
|
||||
) -> Result<BTreeMap<TenantId, usize>, anyhow::Error> {
|
||||
info!("Deleting tenants batch of size {}", batched_tenants.len());
|
||||
info!("Tenant ids to remove: {batched_tenants:?}");
|
||||
let deleted_keys = delete_elements(
|
||||
&batched_tenants,
|
||||
s3_target,
|
||||
s3_client,
|
||||
dry_run,
|
||||
|root_target, tenant_to_delete| root_target.tenant_root(tenant_to_delete),
|
||||
)
|
||||
.await?;
|
||||
|
||||
if !dry_run {
|
||||
let mut last_err = None;
|
||||
for _ in 0..MAX_RETRIES {
|
||||
match ensure_tenant_batch_deleted(s3_client, s3_target, &batched_tenants).await {
|
||||
Ok(()) => {
|
||||
last_err = None;
|
||||
break;
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to ensure the tenant batch is deleted: {e}");
|
||||
last_err = Some(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(e) = last_err {
|
||||
anyhow::bail!(
|
||||
"Failed to ensure that tenant batch is deleted {MAX_RETRIES} times: {e:?}"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(deleted_keys)
|
||||
}
|
||||
|
||||
async fn delete_timelines_batch(
|
||||
batched_timelines: Vec<TenantTimelineId>,
|
||||
s3_target: &RootTarget,
|
||||
s3_client: &Client,
|
||||
dry_run: bool,
|
||||
) -> Result<BTreeMap<TenantTimelineId, usize>, anyhow::Error> {
|
||||
info!(
|
||||
"Deleting timelines batch of size {}",
|
||||
batched_timelines.len()
|
||||
);
|
||||
info!(
|
||||
"Timeline ids to remove: {:?}",
|
||||
batched_timelines
|
||||
.iter()
|
||||
.map(|id| id.to_string())
|
||||
.collect::<Vec<_>>()
|
||||
);
|
||||
let deleted_keys = delete_elements(
|
||||
&batched_timelines,
|
||||
s3_target,
|
||||
s3_client,
|
||||
dry_run,
|
||||
|root_target, timeline_to_delete| root_target.timeline_root(timeline_to_delete),
|
||||
)
|
||||
.await?;
|
||||
|
||||
if !dry_run {
|
||||
let mut last_err = None;
|
||||
for _ in 0..MAX_RETRIES {
|
||||
match ensure_timeline_batch_deleted(s3_client, s3_target, &batched_timelines).await {
|
||||
Ok(()) => {
|
||||
last_err = None;
|
||||
break;
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to ensure the timelines batch is deleted: {e}");
|
||||
last_err = Some(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(e) = last_err {
|
||||
anyhow::bail!(
|
||||
"Failed to ensure that timeline batch is deleted {MAX_RETRIES} times: {e:?}"
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(deleted_keys)
|
||||
}
|
||||
|
||||
async fn delete_elements<I>(
|
||||
batched_ids: &Vec<I>,
|
||||
s3_target: &RootTarget,
|
||||
s3_client: &Client,
|
||||
dry_run: bool,
|
||||
target_producer: impl Fn(&RootTarget, I) -> S3Target,
|
||||
) -> Result<BTreeMap<I, usize>, anyhow::Error>
|
||||
where
|
||||
I: Ord + PartialOrd + Copy,
|
||||
{
|
||||
let mut deleted_keys = BTreeMap::new();
|
||||
let mut object_ids_to_delete = Vec::with_capacity(MAX_ITEMS_TO_DELETE);
|
||||
for &id_to_delete in batched_ids {
|
||||
let mut continuation_token = None;
|
||||
let mut subtargets = vec![target_producer(s3_target, id_to_delete)];
|
||||
while let Some(current_target) = subtargets.pop() {
|
||||
loop {
|
||||
let fetch_response = list_objects_with_retries(
|
||||
s3_client,
|
||||
¤t_target,
|
||||
continuation_token.clone(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
for object_id in fetch_response
|
||||
.contents()
|
||||
.unwrap_or_default()
|
||||
.iter()
|
||||
.filter_map(|object| object.key())
|
||||
.map(|key| ObjectIdentifier::builder().key(key).build())
|
||||
{
|
||||
if object_ids_to_delete.len() >= MAX_ITEMS_TO_DELETE {
|
||||
let object_ids_for_request = std::mem::replace(
|
||||
&mut object_ids_to_delete,
|
||||
Vec::with_capacity(MAX_ITEMS_TO_DELETE),
|
||||
);
|
||||
send_delete_request(
|
||||
s3_client,
|
||||
s3_target.bucket_name(),
|
||||
object_ids_for_request,
|
||||
dry_run,
|
||||
)
|
||||
.await
|
||||
.context("object ids deletion")?;
|
||||
}
|
||||
|
||||
object_ids_to_delete.push(object_id);
|
||||
*deleted_keys.entry(id_to_delete).or_default() += 1;
|
||||
}
|
||||
|
||||
subtargets.extend(
|
||||
fetch_response
|
||||
.common_prefixes()
|
||||
.unwrap_or_default()
|
||||
.iter()
|
||||
.filter_map(|common_prefix| common_prefix.prefix())
|
||||
.map(|prefix| {
|
||||
let mut new_target = current_target.clone();
|
||||
new_target.prefix_in_bucket = prefix.to_string();
|
||||
new_target
|
||||
}),
|
||||
);
|
||||
|
||||
match fetch_response.next_continuation_token {
|
||||
Some(new_token) => continuation_token = Some(new_token),
|
||||
None => break,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if !object_ids_to_delete.is_empty() {
|
||||
info!("Removing last objects of the batch");
|
||||
send_delete_request(
|
||||
s3_client,
|
||||
s3_target.bucket_name(),
|
||||
object_ids_to_delete,
|
||||
dry_run,
|
||||
)
|
||||
.await
|
||||
.context("Last object ids deletion")?;
|
||||
}
|
||||
Ok(deleted_keys)
|
||||
}
|
||||
|
||||
pub async fn send_delete_request(
|
||||
s3_client: &Client,
|
||||
bucket_name: &str,
|
||||
ids: Vec<ObjectIdentifier>,
|
||||
dry_run: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
info!("Removing {} object ids from S3", ids.len());
|
||||
info!("Object ids to remove: {ids:?}");
|
||||
let delete_request = s3_client
|
||||
.delete_objects()
|
||||
.bucket(bucket_name)
|
||||
.delete(Delete::builder().set_objects(Some(ids)).build());
|
||||
if dry_run {
|
||||
info!("Dry run, skipping the actual removal");
|
||||
Ok(())
|
||||
} else {
|
||||
let original_request = delete_request.clone();
|
||||
|
||||
for _ in 0..MAX_RETRIES {
|
||||
match delete_request
|
||||
.clone()
|
||||
.send()
|
||||
.await
|
||||
.context("delete request processing")
|
||||
{
|
||||
Ok(delete_response) => {
|
||||
info!("Delete response: {delete_response:?}");
|
||||
match delete_response.errors() {
|
||||
Some(delete_errors) => {
|
||||
error!("Delete request returned errors: {delete_errors:?}");
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
None => {
|
||||
info!("Successfully removed an object batch from S3");
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to send a delete request: {e:#}");
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
error!("Failed to do deletion, request: {original_request:?}");
|
||||
anyhow::bail!("Failed to run deletion request {MAX_RETRIES} times");
|
||||
}
|
||||
}
|
||||
|
||||
async fn ensure_tenant_batch_deleted(
|
||||
s3_client: &Client,
|
||||
s3_target: &RootTarget,
|
||||
batch: &[TenantId],
|
||||
) -> anyhow::Result<()> {
|
||||
let mut not_deleted_tenants = Vec::with_capacity(batch.len());
|
||||
|
||||
for &tenant_id in batch {
|
||||
let fetch_response =
|
||||
list_objects_with_retries(s3_client, &s3_target.tenant_root(tenant_id), None).await?;
|
||||
|
||||
if fetch_response.is_truncated()
|
||||
|| fetch_response.contents().is_some()
|
||||
|| fetch_response.common_prefixes().is_some()
|
||||
{
|
||||
error!(
|
||||
"Tenant {tenant_id} should be deleted, but its list response is {fetch_response:?}"
|
||||
);
|
||||
not_deleted_tenants.push(tenant_id);
|
||||
}
|
||||
}
|
||||
|
||||
anyhow::ensure!(
|
||||
not_deleted_tenants.is_empty(),
|
||||
"Failed to delete all tenants in a batch. Tenants {not_deleted_tenants:?} should be deleted."
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn ensure_timeline_batch_deleted(
|
||||
s3_client: &Client,
|
||||
s3_target: &RootTarget,
|
||||
batch: &[TenantTimelineId],
|
||||
) -> anyhow::Result<()> {
|
||||
let mut not_deleted_timelines = Vec::with_capacity(batch.len());
|
||||
|
||||
for &id in batch {
|
||||
let fetch_response =
|
||||
list_objects_with_retries(s3_client, &s3_target.timeline_root(id), None).await?;
|
||||
|
||||
if fetch_response.is_truncated()
|
||||
|| fetch_response.contents().is_some()
|
||||
|| fetch_response.common_prefixes().is_some()
|
||||
{
|
||||
error!("Timeline {id} should be deleted, but its list response is {fetch_response:?}");
|
||||
not_deleted_timelines.push(id);
|
||||
}
|
||||
}
|
||||
|
||||
anyhow::ensure!(
|
||||
not_deleted_timelines.is_empty(),
|
||||
"Failed to delete all timelines in a batch"
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
@@ -604,6 +604,7 @@ def test_timeline_deletion_with_files_stuck_in_upload_queue(
|
||||
checkpoint_allowed_to_fail.set()
|
||||
env.pageserver.allowed_errors.append(
|
||||
".* ERROR .*Error processing HTTP request: InternalServerError\\(timeline is Stopping"
|
||||
".* ERROR .*[Cc]ould not flush frozen layer.*"
|
||||
)
|
||||
|
||||
# Generous timeout, because currently deletions can get blocked waiting for compaction
|
||||
|
||||
@@ -14,13 +14,17 @@ publish = false
|
||||
### BEGIN HAKARI SECTION
|
||||
[dependencies]
|
||||
anyhow = { version = "1", features = ["backtrace"] }
|
||||
aws-config = { version = "0.56", default-features = false, features = ["credentials-sso", "rustls"] }
|
||||
aws-runtime = { version = "0.56", default-features = false, features = ["event-stream"] }
|
||||
aws-sigv4 = { version = "0.56", features = ["sign-eventstream"] }
|
||||
aws-smithy-http = { version = "0.56", default-features = false, features = ["event-stream", "rt-tokio"] }
|
||||
axum = { version = "0.6", features = ["ws"] }
|
||||
base64 = { version = "0.21", features = ["alloc"] }
|
||||
bytes = { version = "1", features = ["serde"] }
|
||||
chrono = { version = "0.4", default-features = false, features = ["clock", "serde"] }
|
||||
clap = { version = "4", features = ["derive", "string"] }
|
||||
clap_builder = { version = "4", default-features = false, features = ["color", "help", "std", "string", "suggestions", "usage"] }
|
||||
crossbeam-utils = { version = "0.8" }
|
||||
digest = { version = "0.10", features = ["mac", "std"] }
|
||||
either = { version = "1" }
|
||||
fail = { version = "0.5", default-features = false, features = ["failpoints"] }
|
||||
futures = { version = "0.3" }
|
||||
@@ -29,6 +33,7 @@ futures-core = { version = "0.3" }
|
||||
futures-executor = { version = "0.3" }
|
||||
futures-sink = { version = "0.3" }
|
||||
futures-util = { version = "0.3", features = ["channel", "io", "sink"] }
|
||||
hex = { version = "0.4", features = ["serde"] }
|
||||
hyper = { version = "0.14", features = ["full"] }
|
||||
itertools = { version = "0.10" }
|
||||
libc = { version = "0.2", features = ["extra_traits"] }
|
||||
@@ -44,14 +49,15 @@ regex = { version = "1" }
|
||||
regex-syntax = { version = "0.7" }
|
||||
reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "multipart", "rustls-tls"] }
|
||||
ring = { version = "0.16", features = ["std"] }
|
||||
rustls = { version = "0.20", features = ["dangerous_configuration"] }
|
||||
rustls = { version = "0.21", features = ["dangerous_configuration"] }
|
||||
scopeguard = { version = "1" }
|
||||
serde = { version = "1", features = ["alloc", "derive"] }
|
||||
serde_json = { version = "1", features = ["raw_value"] }
|
||||
smallvec = { version = "1", default-features = false, features = ["write"] }
|
||||
socket2 = { version = "0.4", default-features = false, features = ["all"] }
|
||||
time = { version = "0.3", features = ["formatting", "macros", "parsing"] }
|
||||
tokio = { version = "1", features = ["fs", "io-std", "io-util", "macros", "net", "process", "rt-multi-thread", "signal", "test-util"] }
|
||||
tokio-rustls = { version = "0.23" }
|
||||
tokio-rustls = { version = "0.24" }
|
||||
tokio-util = { version = "0.7", features = ["codec", "io"] }
|
||||
toml_datetime = { version = "0.6", default-features = false, features = ["serde"] }
|
||||
toml_edit = { version = "0.19", features = ["serde"] }
|
||||
@@ -59,6 +65,7 @@ tower = { version = "0.4", features = ["balance", "buffer", "limit", "retry", "t
|
||||
tracing = { version = "0.1", features = ["log"] }
|
||||
tracing-core = { version = "0.1" }
|
||||
url = { version = "2", features = ["serde"] }
|
||||
uuid = { version = "1", features = ["serde", "v4"] }
|
||||
|
||||
[build-dependencies]
|
||||
anyhow = { version = "1", features = ["backtrace"] }
|
||||
@@ -74,7 +81,7 @@ prost = { version = "0.11" }
|
||||
regex = { version = "1" }
|
||||
regex-syntax = { version = "0.7" }
|
||||
serde = { version = "1", features = ["alloc", "derive"] }
|
||||
syn-dff4ba8e3ae991db = { package = "syn", version = "1", features = ["extra-traits", "full", "visit", "visit-mut"] }
|
||||
syn-f595c2ba2a3f28df = { package = "syn", version = "2", features = ["extra-traits", "full", "visit-mut"] }
|
||||
syn-dff4ba8e3ae991db = { package = "syn", version = "1", features = ["extra-traits", "full", "visit"] }
|
||||
syn-f595c2ba2a3f28df = { package = "syn", version = "2", features = ["extra-traits", "full", "visit", "visit-mut"] }
|
||||
|
||||
### END HAKARI SECTION