Mirror of https://github.com/neondatabase/neon.git
Support single and two safekeeper scenarios (#11483)
In tests, and when one safekeeper is down in a small region, we need to cope with only one or two safekeepers. Previously, `safekeepers_for_new_timeline` returned an error in that case; now we silently allow the timeline to be created on one or two safekeepers. Part of #9011
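For concreteness, this is the rule the change amounts to, written out as a standalone sketch rather than the patched function; `creation_quorum_reached` and the `testing_build` flag are illustrative stand-ins for the storage controller code and its "testing" cargo feature:

    // Sketch only: given the configured safekeeper set size and the number of
    // failed creation responses, decide whether timeline creation counts as
    // successful. Returns None where the patched code reports a misconfiguration.
    fn creation_quorum_reached(
        target_sk_count: usize,
        failed: usize,
        testing_build: bool,
    ) -> Option<bool> {
        let quorum_size = match target_sk_count {
            0 => return None,                          // no safekeepers configured at all
            1 | 2 if testing_build => target_sk_count, // tests: every safekeeper must succeed
            1 | 2 => return None,                      // production still requires at least three
            n => n / 2 + 1,                            // majority quorum: 3 -> 2, 5 -> 3
        };
        Some(target_sk_count.saturating_sub(failed) >= quorum_size)
    }

    fn main() {
        assert_eq!(creation_quorum_reached(1, 0, true), Some(true));   // single-safekeeper test setup
        assert_eq!(creation_quorum_reached(2, 1, true), Some(false));  // with two, both must succeed
        assert_eq!(creation_quorum_reached(3, 1, false), Some(true));  // one failure is tolerated
        assert_eq!(creation_quorum_reached(3, 2, false), Some(false)); // below quorum: retry
    }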
@@ -151,11 +151,39 @@ impl Service {
             "Got {} non-successful responses from initial creation request of total {total_result_count} responses",
             remaining.len()
         );
-        if remaining.len() >= 2 {
+        let target_sk_count = timeline_persistence.sk_set.len();
+        let quorum_size = match target_sk_count {
+            0 => {
+                return Err(ApiError::InternalServerError(anyhow::anyhow!(
+                    "timeline configured without any safekeepers",
+                )));
+            }
+            1 | 2 => {
+                #[cfg(feature = "testing")]
+                {
+                    // In test settings, it is allowed to have one or two safekeepers
+                    target_sk_count
+                }
+                #[cfg(not(feature = "testing"))]
+                {
+                    // The region is misconfigured: we need at least three safekeepers to be configured
+                    // in order to schedule work to them
+                    tracing::warn!(
+                        "couldn't find at least 3 safekeepers for timeline, found: {:?}",
+                        timeline_persistence.sk_set
+                    );
+                    return Err(ApiError::InternalServerError(anyhow::anyhow!(
+                        "couldn't find at least 3 safekeepers to put timeline to"
+                    )));
+                }
+            }
+            _ => target_sk_count / 2 + 1,
+        };
+        let success_count = target_sk_count - remaining.len();
+        if success_count < quorum_size {
             // Failure
             return Err(ApiError::InternalServerError(anyhow::anyhow!(
-                "not enough successful reconciliations to reach quorum, please retry: {} errored",
-                remaining.len()
+                "not enough successful reconciliations to reach quorum size: {success_count} of {quorum_size} of total {target_sk_count}"
             )));
         }
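The `1 | 2` arm above relies on `#[cfg]` being applied to block expressions: only one of the two inner blocks survives compilation, and whichever survives either yields the arm's value or returns early. A minimal sketch of the same idiom outside the patch; the function name and error type here are made up for illustration (the real code returns `ApiError`):

    fn quorum_for_small_set(target_sk_count: usize) -> Result<usize, String> {
        let quorum_size = match target_sk_count {
            0 => return Err("timeline configured without any safekeepers".to_string()),
            1 | 2 => {
                #[cfg(feature = "testing")]
                {
                    // test builds accept the small set; every safekeeper must then succeed
                    target_sk_count
                }
                #[cfg(not(feature = "testing"))]
                {
                    // production builds treat fewer than three safekeepers as a misconfiguration
                    return Err(format!("need at least 3 safekeepers, found {target_sk_count}"));
                }
            }
            n => n / 2 + 1,
        };
        Ok(quorum_size)
    }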
@@ -492,8 +520,6 @@ impl Service {
     pub(crate) async fn safekeepers_for_new_timeline(
         &self,
     ) -> Result<Vec<SafekeeperInfo>, ApiError> {
-        // Number of safekeepers in different AZs we are looking for
-        let wanted_count = 3;
         let mut all_safekeepers = {
             let locked = self.inner.read().unwrap();
             locked
@@ -532,6 +558,19 @@ impl Service {
                 sk.1.id.0,
             )
         });
+        // Number of safekeepers in different AZs we are looking for
+        let wanted_count = match all_safekeepers.len() {
+            0 => {
+                return Err(ApiError::InternalServerError(anyhow::anyhow!(
+                    "couldn't find any active safekeeper for new timeline",
+                )));
+            }
+            // Have laxer requirements in testing mode as we don't want to
+            // spin up three safekeepers for every single test
+            #[cfg(feature = "testing")]
+            1 | 2 => all_safekeepers.len(),
+            _ => 3,
+        };
         let mut sks = Vec::new();
         let mut azs = HashSet::new();
         for (_sk_util, sk_info, az_id) in all_safekeepers.iter() {
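With `wanted_count` now derived from the number of active safekeepers, the selection loop that follows (building `sks` and `azs`) still prefers distinct availability zones but no longer insists on three nodes when fewer exist. A simplified sketch of that selection under the relaxed count; the tuple layout and helper name are illustrative stand-ins, not the storage controller's types, and the behaviour when AZ diversity cannot be satisfied is left out:

    use std::collections::HashSet;

    // Sketch only: pick up to `wanted_count` safekeepers, at most one per AZ,
    // assuming `candidates` is already sorted best-first as `(id, az)` pairs.
    fn pick_safekeepers(candidates: &[(u64, String)]) -> Result<Vec<u64>, String> {
        let wanted_count = match candidates.len() {
            0 => return Err("couldn't find any active safekeeper".to_string()),
            1 | 2 => candidates.len(), // relaxed: accept what a small (test) region has
            _ => 3,                    // normal case: three safekeepers in distinct AZs
        };
        let mut picked = Vec::new();
        let mut azs = HashSet::new();
        for (id, az) in candidates {
            // prefer AZ diversity: only take the first candidate from each AZ
            if azs.insert(az.clone()) {
                picked.push(*id);
                if picked.len() == wanted_count {
                    break;
                }
            }
        }
        Ok(picked)
    }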