mirror of https://github.com/neondatabase/neon.git
synced 2026-05-17 21:20:37 +00:00

Compare commits: 4 commits, rc/proxy/2 ... jcsp/s3-dr
| Author | SHA1 | Date |
|---|---|---|
| | 448de0ba36 | |
| | 123d1f7ca6 | |
| | 5ec6862bcf | |
| | 94138c1a28 | |
Cargo.lock (generated): 11 changed lines
@@ -3901,7 +3901,7 @@ dependencies = [
 [[package]]
 name = "postgres"
 version = "0.19.4"
-source = "git+https://github.com/neondatabase/rust-postgres.git?branch=neon#988d0ddb4184c408fa7fc1bd0ecca7993c02978f"
+source = "git+https://github.com/neondatabase/rust-postgres.git?branch=neon#20031d7a9ee1addeae6e0968e3899ae6bf01cee2"
 dependencies = [
  "bytes",
  "fallible-iterator",
@@ -3914,7 +3914,7 @@ dependencies = [
 [[package]]
 name = "postgres-native-tls"
 version = "0.5.0"
-source = "git+https://github.com/neondatabase/rust-postgres.git?branch=neon#988d0ddb4184c408fa7fc1bd0ecca7993c02978f"
+source = "git+https://github.com/neondatabase/rust-postgres.git?branch=neon#20031d7a9ee1addeae6e0968e3899ae6bf01cee2"
 dependencies = [
  "native-tls",
  "tokio",
@@ -3925,7 +3925,7 @@ dependencies = [
 [[package]]
 name = "postgres-protocol"
 version = "0.6.4"
-source = "git+https://github.com/neondatabase/rust-postgres.git?branch=neon#988d0ddb4184c408fa7fc1bd0ecca7993c02978f"
+source = "git+https://github.com/neondatabase/rust-postgres.git?branch=neon#20031d7a9ee1addeae6e0968e3899ae6bf01cee2"
 dependencies = [
  "base64 0.20.0",
  "byteorder",
@@ -3938,12 +3938,13 @@ dependencies = [
  "rand 0.8.5",
  "sha2",
  "stringprep",
  "tokio",
 ]

 [[package]]
 name = "postgres-types"
 version = "0.2.4"
-source = "git+https://github.com/neondatabase/rust-postgres.git?branch=neon#988d0ddb4184c408fa7fc1bd0ecca7993c02978f"
+source = "git+https://github.com/neondatabase/rust-postgres.git?branch=neon#20031d7a9ee1addeae6e0968e3899ae6bf01cee2"
 dependencies = [
  "bytes",
  "fallible-iterator",
@@ -5945,7 +5946,7 @@ dependencies = [
 [[package]]
 name = "tokio-postgres"
 version = "0.7.7"
-source = "git+https://github.com/neondatabase/rust-postgres.git?branch=neon#988d0ddb4184c408fa7fc1bd0ecca7993c02978f"
+source = "git+https://github.com/neondatabase/rust-postgres.git?branch=neon#20031d7a9ee1addeae6e0968e3899ae6bf01cee2"
 dependencies = [
  "async-trait",
  "byteorder",
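Every changed Cargo.lock entry above is the same edit: the neondatabase/rust-postgres pin moves from commit 988d0ddb to 20031d7a for the five crates vendored from that fork. Judging from the proxy hunks later in this compare, the practical effect of the new pin is that the SCRAM password primitives in postgres-protocol become async. A minimal sketch of what a caller looks like after the bump (the async signature is inferred from the ServerSecret::build hunk further down; treat it as an assumption about the fork, not about upstream rust-postgres):

    use postgres_protocol::password;

    // Under the old pin this was a plain blocking call; under the new pin the
    // pbkdf2-heavy derivation is awaitable, so callers must be async too.
    async fn salted_password(pw: &str) -> String {
        password::scram_sha_256(pw.as_bytes()).await
    }

The next file in the compare is the utils crate's vec_map module, which gains the ordering policy used by the pageserver changes below.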
@@ -1,27 +1,60 @@
 use std::{alloc::Layout, cmp::Ordering, ops::RangeBounds};

+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum VecMapOrdering {
+    Greater,
+    GreaterOrEqual,
+}
+
 /// Ordered map datastructure implemented in a Vec.
 /// Append only - can only add keys that are larger than the
 /// current max key.
+/// Ordering can be adjusted using [`VecMapOrdering`]
+/// during `VecMap` construction.
 #[derive(Clone, Debug)]
-pub struct VecMap<K, V>(Vec<(K, V)>);
+pub struct VecMap<K, V> {
+    data: Vec<(K, V)>,
+    ordering: VecMapOrdering,
+}

 impl<K, V> Default for VecMap<K, V> {
     fn default() -> Self {
-        VecMap(Default::default())
+        VecMap {
+            data: Default::default(),
+            ordering: VecMapOrdering::Greater,
+        }
     }
 }

-#[derive(Debug)]
-pub struct InvalidKey;
+#[derive(thiserror::Error, Debug)]
+pub enum VecMapError {
+    #[error("Key violates ordering constraint")]
+    InvalidKey,
+    #[error("Mismatched ordering constraints")]
+    ExtendOrderingError,
+}

 impl<K: Ord, V> VecMap<K, V> {
+    pub fn new(ordering: VecMapOrdering) -> Self {
+        Self {
+            data: Vec::new(),
+            ordering,
+        }
+    }
+
+    pub fn with_capacity(capacity: usize, ordering: VecMapOrdering) -> Self {
+        Self {
+            data: Vec::with_capacity(capacity),
+            ordering,
+        }
+    }
+
     pub fn is_empty(&self) -> bool {
-        self.0.is_empty()
+        self.data.is_empty()
     }

     pub fn as_slice(&self) -> &[(K, V)] {
-        self.0.as_slice()
+        self.data.as_slice()
     }

     /// This function may panic if given a range where the lower bound is
@@ -29,7 +62,7 @@ impl<K: Ord, V> VecMap<K, V> {
     pub fn slice_range<R: RangeBounds<K>>(&self, range: R) -> &[(K, V)] {
         use std::ops::Bound::*;

-        let binary_search = |k: &K| self.0.binary_search_by_key(&k, extract_key);
+        let binary_search = |k: &K| self.data.binary_search_by_key(&k, extract_key);

         let start_idx = match range.start_bound() {
             Unbounded => 0,
@@ -41,7 +74,7 @@ impl<K: Ord, V> VecMap<K, V> {
         };

         let end_idx = match range.end_bound() {
-            Unbounded => self.0.len(),
+            Unbounded => self.data.len(),
             Included(k) => match binary_search(k) {
                 Ok(idx) => idx + 1,
                 Err(idx) => idx,
@@ -49,34 +82,30 @@ impl<K: Ord, V> VecMap<K, V> {
             Excluded(k) => binary_search(k).unwrap_or_else(std::convert::identity),
         };

-        &self.0[start_idx..end_idx]
+        &self.data[start_idx..end_idx]
     }

     /// Add a key value pair to the map.
-    /// If `key` is less than or equal to the current maximum key
-    /// the pair will not be added and InvalidKey error will be returned.
-    pub fn append(&mut self, key: K, value: V) -> Result<usize, InvalidKey> {
-        if let Some((last_key, _last_value)) = self.0.last() {
-            if &key <= last_key {
-                return Err(InvalidKey);
-            }
-        }
+    /// If `key` is not respective of the `self` ordering the
+    /// pair will not be added and `InvalidKey` error will be returned.
+    pub fn append(&mut self, key: K, value: V) -> Result<usize, VecMapError> {
+        self.validate_key_order(&key)?;

         let delta_size = self.instrument_vec_op(|vec| vec.push((key, value)));
         Ok(delta_size)
     }

     /// Update the maximum key value pair or add a new key value pair to the map.
-    /// If `key` is less than the current maximum key no updates or additions
-    /// will occur and InvalidKey error will be returned.
+    /// If `key` is not respective of the `self` ordering no updates or additions
+    /// will occur and `InvalidKey` error will be returned.
     pub fn append_or_update_last(
         &mut self,
         key: K,
         mut value: V,
-    ) -> Result<(Option<V>, usize), InvalidKey> {
-        if let Some((last_key, last_value)) = self.0.last_mut() {
+    ) -> Result<(Option<V>, usize), VecMapError> {
+        if let Some((last_key, last_value)) = self.data.last_mut() {
             match key.cmp(last_key) {
-                Ordering::Less => return Err(InvalidKey),
+                Ordering::Less => return Err(VecMapError::InvalidKey),
                 Ordering::Equal => {
                     std::mem::swap(last_value, &mut value);
                     const DELTA_SIZE: usize = 0;
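In short: `append` no longer hard-codes the strictly-increasing check; it defers to `validate_key_order` (added in the next hunk) and reports failures through the new `VecMapError`. A small usage sketch of the behavior these hunks encode, assuming the `utils` crate from this tree is on the path:

    use utils::vec_map::{VecMap, VecMapOrdering};

    // Strict policy: a repeated key is rejected.
    let mut strict: VecMap<u32, &str> = VecMap::new(VecMapOrdering::Greater);
    strict.append(1, "a").unwrap();
    assert!(strict.append(1, "b").is_err()); // VecMapError::InvalidKey

    // Relaxed policy: equal keys may repeat, but keys still must not go backwards.
    let mut relaxed: VecMap<u32, &str> = VecMap::new(VecMapOrdering::GreaterOrEqual);
    relaxed.append(1, "a").unwrap();
    relaxed.append(1, "b").unwrap();
    assert!(relaxed.append(0, "c").is_err());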
@@ -100,40 +129,67 @@ impl<K: Ord, V> VecMap<K, V> {
         V: Clone,
     {
         let split_idx = self
-            .0
+            .data
             .binary_search_by_key(&cutoff, extract_key)
             .unwrap_or_else(std::convert::identity);

         (
-            VecMap(self.0[..split_idx].to_vec()),
-            VecMap(self.0[split_idx..].to_vec()),
+            VecMap {
+                data: self.data[..split_idx].to_vec(),
+                ordering: self.ordering,
+            },
+            VecMap {
+                data: self.data[split_idx..].to_vec(),
+                ordering: self.ordering,
+            },
         )
     }

     /// Move items from `other` to the end of `self`, leaving `other` empty.
-    /// If any keys in `other` is less than or equal to any key in `self`,
-    /// `InvalidKey` error will be returned and no mutation will occur.
-    pub fn extend(&mut self, other: &mut Self) -> Result<usize, InvalidKey> {
-        let self_last_opt = self.0.last().map(extract_key);
-        let other_first_opt = other.0.last().map(extract_key);
-
-        if let (Some(self_last), Some(other_first)) = (self_last_opt, other_first_opt) {
-            if self_last >= other_first {
-                return Err(InvalidKey);
-            }
-        }
-
-        let delta_size = self.instrument_vec_op(|vec| vec.append(&mut other.0));
-        Ok(delta_size)
+    /// If the `other` ordering is different from `self` ordering
+    /// `ExtendOrderingError` error will be returned.
+    /// If any keys in `other` is not respective of the ordering defined in
+    /// `self`, `InvalidKey` error will be returned and no mutation will occur.
+    pub fn extend(&mut self, other: &mut Self) -> Result<usize, VecMapError> {
+        if self.ordering != other.ordering {
+            return Err(VecMapError::ExtendOrderingError);
+        }
+
+        let other_first_opt = other.data.last().map(extract_key);
+        if let Some(other_first) = other_first_opt {
+            self.validate_key_order(other_first)?;
+        }
+
+        let delta_size = self.instrument_vec_op(|vec| vec.append(&mut other.data));
+        Ok(delta_size)
+    }
+
+    /// Validate the current last key in `self` and key being
+    /// inserted against the order defined in `self`.
+    fn validate_key_order(&self, key: &K) -> Result<(), VecMapError> {
+        if let Some(last_key) = self.data.last().map(extract_key) {
+            match (&self.ordering, &key.cmp(last_key)) {
+                (VecMapOrdering::Greater, Ordering::Less | Ordering::Equal) => {
+                    return Err(VecMapError::InvalidKey);
+                }
+                (VecMapOrdering::Greater, Ordering::Greater) => {}
+                (VecMapOrdering::GreaterOrEqual, Ordering::Less) => {
+                    return Err(VecMapError::InvalidKey);
+                }
+                (VecMapOrdering::GreaterOrEqual, Ordering::Equal | Ordering::Greater) => {}
+            }
+        }
+
+        Ok(())
     }

     /// Instrument an operation on the underlying [`Vec`].
     /// Will panic if the operation decreases capacity.
     /// Returns the increase in memory usage caused by the op.
     fn instrument_vec_op(&mut self, op: impl FnOnce(&mut Vec<(K, V)>)) -> usize {
-        let old_cap = self.0.capacity();
-        op(&mut self.0);
-        let new_cap = self.0.capacity();
+        let old_cap = self.data.capacity();
+        op(&mut self.data);
+        let new_cap = self.data.capacity();

         match old_cap.cmp(&new_cap) {
             Ordering::Less => {
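`extend` gains a second failure mode: before looking at any keys it rejects a donor map whose ordering policy differs, so two maps can only be spliced under one policy. (Note that the key check still reads `other.data.last()` into a variable named `other_first_opt`; that quirk is carried over from the old code.) A sketch of the new error path, same assumptions as the previous example:

    use utils::vec_map::{VecMap, VecMapOrdering};

    let mut dst = VecMap::new(VecMapOrdering::Greater);
    dst.append(1u32, ()).unwrap();

    let mut src = VecMap::new(VecMapOrdering::GreaterOrEqual);
    src.append(2u32, ()).unwrap();

    // Mismatched policies fail fast with ExtendOrderingError and mutate nothing.
    assert!(dst.extend(&mut src).is_err());
    assert_eq!(src.as_slice(), &[(2, ())]);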
@@ -145,6 +201,36 @@ impl<K: Ord, V> VecMap<K, V> {
             Ordering::Greater => panic!("VecMap capacity shouldn't ever decrease"),
         }
     }
+
+    /// Similar to `from_iter` defined in `FromIter` trait except
+    /// that it accepts an [`VecMapOrdering`]
+    pub fn from_iter<I: IntoIterator<Item = (K, V)>>(iter: I, ordering: VecMapOrdering) -> Self {
+        let iter = iter.into_iter();
+        let initial_capacity = {
+            match iter.size_hint() {
+                (lower_bound, None) => lower_bound,
+                (_, Some(upper_bound)) => upper_bound,
+            }
+        };
+
+        let mut vec_map = VecMap::with_capacity(initial_capacity, ordering);
+        for (key, value) in iter {
+            vec_map
+                .append(key, value)
+                .expect("The passed collection needs to be sorted!");
+        }
+
+        vec_map
+    }
 }

+impl<K: Ord, V> IntoIterator for VecMap<K, V> {
+    type Item = (K, V);
+    type IntoIter = std::vec::IntoIter<(K, V)>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.data.into_iter()
+    }
+}
+
 fn extract_key<K, V>(entry: &(K, V)) -> &K {
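The capacity heuristic in `from_iter` prefers the upper bound of `size_hint` when the iterator reports one and falls back to the lower bound otherwise. A standalone illustration of what those hints look like (plain std, nothing repo-specific):

    let v = vec![(1u32, ()), (2, ()), (3, ())];
    // Vec's iterator knows its exact length: size_hint() == (3, Some(3)).
    assert_eq!(v.iter().size_hint(), (3, Some(3)));
    // A filter cannot predict how many items survive: (0, Some(3)).
    // from_iter's heuristic still reserves for the upper bound, 3.
    assert_eq!(v.iter().filter(|(k, _)| k % 2 == 1).size_hint(), (0, Some(3)));

The tests in the next hunks exercise `from_iter` directly, including the panic on unsorted input.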
@@ -155,7 +241,7 @@ fn extract_key<K, V>(entry: &(K, V)) -> &K {
 mod tests {
     use std::{collections::BTreeMap, ops::Bound};

-    use super::VecMap;
+    use super::{VecMap, VecMapOrdering};

     #[test]
     fn unbounded_range() {
@@ -310,5 +396,59 @@ mod tests {
         left.extend(&mut one_map).unwrap_err();
         assert_eq!(left.as_slice(), &[(0, ()), (1, ())]);
         assert_eq!(one_map.as_slice(), &[(1, ())]);
+
+        let mut map_greater_or_equal = VecMap::new(VecMapOrdering::GreaterOrEqual);
+        map_greater_or_equal.append(2, ()).unwrap();
+        map_greater_or_equal.append(2, ()).unwrap();
+
+        left.extend(&mut map_greater_or_equal).unwrap_err();
+        assert_eq!(left.as_slice(), &[(0, ()), (1, ())]);
+        assert_eq!(map_greater_or_equal.as_slice(), &[(2, ()), (2, ())]);
     }
+
+    #[test]
+    fn extend_with_ordering() {
+        let mut left = VecMap::new(VecMapOrdering::GreaterOrEqual);
+        left.append(0, ()).unwrap();
+        assert_eq!(left.as_slice(), &[(0, ())]);
+
+        let mut greater_right = VecMap::new(VecMapOrdering::Greater);
+        greater_right.append(0, ()).unwrap();
+        left.extend(&mut greater_right).unwrap_err();
+        assert_eq!(left.as_slice(), &[(0, ())]);
+
+        let mut greater_or_equal_right = VecMap::new(VecMapOrdering::GreaterOrEqual);
+        greater_or_equal_right.append(2, ()).unwrap();
+        greater_or_equal_right.append(2, ()).unwrap();
+        left.extend(&mut greater_or_equal_right).unwrap();
+        assert_eq!(left.as_slice(), &[(0, ()), (2, ()), (2, ())]);
+    }
+
+    #[test]
+    fn vec_map_from_sorted() {
+        let vec = vec![(1, ()), (2, ()), (3, ()), (6, ())];
+        let vec_map = VecMap::from_iter(vec, VecMapOrdering::Greater);
+        assert_eq!(vec_map.as_slice(), &[(1, ()), (2, ()), (3, ()), (6, ())]);
+
+        let vec = vec![(1, ()), (2, ()), (3, ()), (3, ()), (6, ()), (6, ())];
+        let vec_map = VecMap::from_iter(vec, VecMapOrdering::GreaterOrEqual);
+        assert_eq!(
+            vec_map.as_slice(),
+            &[(1, ()), (2, ()), (3, ()), (3, ()), (6, ()), (6, ())]
+        );
+    }
+
+    #[test]
+    #[should_panic]
+    fn vec_map_from_unsorted_greater() {
+        let vec = vec![(1, ()), (2, ()), (2, ()), (3, ()), (6, ())];
+        let _ = VecMap::from_iter(vec, VecMapOrdering::Greater);
+    }
+
+    #[test]
+    #[should_panic]
+    fn vec_map_from_unsorted_greater_or_equal() {
+        let vec = vec![(1, ()), (2, ()), (3, ()), (6, ()), (5, ())];
+        let _ = VecMap::from_iter(vec, VecMapOrdering::GreaterOrEqual);
+    }
 }
@@ -34,6 +34,7 @@ use strum::IntoEnumIterator;
 use tokio_util::sync::CancellationToken;
 use tracing::{debug, trace, warn};
 use utils::bin_ser::DeserializeError;
+use utils::vec_map::{VecMap, VecMapOrdering};
 use utils::{bin_ser::BeSer, lsn::Lsn};

 const MAX_AUX_FILE_DELTAS: usize = 1024;
@@ -1546,12 +1547,13 @@ impl<'a> DatadirModification<'a> {
         if !self.pending_updates.is_empty() {
             // The put_batch call below expects expects the inputs to be sorted by Lsn,
             // so we do that first.
-            let lsn_ordered_batch: Vec<(Key, Lsn, Value)> = self
-                .pending_updates
-                .drain()
-                .map(|(key, vals)| vals.into_iter().map(move |(lsn, val)| (key, lsn, val)))
-                .kmerge_by(|lhs, rhs| lhs.1 .0 < rhs.1 .0)
-                .collect();
+            let lsn_ordered_batch: VecMap<Lsn, (Key, Value)> = VecMap::from_iter(
+                self.pending_updates
+                    .drain()
+                    .map(|(key, vals)| vals.into_iter().map(move |(lsn, val)| (lsn, (key, val))))
+                    .kmerge_by(|lhs, rhs| lhs.0 < rhs.0),
+                VecMapOrdering::GreaterOrEqual,
+            );

             writer.put_batch(lsn_ordered_batch, ctx).await?;
         }
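This call site is what motivates `GreaterOrEqual`: the k-merged stream of `(lsn, (key, value))` pairs (merged with itertools' `kmerge_by`) is only non-decreasing in Lsn, because two different keys may be modified at the same Lsn. With stand-in types (u64 for `Lsn`, `&str` for `Key` and `Value`; the real types live in the pageserver crates), the shape of the batch is:

    use utils::vec_map::{VecMap, VecMapOrdering};

    // key_a and key_b both change at "lsn" 20; a strictly-increasing map
    // would reject the third entry, but GreaterOrEqual accepts it.
    let batch = VecMap::from_iter(
        [(10u64, ("key_a", "v1")), (20, ("key_a", "v2")), (20, ("key_b", "v1"))],
        VecMapOrdering::GreaterOrEqual,
    );
    assert_eq!(batch.as_slice().len(), 3);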
@@ -36,6 +36,7 @@ use tracing::*;
 use utils::{
     bin_ser::BeSer,
     sync::gate::{Gate, GateGuard},
+    vec_map::VecMap,
 };

 use std::ops::{Deref, Range};
@@ -4616,16 +4617,15 @@ impl<'a> TimelineWriter<'a> {
         }
     }

-    /// Put a batch keys at the specified Lsns.
+    /// Put a batch of keys at the specified Lsns.
     ///
-    /// The batch should be sorted by Lsn such that it's safe
-    /// to roll the open layer mid batch.
+    /// The batch is sorted by Lsn (enforced by usage of [`utils::vec_map::VecMap`].
     pub(crate) async fn put_batch(
         &mut self,
-        batch: Vec<(Key, Lsn, Value)>,
+        batch: VecMap<Lsn, (Key, Value)>,
         ctx: &RequestContext,
     ) -> anyhow::Result<()> {
-        for (key, lsn, val) in batch {
+        for (lsn, (key, val)) in batch {
             self.put(key, lsn, &val, ctx).await?
         }
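The loop changes shape because the batch now arrives as `VecMap<Lsn, (Key, Value)>` and is consumed through the `IntoIterator` impl added earlier in this compare; the sortedness that the old doc comment merely requested is now guaranteed by the type. A reduced sketch of the consuming side, with the same stand-in types as above:

    use utils::vec_map::VecMap;

    fn consume(batch: VecMap<u64, (&str, &str)>) {
        // Entries arrive in non-decreasing Lsn order, which is what makes it
        // safe to roll the open layer in the middle of a batch.
        for (lsn, (key, val)) in batch {
            println!("put {key}@{lsn} = {val}");
        }
    }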
@@ -135,9 +135,10 @@ impl TestAuth for NoAuth {}
 struct Scram(scram::ServerSecret);

 impl Scram {
-    fn new(password: &str) -> anyhow::Result<Self> {
-        let secret =
-            scram::ServerSecret::build(password).context("failed to generate scram secret")?;
+    async fn new(password: &str) -> anyhow::Result<Self> {
+        let secret = scram::ServerSecret::build(password)
+            .await
+            .context("failed to generate scram secret")?;
         Ok(Scram(secret))
     }
@@ -284,7 +285,7 @@ async fn scram_auth_good(#[case] password: &str) -> anyhow::Result<()> {
     let proxy = tokio::spawn(dummy_proxy(
         client,
         Some(server_config),
-        Scram::new(password)?,
+        Scram::new(password).await?,
     ));

     let (_client, _conn) = tokio_postgres::Config::new()
@@ -308,7 +309,7 @@ async fn scram_auth_disable_channel_binding() -> anyhow::Result<()> {
     let proxy = tokio::spawn(dummy_proxy(
         client,
         Some(server_config),
-        Scram::new("password")?,
+        Scram::new("password").await?,
     ));

     let (_client, _conn) = tokio_postgres::Config::new()
@@ -148,7 +148,7 @@ async fn scram_auth_disable_channel_binding() -> anyhow::Result<()> {
     let proxy = tokio::spawn(dummy_proxy(
         client,
         Some(server_config),
-        Scram::new("password")?,
+        Scram::new("password").await?,
     ));

     let _client_err = tokio_postgres::Config::new()
@@ -231,7 +231,7 @@ async fn connect_failure(
     let proxy = tokio::spawn(dummy_proxy(
         client,
         Some(server_config),
-        Scram::new("password")?,
+        Scram::new("password").await?,
     ));

     let _client_err = tokio_postgres::Config::new()
@@ -114,7 +114,7 @@ mod tests {
     }

     async fn run_round_trip_test(server_password: &str, client_password: &str) {
-        let scram_secret = ServerSecret::build(server_password).unwrap();
+        let scram_secret = ServerSecret::build(server_password).await.unwrap();
         let sasl_client =
             ScramSha256::new(client_password.as_bytes(), ChannelBinding::unsupported());
@@ -86,14 +86,7 @@ pub async fn exchange(
         .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;
     let sent = match init.transition(secret, &tls_server_end_point, client_first)? {
         Continue(sent, server_first) => {
-            // `client.update` might perform `pbkdf2(pw)`, best to spawn it in a blocking thread.
-            // TODO(conrad): take this code from tokio-postgres and make an async-aware pbkdf2 impl
-            client = tokio::task::spawn_blocking(move || {
-                client.update(server_first.as_bytes())?;
-                Ok::<ScramSha256, std::io::Error>(client)
-            })
-            .await
-            .expect("should not panic while performing password hash")?;
+            client.update(server_first.as_bytes()).await?;
             sent
         }
         Success(x, _) => match x {},
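With the new pin, `ScramSha256::update` is itself async, so the `spawn_blocking` detour (and its TODO) can be dropped. The fork's implementation is not shown in this compare, but the removed TODO hints at the idea: an async-aware pbkdf2 that yields between bounded chunks of work instead of monopolizing an executor thread. A hypothetical sketch of that pattern, not the fork's actual code:

    // Hedged sketch only: run the key-derivation loop in bounded chunks
    // and yield to the runtime between them.
    async fn pbkdf2_cooperative(mut rounds: u32) {
        while rounds > 0 {
            let chunk = rounds.min(4096);
            for _ in 0..chunk {
                // one HMAC-SHA-256 round per iteration would run here
            }
            rounds -= chunk;
            tokio::task::yield_now().await; // let other tasks make progress
        }
    }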
@@ -59,10 +59,8 @@ impl ServerSecret {
     /// Build a new server secret from the prerequisites.
     /// XXX: We only use this function in tests.
     #[cfg(test)]
-    pub fn build(password: &str) -> Option<Self> {
-        Self::parse(&postgres_protocol::password::scram_sha_256(
-            password.as_bytes(),
-        ))
+    pub async fn build(password: &str) -> Option<Self> {
+        Self::parse(&postgres_protocol::password::scram_sha_256(password.as_bytes()).await)
     }
 }
@@ -1617,6 +1617,8 @@ class NeonCli(AbstractNeonCli):
         if timeline_id is not None:
             cmd.extend(["--timeline-id", str(timeline_id)])

+        log.info(f"create_timeline: {cmd}")
+
         res = self.raw_cli(cmd)
         res.check_returncode()
@@ -1648,6 +1650,8 @@ class NeonCli(AbstractNeonCli):
         if ancestor_start_lsn is not None:
             cmd.extend(["--ancestor-start-lsn", str(ancestor_start_lsn)])

+        log.info(f"create_branch: {cmd}")
+
         res = self.raw_cli(cmd)
         res.check_returncode()
@@ -4,6 +4,7 @@ from datetime import datetime, timezone
 from fixtures.neon_fixtures import (
     NeonEnvBuilder,
     PgBin,
+    last_flush_lsn_upload,
 )
 from fixtures.pageserver.utils import (
     MANY_SMALL_LAYERS_TENANT_CONFIG,
@@ -16,6 +17,7 @@ from fixtures.pageserver.utils import (
 from fixtures.remote_storage import RemoteStorageKind, s3_storage
 from fixtures.types import Lsn
 from fixtures.utils import run_pg_bench_small
+from fixtures.log_helper import log


 def test_tenant_s3_restore(
@@ -51,20 +53,31 @@ def test_tenant_s3_restore(

     # create two timelines one being the parent of another, both with non-trivial data
     parent = None
-    last_flush_lsns = []
+    last_flush_lsns = {}

-    for timeline in ["first", "second"]:
+    timeline_ids = {}
+    with env.endpoints.create_start("main", tenant_id=env.initial_tenant) as endpoint:
+        endpoint.safe_psql(f"CREATE TABLE created_main(id integer);")
+        flushed = last_flush_lsn_upload(env, endpoint, env.initial_tenant, env.initial_timeline)
+        log.info(f"Timeline main/{env.initial_timeline} last_flush_lsn: {flushed}");
+        last_flush_lsns["main"] = flushed
+    timeline_ids["main"] = env.initial_timeline
+
+    for branch in ["first", "second"]:
         timeline_id = env.neon_cli.create_branch(
-            timeline, tenant_id=tenant_id, ancestor_branch_name=parent
+            branch, tenant_id=tenant_id, ancestor_branch_name=parent
         )
-        with env.endpoints.create_start(timeline, tenant_id=tenant_id) as endpoint:
+        timeline_ids[branch] = timeline_id
+        with env.endpoints.create_start(branch, tenant_id=tenant_id) as endpoint:
             run_pg_bench_small(pg_bin, endpoint.connstr())
-            endpoint.safe_psql(f"CREATE TABLE created_{timeline}(id integer);")
+            endpoint.safe_psql(f"CREATE TABLE created_{branch}(id integer);")
             last_flush_lsn = Lsn(endpoint.safe_psql("SELECT pg_current_wal_flush_lsn()")[0][0])
-            last_flush_lsns.append(last_flush_lsn)
+            last_flush_lsns[branch] = last_flush_lsn

+        log.info(f"Timeline {branch}/{timeline_id} last_flush_lsn: {last_flush_lsn}");
         ps_http.timeline_checkpoint(tenant_id, timeline_id)
         wait_for_upload(ps_http, tenant_id, timeline_id, last_flush_lsn)
-        parent = timeline
+        parent = branch

     # These sleeps are important because they fend off differences in clocks between us and S3
     time.sleep(4)
@@ -107,12 +120,21 @@ def test_tenant_s3_restore(

     ps_http.tenant_attach(tenant_id, generation=generation)
     env.pageserver.quiesce_tenants()

-    for i, timeline in enumerate(["first", "second"]):
-        with env.endpoints.create_start(timeline, tenant_id=tenant_id) as endpoint:
-            endpoint.safe_psql(f"SELECT * FROM created_{timeline};")
+    for branch in ["main", "first", "second"]:
+        timeline_id = timeline_ids[branch]
+        detail = ps_http.timeline_detail(tenant_id, timeline_id)
+        log.info(f"Timeline {branch}/{timeline_id} detail: {detail}")
+
+        # We expect that we restored pageserver state up to last_flush_lsn, because we flushed that to
+        # remote storage before we set our recovery timestamp.
+        #assert Lsn(detail["last_record_lsn"]) >= last_flush_lsns[branch]
+
+        # Check that we can indeed read from this recovered timeline
+        with env.endpoints.create_start(branch, tenant_id=tenant_id) as endpoint:
+            endpoint.safe_psql(f"SELECT * FROM created_{branch};")
             last_flush_lsn = Lsn(endpoint.safe_psql("SELECT pg_current_wal_flush_lsn()")[0][0])
-            expected_last_flush_lsn = last_flush_lsns[i]
+            expected_last_flush_lsn = last_flush_lsns[branch]
             # There might be some activity that advances the lsn so we can't use a strict equality check
             assert last_flush_lsn >= expected_last_flush_lsn, "last_flush_lsn too old"