Compare commits

..

1 Commit

Author SHA1 Message Date
Stas Kelvich
5325f1e9d0 Set libpq path for pg_dump in fast_import 2024-12-20 13:49:21 +02:00
6 changed files with 6 additions and 96 deletions

41
Cargo.lock generated
View File

@@ -3945,7 +3945,6 @@ dependencies = [
"serde_json",
"serde_path_to_error",
"serde_with",
"serial_test",
"smallvec",
"storage_broker",
"strum",
@@ -5613,15 +5612,6 @@ dependencies = [
"winapi-util",
]
[[package]]
name = "scc"
version = "2.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94b13f8ea6177672c49d12ed964cca44836f59621981b04a3e26b87e675181de"
dependencies = [
"sdd",
]
[[package]]
name = "schannel"
version = "0.1.23"
@@ -5662,12 +5652,6 @@ version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "621e3680f3e07db4c9c2c3fb07c6223ab2fab2e54bd3c04c3ae037990f428c32"
[[package]]
name = "sdd"
version = "3.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "478f121bb72bbf63c52c93011ea1791dca40140dfe13f8336c4c5ac952c33aa9"
[[package]]
name = "sec1"
version = "0.3.0"
@@ -5957,31 +5941,6 @@ dependencies = [
"serde",
]
[[package]]
name = "serial_test"
version = "3.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9"
dependencies = [
"futures",
"log",
"once_cell",
"parking_lot 0.12.1",
"scc",
"serial_test_derive",
]
[[package]]
name = "serial_test_derive"
version = "3.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.90",
]
[[package]]
name = "sha1"
version = "0.10.5"

View File

@@ -161,7 +161,6 @@ serde_json = "1"
serde_path_to_error = "0.1"
serde_with = { version = "2.0", features = [ "base64" ] }
serde_assert = "0.5.0"
serial_test = "3.2.0"
sha2 = "0.10.2"
signal-hook = "0.3"
smallvec = "1.11"

View File

@@ -172,6 +172,7 @@ pub(crate) async fn main() -> anyhow::Result<()> {
.args(["-c", &format!("max_worker_processes={nproc}")])
.args(["-c", "effective_io_concurrency=100"])
.env_clear()
.env("LD_LIBRARY_PATH", "/usr/local/lib/")
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::piped())
.spawn()
@@ -256,6 +257,7 @@ pub(crate) async fn main() -> anyhow::Result<()> {
.arg(&source_connection_string)
// how we run it
.env_clear()
.env("LD_LIBRARY_PATH", "/usr/local/lib/")
.kill_on_drop(true)
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::piped())
@@ -289,6 +291,7 @@ pub(crate) async fn main() -> anyhow::Result<()> {
.arg(&dumpdir)
// how we run it
.env_clear()
.env("LD_LIBRARY_PATH", "/usr/local/lib/")
.kill_on_drop(true)
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::piped())

View File

@@ -94,7 +94,6 @@ procfs.workspace = true
[dev-dependencies]
criterion.workspace = true
hex-literal.workspace = true
serial_test.workspace = true
tokio = { workspace = true, features = ["process", "sync", "fs", "rt", "io-util", "time", "test-util"] }
indoc.workspace = true

View File

@@ -1,6 +1,5 @@
use anyhow::Context;
use camino::{Utf8Path, Utf8PathBuf};
use once_cell::sync::Lazy;
use pageserver_api::keyspace::KeySpace;
use pageserver_api::models::HistoricLayerInfo;
use pageserver_api::shard::{ShardIdentity, ShardIndex, TenantShardId};
@@ -2097,36 +2096,6 @@ impl Default for LayerImplMetrics {
}
impl LayerImplMetrics {
/// Resets the layer metrics to 0, for use in tests. Since this is a global static, metrics will
/// be shared across tests, and must be reset in each test case.
#[cfg(test)]
fn reset(&self) {
// Destructure to error on new fields.
let LayerImplMetrics {
started_evictions,
completed_evictions,
cancelled_evictions,
started_deletes,
completed_deletes,
failed_deletes,
rare_counters,
inits_cancelled,
redownload_after,
time_to_evict,
} = self;
started_evictions.reset();
completed_evictions.reset();
cancelled_evictions.values().for_each(|c| c.reset());
started_deletes.reset();
completed_deletes.reset();
failed_deletes.values().for_each(|c| c.reset());
rare_counters.values().for_each(|c| c.reset());
inits_cancelled.reset();
redownload_after.local().clear();
time_to_evict.local().clear();
}
fn inc_started_evictions(&self) {
self.started_evictions.inc();
}
@@ -2278,4 +2247,5 @@ impl RareEvent {
}
}
pub(crate) static LAYER_IMPL_METRICS: Lazy<LayerImplMetrics> = Lazy::new(LayerImplMetrics::default);
pub(crate) static LAYER_IMPL_METRICS: once_cell::sync::Lazy<LayerImplMetrics> =
once_cell::sync::Lazy::new(LayerImplMetrics::default);

View File

@@ -1,7 +1,6 @@
use std::time::UNIX_EPOCH;
use pageserver_api::key::CONTROLFILE_KEY;
use serial_test::serial;
use tokio::task::JoinSet;
use utils::{
completion::{self, Completion},
@@ -22,10 +21,7 @@ const FOREVER: std::time::Duration = std::time::Duration::from_secs(ADVANCE.as_s
/// Demonstrate the API and resident -> evicted -> resident -> deleted transitions.
#[tokio::test]
#[serial]
async fn smoke_test() {
LAYER_IMPL_METRICS.reset();
let handle = tokio::runtime::Handle::current();
let h = TenantHarness::create("smoke_test").await.unwrap();
@@ -202,10 +198,7 @@ async fn smoke_test() {
/// This test demonstrates a previous hang when a eviction and deletion were requested at the same
/// time. Now both of them complete per Arc drop semantics.
#[tokio::test(start_paused = true)]
#[serial]
async fn evict_and_wait_on_wanted_deleted() {
LAYER_IMPL_METRICS.reset();
// this is the runtime on which Layer spawns the blocking tasks on
let handle = tokio::runtime::Handle::current();
@@ -282,10 +275,7 @@ async fn evict_and_wait_on_wanted_deleted() {
/// This test ensures we are able to read the layer while the layer eviction has been
/// started but not completed.
#[test]
#[serial]
fn read_wins_pending_eviction() {
LAYER_IMPL_METRICS.reset();
let rt = tokio::runtime::Builder::new_current_thread()
.max_blocking_threads(1)
.enable_all()
@@ -405,7 +395,6 @@ fn read_wins_pending_eviction() {
/// Use failpoint to delay an eviction starting to get a VersionCheckFailed.
#[test]
#[serial]
fn multiple_pending_evictions_in_order() {
let name = "multiple_pending_evictions_in_order";
let in_order = true;
@@ -414,7 +403,6 @@ fn multiple_pending_evictions_in_order() {
/// Use failpoint to reorder later eviction before first to get a UnexpectedEvictedState.
#[test]
#[serial]
fn multiple_pending_evictions_out_of_order() {
let name = "multiple_pending_evictions_out_of_order";
let in_order = false;
@@ -422,8 +410,6 @@ fn multiple_pending_evictions_out_of_order() {
}
fn multiple_pending_evictions_scenario(name: &'static str, in_order: bool) {
LAYER_IMPL_METRICS.reset();
let rt = tokio::runtime::Builder::new_current_thread()
.max_blocking_threads(1)
.enable_all()
@@ -601,10 +587,7 @@ fn multiple_pending_evictions_scenario(name: &'static str, in_order: bool) {
/// disk but the layer internal state says it has not been initialized. Futhermore, it allows us to
/// have non-repairing `Layer::is_likely_resident`.
#[tokio::test(start_paused = true)]
#[serial]
async fn cancelled_get_or_maybe_download_does_not_cancel_eviction() {
LAYER_IMPL_METRICS.reset();
let handle = tokio::runtime::Handle::current();
let h = TenantHarness::create("cancelled_get_or_maybe_download_does_not_cancel_eviction")
.await
@@ -682,8 +665,8 @@ async fn cancelled_get_or_maybe_download_does_not_cancel_eviction() {
}
#[tokio::test(start_paused = true)]
#[serial]
async fn evict_and_wait_does_not_wait_for_download() {
// let handle = tokio::runtime::Handle::current();
let h = TenantHarness::create("evict_and_wait_does_not_wait_for_download")
.await
.unwrap();
@@ -776,13 +759,10 @@ async fn evict_and_wait_does_not_wait_for_download() {
///
/// Also checks that the same does not happen on a non-evicted layer (regression test).
#[tokio::test(start_paused = true)]
#[serial]
async fn eviction_cancellation_on_drop() {
use bytes::Bytes;
use pageserver_api::value::Value;
LAYER_IMPL_METRICS.reset();
// this is the runtime on which Layer spawns the blocking tasks on
let handle = tokio::runtime::Handle::current();