Refactor CLI and CLI<->pageserver interfaces to support remote pageserver

This patch started as an effort to support CLI working against remote
pageserver, but turned into a pretty big refactoring.

* CLI now does not look into repository files directly. New commands
'branch_create' and 'identify_system' were introduced into page_service to
support that.
* Branch management that was scattered between local_env and
zenith/main.rs is moved into pageserver/branches.rs. That code could better fit
in Repository/Timeline impl, but I'll leave that for a different patch.
* All test-related code from local_env went into integration_tests/src/lib.rs as an
extension to PostgresNode trait.
* Paths-generating functions were concentrated around corresponding config
types (LocalEnv and PageserverConf).
This commit is contained in:
Stas Kelvich
2021-05-16 17:19:36 +03:00
parent 53ea6702bd
commit 746f667311
27 changed files with 1317 additions and 1082 deletions

View File

@@ -1,11 +0,0 @@
// test node resettlement to an empty datadir
// TODO
/*
#[test]
fn test_resettlement() {}
// test seq scan of everything after restart
#[test]
fn test_cold_seqscan() {}
*/

View File

@@ -1,8 +0,0 @@
// TODO
/*
#[test]
fn test_actions() {}
#[test]
fn test_regress() {}
*/

View File

@@ -1,23 +1,22 @@
// mod control_plane;
use control_plane::compute::ComputeControlPlane;
use control_plane::local_env;
use control_plane::local_env::PointInTime;
use control_plane::storage::TestStorageControlPlane;
use integration_tests;
use integration_tests::TestStorageControlPlane;
use integration_tests::PostgresNodeExt;
// XXX: force all redo at the end
// -- restart + seqscan won't read deleted stuff
// -- pageserver api endpoint to check all rels
#[test]
fn test_redo_cases() {
let local_env = local_env::test_env("test_redo_cases");
let local_env = integration_tests::create_test_env("test_redo_cases");
// Start pageserver that reads WAL directly from that postgres
let storage_cplane = TestStorageControlPlane::one_page_server(&local_env);
let mut compute_cplane = ComputeControlPlane::local(&local_env, &storage_cplane.pageserver);
// start postgres
let maintli = storage_cplane.get_branch_timeline("main");
let node = compute_cplane.new_test_node(maintli);
let node = compute_cplane.new_test_node("main");
node.start().unwrap();
// check basic work with table
@@ -51,15 +50,14 @@ fn test_redo_cases() {
// Runs pg_regress on a compute node
#[test]
fn test_regress() {
let local_env = local_env::test_env("test_regress");
let local_env = integration_tests::create_test_env("test_regress");
// Start pageserver that reads WAL directly from that postgres
let storage_cplane = TestStorageControlPlane::one_page_server(&local_env);
let mut compute_cplane = ComputeControlPlane::local(&local_env, &storage_cplane.pageserver);
// start postgres
let maintli = storage_cplane.get_branch_timeline("main");
let node = compute_cplane.new_test_node(maintli);
let node = compute_cplane.new_test_node("main");
node.start().unwrap();
let status = node.pg_regress();
@@ -69,15 +67,14 @@ fn test_regress() {
// Runs pg_bench on a compute node
#[test]
fn pgbench() {
let local_env = local_env::test_env("pgbench");
let local_env = integration_tests::create_test_env("pgbench");
// Start pageserver that reads WAL directly from that postgres
let storage_cplane = TestStorageControlPlane::one_page_server(&local_env);
let mut compute_cplane = ComputeControlPlane::local(&local_env, &storage_cplane.pageserver);
// start postgres
let maintli = storage_cplane.get_branch_timeline("main");
let node = compute_cplane.new_test_node(maintli);
let node = compute_cplane.new_test_node("main");
node.start().unwrap();
let status = node.pg_bench(10, 5);
@@ -87,30 +84,21 @@ fn pgbench() {
// Run two postgres instances on one pageserver, on different timelines
#[test]
fn test_pageserver_two_timelines() {
let local_env = local_env::test_env("test_pageserver_two_timelines");
let local_env = integration_tests::create_test_env("test_pageserver_two_timelines");
// Start pageserver that reads WAL directly from that postgres
let storage_cplane = TestStorageControlPlane::one_page_server(&local_env);
let mut compute_cplane = ComputeControlPlane::local(&local_env, &storage_cplane.pageserver);
let maintli = storage_cplane.get_branch_timeline("main");
// Create new branch at the end of 'main'
let startpoint = local_env::find_end_of_wal(&local_env, maintli).unwrap();
local_env::create_branch(
&local_env,
"experimental",
PointInTime {
timelineid: maintli,
lsn: startpoint,
},
)
.unwrap();
let experimentaltli = storage_cplane.get_branch_timeline("experimental");
storage_cplane
.pageserver
.branch_create("experimental", "main")
.unwrap();
// Launch postgres instances on both branches
let node1 = compute_cplane.new_test_node(maintli);
let node2 = compute_cplane.new_test_node(experimentaltli);
let node1 = compute_cplane.new_test_node("main");
let node2 = compute_cplane.new_test_node("experimental");
node1.start().unwrap();
node2.start().unwrap();

View File

@@ -1,21 +1,20 @@
// Restart acceptors one by one while compute is under the load.
use control_plane::compute::ComputeControlPlane;
use control_plane::local_env;
use control_plane::local_env::PointInTime;
use control_plane::storage::TestStorageControlPlane;
use pageserver::ZTimelineId;
use rand::Rng;
use std::sync::Arc;
use std::time::SystemTime;
use std::{thread, time};
use control_plane::compute::ComputeControlPlane;
use integration_tests;
use integration_tests::TestStorageControlPlane;
use integration_tests::PostgresNodeExt;
const DOWNTIME: u64 = 2;
#[test]
//#[ignore]
fn test_embedded_wal_proposer() {
let local_env = local_env::test_env("test_embedded_wal_proposer");
let local_env = integration_tests::create_test_env("test_embedded_wal_proposer");
const REDUNDANCY: usize = 3;
let storage_cplane = TestStorageControlPlane::fault_tolerant(&local_env, REDUNDANCY);
@@ -23,8 +22,7 @@ fn test_embedded_wal_proposer() {
let wal_acceptors = storage_cplane.get_wal_acceptor_conn_info();
// start postgres
let maintli = storage_cplane.get_branch_timeline("main");
let node = compute_cplane.new_test_master_node(maintli);
let node = compute_cplane.new_test_master_node("main");
node.append_conf(
"postgresql.conf",
&format!("wal_acceptors='{}'\n", wal_acceptors),
@@ -52,7 +50,7 @@ fn test_embedded_wal_proposer() {
#[test]
fn test_acceptors_normal_work() {
let local_env = local_env::test_env("test_acceptors_normal_work");
let local_env = integration_tests::create_test_env("test_acceptors_normal_work");
const REDUNDANCY: usize = 3;
let storage_cplane = TestStorageControlPlane::fault_tolerant(&local_env, REDUNDANCY);
@@ -60,8 +58,7 @@ fn test_acceptors_normal_work() {
let wal_acceptors = storage_cplane.get_wal_acceptor_conn_info();
// start postgres
let maintli = storage_cplane.get_branch_timeline("main");
let node = compute_cplane.new_test_master_node(maintli);
let node = compute_cplane.new_test_master_node("main");
node.start().unwrap();
// start proxy
@@ -93,36 +90,25 @@ fn test_many_timelines() {
// Initialize a new repository, and set up WAL safekeepers and page server.
const REDUNDANCY: usize = 3;
const N_TIMELINES: usize = 5;
let local_env = local_env::test_env("test_many_timelines");
let local_env = integration_tests::create_test_env("test_many_timelines");
let storage_cplane = TestStorageControlPlane::fault_tolerant(&local_env, REDUNDANCY);
let mut compute_cplane = ComputeControlPlane::local(&local_env, &storage_cplane.pageserver);
let wal_acceptors = storage_cplane.get_wal_acceptor_conn_info();
// Create branches
let mut timelines: Vec<ZTimelineId> = Vec::new();
let maintli = storage_cplane.get_branch_timeline("main"); // main branch
timelines.push(maintli);
let startpoint = local_env::find_end_of_wal(&local_env, maintli).unwrap();
let mut timelines: Vec<String> = Vec::new();
timelines.push("main".to_string());
for i in 1..N_TIMELINES {
// additional branches
let branchname = format!("experimental{}", i);
local_env::create_branch(
&local_env,
&branchname,
PointInTime {
timelineid: maintli,
lsn: startpoint,
},
)
.unwrap();
let tli = storage_cplane.get_branch_timeline(&branchname);
timelines.push(tli);
storage_cplane.pageserver.branch_create(&branchname, "main").unwrap();
timelines.push(branchname);
}
// start postgres on each timeline
let mut nodes = Vec::new();
for tli in timelines {
let node = compute_cplane.new_test_node(tli);
for tli_name in timelines {
let node = compute_cplane.new_test_node(&tli_name);
nodes.push(node.clone());
node.start().unwrap();
node.start_proxy(&wal_acceptors);
@@ -159,7 +145,7 @@ fn test_many_timelines() {
// Majority is always alive
#[test]
fn test_acceptors_restarts() {
let local_env = local_env::test_env("test_acceptors_restarts");
let local_env = integration_tests::create_test_env("test_acceptors_restarts");
// Start pageserver that reads WAL directly from that postgres
const REDUNDANCY: usize = 3;
@@ -171,8 +157,7 @@ fn test_acceptors_restarts() {
let mut rng = rand::thread_rng();
// start postgres
let maintli = storage_cplane.get_branch_timeline("main");
let node = compute_cplane.new_test_master_node(maintli);
let node = compute_cplane.new_test_master_node("main");
node.start().unwrap();
// start proxy
@@ -222,7 +207,7 @@ fn start_acceptor(cplane: &Arc<TestStorageControlPlane>, no: usize) {
// N_CRASHES env var
#[test]
fn test_acceptors_unavailability() {
let local_env = local_env::test_env("test_acceptors_unavailability");
let local_env = integration_tests::create_test_env("test_acceptors_unavailability");
// Start pageserver that reads WAL directly from that postgres
const REDUNDANCY: usize = 2;
@@ -232,8 +217,7 @@ fn test_acceptors_unavailability() {
let wal_acceptors = storage_cplane.get_wal_acceptor_conn_info();
// start postgres
let maintli = storage_cplane.get_branch_timeline("main");
let node = compute_cplane.new_test_master_node(maintli);
let node = compute_cplane.new_test_master_node("main");
node.start().unwrap();
// start proxy
@@ -307,7 +291,7 @@ fn simulate_failures(cplane: Arc<TestStorageControlPlane>) {
// Race condition test
#[test]
fn test_race_conditions() {
let local_env = local_env::test_env("test_race_conditions");
let local_env = integration_tests::create_test_env("test_race_conditions");
// Start pageserver that reads WAL directly from that postgres
const REDUNDANCY: usize = 3;
@@ -319,8 +303,7 @@ fn test_race_conditions() {
let wal_acceptors = storage_cplane.get_wal_acceptor_conn_info();
// start postgres
let maintli = storage_cplane.get_branch_timeline("main");
let node = compute_cplane.new_test_master_node(maintli);
let node = compute_cplane.new_test_master_node("main");
node.start().unwrap();
// start proxy