diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 63b809a786..715c0753af 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -65,17 +65,8 @@ BASE_PORT = 15000 WORKER_PORT_NUM = 1000 -# These are set in pytest_configure() -base_dir = "" -neon_binpath = "" -pg_distrib_dir = "" -top_output_dir = "" -default_pg_version = "" - - def pytest_configure(config): """ - Ensure that no unwanted daemons are running before we start testing. Check that we do not overflow available ports range. """ @@ -85,67 +76,89 @@ def pytest_configure(config): ): # do not use ephemeral ports raise Exception("Too many workers configured. Cannot distribute ports for services.") + +@pytest.fixture(scope="session") +def base_dir() -> Iterator[Path]: # find the base directory (currently this is the git root) - global base_dir - base_dir = os.path.normpath(os.path.join(get_self_dir(), "../..")) + base_dir = get_self_dir().parent.parent log.info(f"base_dir is {base_dir}") - # Compute the top-level directory for all tests. - global top_output_dir - env_test_output = os.environ.get("TEST_OUTPUT") - if env_test_output is not None: - top_output_dir = env_test_output - else: - top_output_dir = os.path.join(base_dir, DEFAULT_OUTPUT_DIR) - Path(top_output_dir).mkdir(exist_ok=True) + yield base_dir - # Find the postgres installation. 
- global default_pg_version - log.info(f"default_pg_version is {default_pg_version}") - env_default_pg_version = os.environ.get("DEFAULT_PG_VERSION") - if env_default_pg_version: - default_pg_version = env_default_pg_version - log.info(f"default_pg_version is set to {default_pg_version}") - else: - default_pg_version = DEFAULT_PG_VERSION_DEFAULT - - global pg_distrib_dir - - env_postgres_bin = os.environ.get("POSTGRES_DISTRIB_DIR") - if env_postgres_bin: - pg_distrib_dir = env_postgres_bin - else: - pg_distrib_dir = os.path.normpath(os.path.join(base_dir, "pg_install")) - - log.info(f"pg_distrib_dir is {pg_distrib_dir}") - psql_bin_path = os.path.join(pg_distrib_dir, "v{}".format(default_pg_version), "bin/psql") - postgres_bin_path = os.path.join( - pg_distrib_dir, "v{}".format(default_pg_version), "bin/postgres" - ) - - if os.getenv("REMOTE_ENV"): - # When testing against a remote server, we only need the client binary. - if not os.path.exists(psql_bin_path): - raise Exception('psql not found at "{}"'.format(psql_bin_path)) - else: - if not os.path.exists(postgres_bin_path): - raise Exception('postgres not found at "{}"'.format(postgres_bin_path)) +@pytest.fixture(scope="session") +def neon_binpath(base_dir: Path) -> Iterator[Path]: if os.getenv("REMOTE_ENV"): # we are in remote env and do not have neon binaries locally # this is the case for benchmarks run on self-hosted runner return + # Find the neon binaries. 
- global neon_binpath - env_neon_bin = os.environ.get("NEON_BIN") - if env_neon_bin: - neon_binpath = env_neon_bin + if env_neon_bin := os.environ.get("NEON_BIN"): + binpath = Path(env_neon_bin) else: build_type = os.environ.get("BUILD_TYPE", "debug") - neon_binpath = os.path.join(base_dir, "target", build_type) - log.info(f"neon_binpath is {neon_binpath}") - if not os.path.exists(os.path.join(neon_binpath, "pageserver")): - raise Exception('neon binaries not found at "{}"'.format(neon_binpath)) + binpath = base_dir / "target" / build_type + log.info(f"neon_binpath is {binpath}") + + if not (binpath / "pageserver").exists(): + raise Exception(f"neon binaries not found at '{binpath}'") + + yield binpath + + +@pytest.fixture(scope="session") +def pg_distrib_dir(base_dir: Path) -> Iterator[Path]: + if env_postgres_bin := os.environ.get("POSTGRES_DISTRIB_DIR"): + distrib_dir = Path(env_postgres_bin).resolve() + else: + distrib_dir = base_dir / "pg_install" + + log.info(f"pg_distrib_dir is {distrib_dir}") + yield distrib_dir + + +@pytest.fixture(scope="session") +def top_output_dir(base_dir: Path) -> Iterator[Path]: + # Compute the top-level directory for all tests. 
+    if env_test_output := os.environ.get("TEST_OUTPUT"): +        output_dir = Path(env_test_output).resolve() +    else: +        output_dir = base_dir / DEFAULT_OUTPUT_DIR +    output_dir.mkdir(exist_ok=True) + +    log.info(f"top_output_dir is {output_dir}") +    yield output_dir + + +@pytest.fixture(scope="session") +def pg_version() -> Iterator[str]: +    if env_default_pg_version := os.environ.get("DEFAULT_PG_VERSION"): +        version = env_default_pg_version +    else: +        version = DEFAULT_PG_VERSION_DEFAULT + +    log.info(f"pg_version is {version}") +    yield version + + +@pytest.fixture(scope="session") +def versioned_pg_distrib_dir(pg_distrib_dir: Path, pg_version: str) -> Iterator[Path]: +    versioned_dir = pg_distrib_dir / f"v{pg_version}" + +    psql_bin_path = versioned_dir / "bin/psql" +    postgres_bin_path = versioned_dir / "bin/postgres" + +    if os.getenv("REMOTE_ENV"): +        # When testing against a remote server, we only need the client binary. +        if not psql_bin_path.exists(): +            raise Exception(f"psql not found at '{psql_bin_path}'") +    else: +        if not postgres_bin_path.exists(): +            raise Exception(f"postgres not found at '{postgres_bin_path}'") + +    log.info(f"versioned_pg_distrib_dir is {versioned_dir}") +    yield versioned_dir def shareable_scope(fixture_name, config) -> Literal["session", "function"]: @@ -232,16 +245,18 @@ def port_distributor(worker_base_port): @pytest.fixture(scope="session") -def default_broker(request: Any, port_distributor: PortDistributor): +def default_broker(request: Any, port_distributor: PortDistributor, top_output_dir: Path): client_port = port_distributor.get_port() # multiple pytest sessions could get launched in parallel, get them different datadirs - etcd_datadir = os.path.join(get_test_output_dir(request), f"etcd_datadir_{client_port}") - Path(etcd_datadir).mkdir(exist_ok=True, parents=True) + etcd_datadir = get_test_output_dir(request, top_output_dir) / f"etcd_datadir_{client_port}" + etcd_datadir.mkdir(exist_ok=True, parents=True) - broker = Etcd(datadir=etcd_datadir, 
port=client_port, peer_port=port_distributor.get_port()) + broker = Etcd( + datadir=str(etcd_datadir), port=client_port, peer_port=port_distributor.get_port() + ) yield broker broker.stop() - allure_attach_from_dir(Path(etcd_datadir)) + allure_attach_from_dir(etcd_datadir) @pytest.fixture(scope="session") @@ -521,6 +536,9 @@ class NeonEnvBuilder: broker: Etcd, run_id: uuid.UUID, mock_s3_server: MockS3Server, + neon_binpath: Path, + pg_distrib_dir: Path, + pg_version: str, remote_storage: Optional[RemoteStorage] = None, remote_storage_users: RemoteStorageUsers = RemoteStorageUsers.PAGESERVER, pageserver_config_override: Optional[str] = None, @@ -550,7 +568,9 @@ class NeonEnvBuilder: self.env: Optional[NeonEnv] = None self.remote_storage_prefix: Optional[str] = None self.keep_remote_storage_contents: bool = True - self.pg_version = default_pg_version + self.neon_binpath = neon_binpath + self.pg_distrib_dir = pg_distrib_dir + self.pg_version = pg_version def init(self) -> NeonEnv: # Cannot create more than one environment from one builder @@ -766,6 +786,8 @@ class NeonEnv: self.remote_storage = config.remote_storage self.remote_storage_users = config.remote_storage_users self.pg_version = config.pg_version + self.neon_binpath = config.neon_binpath + self.pg_distrib_dir = config.pg_distrib_dir # generate initial tenant ID here instead of letting 'neon init' generate it, # so that we don't need to dig it out of the config file afterwards. 
@@ -861,7 +883,7 @@ class NeonEnv: return self.repo_dir / "tenants" / str(tenant_id) / "timelines" / str(timeline_id) def get_pageserver_version(self) -> str: - bin_pageserver = os.path.join(str(neon_binpath), "pageserver") + bin_pageserver = str(self.neon_binpath / "pageserver") res = subprocess.run( [bin_pageserver, "--version"], check=True, @@ -885,6 +907,10 @@ def _shared_simple_env( mock_s3_server: MockS3Server, default_broker: Etcd, run_id: uuid.UUID, + top_output_dir: Path, + neon_binpath: Path, + pg_distrib_dir: Path, + pg_version: str, ) -> Iterator[NeonEnv]: """ # Internal fixture backing the `neon_simple_env` fixture. If TEST_SHARED_FIXTURES @@ -893,17 +919,20 @@ def _shared_simple_env( if os.environ.get("TEST_SHARED_FIXTURES") is None: # Create the environment in the per-test output directory - repo_dir = os.path.join(get_test_output_dir(request), "repo") + repo_dir = get_test_output_dir(request, top_output_dir) / "repo" else: # We're running shared fixtures. Share a single directory. 
- repo_dir = os.path.join(str(top_output_dir), "shared_repo") + repo_dir = top_output_dir / "shared_repo" shutil.rmtree(repo_dir, ignore_errors=True) with NeonEnvBuilder( - repo_dir=Path(repo_dir), + repo_dir=repo_dir, port_distributor=port_distributor, broker=default_broker, mock_s3_server=mock_s3_server, + neon_binpath=neon_binpath, + pg_distrib_dir=pg_distrib_dir, + pg_version=pg_version, run_id=run_id, ) as builder: env = builder.init_start() @@ -934,6 +963,9 @@ def neon_env_builder( test_output_dir, port_distributor: PortDistributor, mock_s3_server: MockS3Server, + neon_binpath: Path, + pg_distrib_dir: Path, + pg_version: str, default_broker: Etcd, run_id: uuid.UUID, ) -> Iterator[NeonEnvBuilder]: @@ -958,6 +990,9 @@ def neon_env_builder( repo_dir=Path(repo_dir), port_distributor=port_distributor, mock_s3_server=mock_s3_server, + neon_binpath=neon_binpath, + pg_distrib_dir=pg_distrib_dir, + pg_version=pg_version, broker=default_broker, run_id=run_id, ) as builder: @@ -1240,7 +1275,7 @@ class AbstractNeonCli(abc.ABC): assert type(arguments) == list assert type(self.COMMAND) == str - bin_neon = os.path.join(str(neon_binpath), self.COMMAND) + bin_neon = str(self.env.neon_binpath / self.COMMAND) args = [bin_neon] + arguments log.info('Running command "{}"'.format(" ".join(args))) @@ -1248,7 +1283,7 @@ class AbstractNeonCli(abc.ABC): env_vars = os.environ.copy() env_vars["NEON_REPO_DIR"] = str(self.env.repo_dir) - env_vars["POSTGRES_DISTRIB_DIR"] = str(pg_distrib_dir) + env_vars["POSTGRES_DISTRIB_DIR"] = str(self.env.pg_distrib_dir) if self.env.rust_log_override is not None: env_vars["RUST_LOG"] = self.env.rust_log_override for (extra_env_key, extra_env_value) in (extra_env_vars or {}).items(): @@ -1723,17 +1758,17 @@ def append_pageserver_param_overrides( class PgBin: """A helper class for executing postgres binaries""" - def __init__(self, log_dir: Path, pg_version: str): + def __init__(self, log_dir: Path, pg_distrib_dir: Path, pg_version: str): self.log_dir = 
log_dir self.pg_version = pg_version - self.pg_bin_path = os.path.join(str(pg_distrib_dir), "v{}".format(pg_version), "bin") - self.pg_lib_dir = os.path.join(str(pg_distrib_dir), "v{}".format(pg_version), "lib") + self.pg_bin_path = pg_distrib_dir / f"v{pg_version}" / "bin" + self.pg_lib_dir = pg_distrib_dir / f"v{pg_version}" / "lib" self.env = os.environ.copy() - self.env["LD_LIBRARY_PATH"] = self.pg_lib_dir + self.env["LD_LIBRARY_PATH"] = str(self.pg_lib_dir) def _fixpath(self, command: List[str]): - if "/" not in command[0]: - command[0] = os.path.join(self.pg_bin_path, command[0]) + if "/" not in str(command[0]): + command[0] = str(self.pg_bin_path / command[0]) def _build_env(self, env_add: Optional[Env]) -> Env: if env_add is None: @@ -1757,7 +1792,7 @@ class PgBin: """ self._fixpath(command) - log.info('Running command "{}"'.format(" ".join(command))) + log.info(f"Running command '{' '.join(command)}'") env = self._build_env(env) subprocess.run(command, env=env, cwd=cwd, check=True) @@ -1776,16 +1811,14 @@ class PgBin: """ self._fixpath(command) - log.info('Running command "{}"'.format(" ".join(command))) + log.info(f"Running command '{' '.join(command)}'") env = self._build_env(env) - return subprocess_capture( - str(self.log_dir), command, env=env, cwd=cwd, check=True, **kwargs - ) + return subprocess_capture(self.log_dir, command, env=env, cwd=cwd, check=True, **kwargs) @pytest.fixture(scope="function") -def pg_bin(test_output_dir: Path, pg_version: str) -> PgBin: - return PgBin(test_output_dir, pg_version) +def pg_bin(test_output_dir: Path, pg_distrib_dir: Path, pg_version: str) -> PgBin: + return PgBin(test_output_dir, pg_distrib_dir, pg_version) class VanillaPostgres(PgProtocol): @@ -1832,19 +1865,15 @@ class VanillaPostgres(PgProtocol): self.stop() -@pytest.fixture(scope="session") -def pg_version() -> str: - return default_pg_version - - @pytest.fixture(scope="function") def vanilla_pg( test_output_dir: Path, port_distributor: PortDistributor, + 
pg_distrib_dir: Path, pg_version: str, ) -> Iterator[VanillaPostgres]: pgdatadir = test_output_dir / "pgdata-vanilla" - pg_bin = PgBin(test_output_dir, pg_version) + pg_bin = PgBin(test_output_dir, pg_distrib_dir, pg_version) port = port_distributor.get_port() with VanillaPostgres(pgdatadir, pg_bin, port) as vanilla_pg: yield vanilla_pg @@ -1880,8 +1909,10 @@ class RemotePostgres(PgProtocol): @pytest.fixture(scope="function") -def remote_pg(test_output_dir: Path, pg_version: str) -> Iterator[RemotePostgres]: - pg_bin = PgBin(test_output_dir, pg_version) +def remote_pg( + test_output_dir: Path, pg_distrib_dir: Path, pg_version: str +) -> Iterator[RemotePostgres]: + pg_bin = PgBin(test_output_dir, pg_distrib_dir, pg_version) connstr = os.getenv("BENCHMARK_CONNSTR") if connstr is None: @@ -1926,10 +1957,18 @@ class PSQL: class NeonProxy(PgProtocol): - def __init__(self, proxy_port: int, http_port: int, auth_endpoint=None, mgmt_port=None): + def __init__( + self, + proxy_port: int, + http_port: int, + neon_binpath: Path, + auth_endpoint=None, + mgmt_port=None, + ): super().__init__(dsn=auth_endpoint, port=proxy_port) self.host = "127.0.0.1" self.http_port = http_port + self.neon_binpath = neon_binpath self.proxy_port = proxy_port self.mgmt_port = mgmt_port self.auth_endpoint = auth_endpoint @@ -1945,7 +1984,7 @@ class NeonProxy(PgProtocol): # Start proxy args = [ - os.path.join(neon_binpath, "proxy"), + str(self.neon_binpath / "proxy"), *["--http", f"{self.host}:{self.http_port}"], *["--proxy", f"{self.host}:{self.proxy_port}"], *["--auth-backend", "postgres"], @@ -1961,7 +2000,7 @@ class NeonProxy(PgProtocol): assert self._popen is None # Start proxy - bin_proxy = os.path.join(str(neon_binpath), "proxy") + bin_proxy = str(self.neon_binpath / "proxy") args = [bin_proxy] args.extend(["--http", f"{self.host}:{self.http_port}"]) args.extend(["--proxy", f"{self.host}:{self.proxy_port}"]) @@ -1993,18 +2032,18 @@ class NeonProxy(PgProtocol): @pytest.fixture(scope="function") 
-def link_proxy(port_distributor) -> Iterator[NeonProxy]: +def link_proxy(port_distributor, neon_binpath: Path) -> Iterator[NeonProxy]: """Neon proxy that routes through link auth.""" http_port = port_distributor.get_port() proxy_port = port_distributor.get_port() mgmt_port = port_distributor.get_port() - with NeonProxy(proxy_port, http_port, mgmt_port=mgmt_port) as proxy: + with NeonProxy(proxy_port, http_port, neon_binpath=neon_binpath, mgmt_port=mgmt_port) as proxy: proxy.start_with_link_auth() yield proxy @pytest.fixture(scope="function") -def static_proxy(vanilla_pg, port_distributor) -> Iterator[NeonProxy]: +def static_proxy(vanilla_pg, port_distributor, neon_binpath: Path) -> Iterator[NeonProxy]: """Neon proxy that routes directly to vanilla postgres.""" # For simplicity, we use the same user for both `--auth-endpoint` and `safe_psql` @@ -2020,7 +2059,10 @@ def static_proxy(vanilla_pg, port_distributor) -> Iterator[NeonProxy]: http_port = port_distributor.get_port() with NeonProxy( - proxy_port=proxy_port, http_port=http_port, auth_endpoint=auth_endpoint + proxy_port=proxy_port, + http_port=http_port, + neon_binpath=neon_binpath, + auth_endpoint=auth_endpoint, ) as proxy: proxy.start() yield proxy @@ -2523,10 +2565,10 @@ class Etcd: self.handle.wait() -def get_test_output_dir(request: Any) -> Path: +def get_test_output_dir(request: Any, top_output_dir: Path) -> Path: """Compute the working directory for an individual test.""" test_name = request.node.name - test_dir = Path(top_output_dir) / test_name.replace("/", "-") + test_dir = top_output_dir / test_name.replace("/", "-") log.info(f"get_test_output_dir is {test_dir}") # make mypy happy assert isinstance(test_dir, Path) @@ -2543,11 +2585,11 @@ def get_test_output_dir(request: Any) -> Path: # this fixture ensures that the directory exists. That works because # 'autouse' fixtures are run before other fixtures. 
@pytest.fixture(scope="function", autouse=True) -def test_output_dir(request: Any) -> Iterator[Path]: +def test_output_dir(request: Any, top_output_dir: Path) -> Iterator[Path]: """Create the working directory for an individual test.""" # one directory per test - test_dir = get_test_output_dir(request) + test_dir = get_test_output_dir(request, top_output_dir) log.info(f"test_output_dir is {test_dir}") shutil.rmtree(test_dir, ignore_errors=True) test_dir.mkdir() @@ -2639,7 +2681,7 @@ def check_restored_datadir_content( restored_dir_path = env.repo_dir / f"{pg.node_name}_restored_datadir" restored_dir_path.mkdir(exist_ok=True) - pg_bin = PgBin(test_output_dir, env.pg_version) + pg_bin = PgBin(test_output_dir, env.pg_distrib_dir, env.pg_version) psql_path = os.path.join(pg_bin.pg_bin_path, "psql") cmd = rf""" diff --git a/test_runner/fixtures/utils.py b/test_runner/fixtures/utils.py index 1242305ec3..e73453f2c4 100644 --- a/test_runner/fixtures/utils.py +++ b/test_runner/fixtures/utils.py @@ -15,12 +15,12 @@ from psycopg2.extensions import cursor Fn = TypeVar("Fn", bound=Callable[..., Any]) -def get_self_dir() -> str: +def get_self_dir() -> Path: """Get the path to the directory where this script lives.""" - return os.path.dirname(os.path.abspath(__file__)) + return Path(__file__).resolve().parent -def subprocess_capture(capture_dir: str, cmd: List[str], **kwargs: Any) -> str: +def subprocess_capture(capture_dir: Path, cmd: List[str], **kwargs: Any) -> str: """Run a process and capture its output Output will go to files named "cmd_NNN.stdout" and "cmd_NNN.stderr" diff --git a/test_runner/pg_clients/test_pg_clients.py b/test_runner/pg_clients/test_pg_clients.py index 2dbab19e7a..6ffe3bf918 100644 --- a/test_runner/pg_clients/test_pg_clients.py +++ b/test_runner/pg_clients/test_pg_clients.py @@ -46,9 +46,9 @@ def test_pg_clients(test_output_dir: Path, remote_pg: RemotePostgres, client: st raise RuntimeError("docker 
is required for running this test") build_cmd = [docker_bin, "build", "--tag", image_tag, f"{Path(__file__).parent / client}"] - subprocess_capture(str(test_output_dir), build_cmd, check=True) + subprocess_capture(test_output_dir, build_cmd, check=True) run_cmd = [docker_bin, "run", "--rm", "--env-file", env_file, image_tag] - basepath = subprocess_capture(str(test_output_dir), run_cmd, check=True) + basepath = subprocess_capture(test_output_dir, run_cmd, check=True) assert Path(f"{basepath}.stdout").read_text().strip() == "1" diff --git a/test_runner/regress/test_compatibility.py b/test_runner/regress/test_compatibility.py index 100027048f..b0643ec05e 100644 --- a/test_runner/regress/test_compatibility.py +++ b/test_runner/regress/test_compatibility.py @@ -80,7 +80,12 @@ class PortReplacer(object): @pytest.mark.order(after="test_prepare_snapshot") def test_backward_compatibility( - pg_bin: PgBin, port_distributor: PortDistributor, test_output_dir: Path, request: FixtureRequest + pg_bin: PgBin, + port_distributor: PortDistributor, + test_output_dir: Path, + request: FixtureRequest, + neon_binpath: Path, + pg_distrib_dir: Path, ): compatibility_snapshot_dir = Path( os.environ.get("COMPATIBILITY_SNAPSHOT_DIR", DEFAILT_LOCAL_SNAPSHOT_DIR) @@ -170,6 +175,8 @@ def test_backward_compatibility( config.repo_dir = repo_dir config.pg_version = "14" # Note: `pg_dumpall` (from pg_bin) version is set by DEFAULT_PG_VERSION_DEFAULT and can be overriden by DEFAULT_PG_VERSION env var config.initial_tenant = snapshot_config["default_tenant_id"] + config.neon_binpath = neon_binpath + config.pg_distrib_dir = pg_distrib_dir # Check that we can start the project cli = NeonCli(config) diff --git a/test_runner/regress/test_fullbackup.py b/test_runner/regress/test_fullbackup.py index 0048e7b580..fc515e5878 100644 --- a/test_runner/regress/test_fullbackup.py +++ b/test_runner/regress/test_fullbackup.py @@ -1,13 +1,8 @@ import os +from pathlib import Path from fixtures.log_helper import log 
-from fixtures.neon_fixtures import ( - NeonEnvBuilder, - PgBin, - PortDistributor, - VanillaPostgres, - pg_distrib_dir, -) +from fixtures.neon_fixtures import NeonEnvBuilder, PgBin, PortDistributor, VanillaPostgres from fixtures.types import Lsn, TimelineId from fixtures.utils import query_scalar, subprocess_capture @@ -16,7 +11,10 @@ num_rows = 1000 # Ensure that regular postgres can start from fullbackup def test_fullbackup( - neon_env_builder: NeonEnvBuilder, pg_bin: PgBin, port_distributor: PortDistributor + neon_env_builder: NeonEnvBuilder, + pg_bin: PgBin, + port_distributor: PortDistributor, + pg_distrib_dir: Path, ): env = neon_env_builder.init_start() @@ -40,7 +38,7 @@ def test_fullbackup( # Set LD_LIBRARY_PATH in the env properly, otherwise we may use the wrong libpq. # PgBin sets it automatically, but here we need to pipe psql output to the tar command. - psql_env = {"LD_LIBRARY_PATH": os.path.join(str(pg_distrib_dir), "lib")} + psql_env = {"LD_LIBRARY_PATH": str(pg_distrib_dir / "lib")} # Get and unpack fullbackup from pageserver restored_dir_path = env.repo_dir / "restored_datadir" @@ -49,9 +47,7 @@ def test_fullbackup( cmd = ["psql", "--no-psqlrc", env.pageserver.connstr(), "-c", query] result_basepath = pg_bin.run_capture(cmd, env=psql_env) tar_output_file = result_basepath + ".stdout" - subprocess_capture( - str(env.repo_dir), ["tar", "-xf", tar_output_file, "-C", str(restored_dir_path)] - ) + subprocess_capture(env.repo_dir, ["tar", "-xf", tar_output_file, "-C", str(restored_dir_path)]) # HACK # fullbackup returns neon specific pg_control and first WAL segment diff --git a/test_runner/regress/test_import.py b/test_runner/regress/test_import.py index c888c6f7ee..ced5e18406 100644 --- a/test_runner/regress/test_import.py +++ b/test_runner/regress/test_import.py @@ -13,7 +13,6 @@ from fixtures.neon_fixtures import ( NeonEnvBuilder, PgBin, Postgres, - pg_distrib_dir, wait_for_last_record_lsn, wait_for_upload, ) @@ -128,7 +127,7 @@ def 
test_import_from_pageserver_small(pg_bin: PgBin, neon_env_builder: NeonEnvBu num_rows = 3000 lsn = _generate_data(num_rows, pg) - _import(num_rows, lsn, env, pg_bin, timeline) + _import(num_rows, lsn, env, pg_bin, timeline, env.pg_distrib_dir) @pytest.mark.timeout(1800) @@ -156,7 +155,7 @@ def test_import_from_pageserver_multisegment(pg_bin: PgBin, neon_env_builder: Ne log.info(f"timeline logical size = {logical_size / (1024 ** 2)}MB") assert logical_size > 1024**3 # = 1GB - tar_output_file = _import(num_rows, lsn, env, pg_bin, timeline) + tar_output_file = _import(num_rows, lsn, env, pg_bin, timeline, env.pg_distrib_dir) # Check if the backup data contains multiple segment files cnt_seg_files = 0 @@ -191,7 +190,12 @@ def _generate_data(num_rows: int, pg: Postgres) -> Lsn: def _import( - expected_num_rows: int, lsn: Lsn, env: NeonEnv, pg_bin: PgBin, timeline: TimelineId + expected_num_rows: int, + lsn: Lsn, + env: NeonEnv, + pg_bin: PgBin, + timeline: TimelineId, + pg_distrib_dir: Path, ) -> str: """Test importing backup data to the pageserver. @@ -205,7 +209,7 @@ def _import( # Set LD_LIBRARY_PATH in the env properly, otherwise we may use the wrong libpq. # PgBin sets it automatically, but here we need to pipe psql output to the tar command. 
- psql_env = {"LD_LIBRARY_PATH": os.path.join(str(pg_distrib_dir), "lib")} + psql_env = {"LD_LIBRARY_PATH": str(pg_distrib_dir / "lib")} # Get a fullbackup from pageserver query = f"fullbackup { env.initial_tenant} {timeline} {lsn}" diff --git a/test_runner/regress/test_pageserver_api.py b/test_runner/regress/test_pageserver_api.py index f5e02af8dd..ab321eeb02 100644 --- a/test_runner/regress/test_pageserver_api.py +++ b/test_runner/regress/test_pageserver_api.py @@ -1,5 +1,5 @@ -import pathlib import subprocess +from pathlib import Path from typing import Optional from fixtures.neon_fixtures import ( @@ -7,18 +7,18 @@ from fixtures.neon_fixtures import ( NeonEnv, NeonEnvBuilder, PageserverHttpClient, - neon_binpath, - pg_distrib_dir, ) from fixtures.types import Lsn, TenantId, TimelineId from fixtures.utils import wait_until # test that we cannot override node id after init -def test_pageserver_init_node_id(neon_simple_env: NeonEnv): +def test_pageserver_init_node_id( + neon_simple_env: NeonEnv, neon_binpath: Path, pg_distrib_dir: Path +): repo_dir = neon_simple_env.repo_dir pageserver_config = repo_dir / "pageserver.toml" - pageserver_bin = pathlib.Path(neon_binpath) / "pageserver" + pageserver_bin = neon_binpath / "pageserver" def run_pageserver(args): return subprocess.run( diff --git a/test_runner/regress/test_pg_regress.py b/test_runner/regress/test_pg_regress.py index f23811b671..5eb1ebb3de 100644 --- a/test_runner/regress/test_pg_regress.py +++ b/test_runner/regress/test_pg_regress.py @@ -1,11 +1,10 @@ # # This file runs pg_regress-based tests. # -import os from pathlib import Path import pytest -from fixtures.neon_fixtures import NeonEnv, base_dir, check_restored_datadir_content, pg_distrib_dir +from fixtures.neon_fixtures import NeonEnv, check_restored_datadir_content # Run the main PostgreSQL regression tests, in src/test/regress. 
@@ -13,7 +12,14 @@ from fixtures.neon_fixtures import NeonEnv, base_dir, check_restored_datadir_con # This runs for a long time, especially in debug mode, so use a larger-than-default # timeout. @pytest.mark.timeout(1800) -def test_pg_regress(neon_simple_env: NeonEnv, test_output_dir: Path, pg_bin, capsys): +def test_pg_regress( + neon_simple_env: NeonEnv, + test_output_dir: Path, + pg_bin, + capsys, + base_dir: Path, + pg_distrib_dir: Path, +): env = neon_simple_env env.neon_cli.create_branch("test_pg_regress", "empty") @@ -26,20 +32,20 @@ def test_pg_regress(neon_simple_env: NeonEnv, test_output_dir: Path, pg_bin, cap (runpath / "testtablespace").mkdir(parents=True) # Compute all the file locations that pg_regress will need. - build_path = os.path.join(pg_distrib_dir, "build/v{}/src/test/regress").format(env.pg_version) - src_path = os.path.join(base_dir, "vendor/postgres-v{}/src/test/regress").format(env.pg_version) - bindir = os.path.join(pg_distrib_dir, "v{}".format(env.pg_version), "bin") - schedule = os.path.join(src_path, "parallel_schedule") - pg_regress = os.path.join(build_path, "pg_regress") + build_path = pg_distrib_dir / f"build/v{env.pg_version}/src/test/regress" + src_path = base_dir / f"vendor/postgres-v{env.pg_version}/src/test/regress" + bindir = pg_distrib_dir / f"v{env.pg_version}/bin" + schedule = src_path / "parallel_schedule" + pg_regress = build_path / "pg_regress" pg_regress_command = [ - pg_regress, + str(pg_regress), '--bindir=""', "--use-existing", - "--bindir={}".format(bindir), - "--dlpath={}".format(build_path), - "--schedule={}".format(schedule), - "--inputdir={}".format(src_path), + f"--bindir={bindir}", + f"--dlpath={build_path}", + f"--schedule={schedule}", + f"--inputdir={src_path}", ] env_vars = { @@ -66,7 +72,14 @@ def test_pg_regress(neon_simple_env: NeonEnv, test_output_dir: Path, pg_bin, cap # This runs for a long time, especially in debug mode, so use a larger-than-default # timeout. 
@pytest.mark.timeout(1800) -def test_isolation(neon_simple_env: NeonEnv, test_output_dir: Path, pg_bin, capsys): +def test_isolation( + neon_simple_env: NeonEnv, + test_output_dir: Path, + pg_bin, + capsys, + base_dir: Path, + pg_distrib_dir: Path, +): env = neon_simple_env env.neon_cli.create_branch("test_isolation", "empty") @@ -80,21 +93,19 @@ def test_isolation(neon_simple_env: NeonEnv, test_output_dir: Path, pg_bin, caps (runpath / "testtablespace").mkdir(parents=True) # Compute all the file locations that pg_isolation_regress will need. - build_path = os.path.join(pg_distrib_dir, "build/v{}/src/test/isolation".format(env.pg_version)) - src_path = os.path.join( - base_dir, "vendor/postgres-v{}/src/test/isolation".format(env.pg_version) - ) - bindir = os.path.join(pg_distrib_dir, "v{}".format(env.pg_version), "bin") - schedule = os.path.join(src_path, "isolation_schedule") - pg_isolation_regress = os.path.join(build_path, "pg_isolation_regress") + build_path = pg_distrib_dir / f"build/v{env.pg_version}/src/test/isolation" + src_path = base_dir / f"vendor/postgres-v{env.pg_version}/src/test/isolation" + bindir = pg_distrib_dir / f"v{env.pg_version}/bin" + schedule = src_path / "isolation_schedule" + pg_isolation_regress = build_path / "pg_isolation_regress" pg_isolation_regress_command = [ - pg_isolation_regress, + str(pg_isolation_regress), "--use-existing", - "--bindir={}".format(bindir), - "--dlpath={}".format(build_path), - "--inputdir={}".format(src_path), - "--schedule={}".format(schedule), + f"--bindir={bindir}", + f"--dlpath={build_path}", + f"--inputdir={src_path}", + f"--schedule={schedule}", ] env_vars = { @@ -112,7 +123,14 @@ def test_isolation(neon_simple_env: NeonEnv, test_output_dir: Path, pg_bin, caps # Run extra Neon-specific pg_regress-based tests. The tests and their # schedule file are in the sql_regress/ directory. 
-def test_sql_regress(neon_simple_env: NeonEnv, test_output_dir: Path, pg_bin, capsys): +def test_sql_regress( + neon_simple_env: NeonEnv, + test_output_dir: Path, + pg_bin, + capsys, + base_dir: Path, + pg_distrib_dir: Path, +): env = neon_simple_env env.neon_cli.create_branch("test_sql_regress", "empty") @@ -126,19 +144,19 @@ def test_sql_regress(neon_simple_env: NeonEnv, test_output_dir: Path, pg_bin, ca # Compute all the file locations that pg_regress will need. # This test runs neon specific tests - build_path = os.path.join(pg_distrib_dir, "build/v{}/src/test/regress").format(env.pg_version) - src_path = os.path.join(base_dir, "test_runner/sql_regress") - bindir = os.path.join(pg_distrib_dir, "v{}".format(env.pg_version), "bin") - schedule = os.path.join(src_path, "parallel_schedule") - pg_regress = os.path.join(build_path, "pg_regress") + build_path = pg_distrib_dir / f"build/v{env.pg_version}/src/test/regress" + src_path = base_dir / "test_runner/sql_regress" + bindir = pg_distrib_dir / f"v{env.pg_version}/bin" + schedule = src_path / "parallel_schedule" + pg_regress = build_path / "pg_regress" pg_regress_command = [ - pg_regress, + str(pg_regress), "--use-existing", - "--bindir={}".format(bindir), - "--dlpath={}".format(build_path), - "--schedule={}".format(schedule), - "--inputdir={}".format(src_path), + f"--bindir={bindir}", + f"--dlpath={build_path}", + f"--schedule={schedule}", + f"--inputdir={src_path}", ] env_vars = { diff --git a/test_runner/regress/test_tenant_relocation.py b/test_runner/regress/test_tenant_relocation.py index fa00a4da82..aec45307f7 100644 --- a/test_runner/regress/test_tenant_relocation.py +++ b/test_runner/regress/test_tenant_relocation.py @@ -1,7 +1,7 @@ import os -import pathlib import threading from contextlib import closing, contextmanager +from pathlib import Path from typing import Any, Dict, Optional, Tuple import pytest @@ -14,9 +14,6 @@ from fixtures.neon_fixtures import ( PortDistributor, Postgres, 
assert_no_in_progress_downloads_for_tenant, - base_dir, - neon_binpath, - pg_distrib_dir, wait_for_last_record_lsn, wait_for_upload, ) @@ -30,12 +27,13 @@ def assert_abs_margin_ratio(a: float, b: float, margin_ratio: float): @contextmanager def new_pageserver_service( - new_pageserver_dir: pathlib.Path, - pageserver_bin: pathlib.Path, - remote_storage_mock_path: pathlib.Path, + new_pageserver_dir: Path, + pageserver_bin: Path, + remote_storage_mock_path: Path, pg_port: int, http_port: int, broker: Optional[Etcd], + pg_distrib_dir: Path, ): """ cannot use NeonPageserver yet because it depends on neon cli @@ -193,10 +191,10 @@ def switch_pg_to_new_pageserver( new_pageserver_port: int, tenant_id: TenantId, timeline_id: TimelineId, -) -> pathlib.Path: +) -> Path: pg.stop() - pg_config_file_path = pathlib.Path(pg.config_file_path()) + pg_config_file_path = Path(pg.config_file_path()) pg_config_file_path.open("a").write( f"\nneon.pageserver_connstring = 'postgresql://no_user:@localhost:{new_pageserver_port}'" ) @@ -219,7 +217,7 @@ def switch_pg_to_new_pageserver( return timeline_to_detach_local_path -def post_migration_check(pg: Postgres, sum_before_migration: int, old_local_path: pathlib.Path): +def post_migration_check(pg: Postgres, sum_before_migration: int, old_local_path: Path): with pg_cur(pg) as cur: # check that data is still there cur.execute("SELECT sum(key) FROM t") @@ -251,7 +249,9 @@ def post_migration_check(pg: Postgres, sum_before_migration: int, old_local_path def test_tenant_relocation( neon_env_builder: NeonEnvBuilder, port_distributor: PortDistributor, - test_output_dir, + test_output_dir: Path, + neon_binpath: Path, + base_dir: Path, method: str, with_load: str, ): @@ -350,7 +350,7 @@ def test_tenant_relocation( new_pageserver_pg_port = port_distributor.get_port() new_pageserver_http_port = port_distributor.get_port() log.info("new pageserver ports pg %s http %s", new_pageserver_pg_port, new_pageserver_http_port) - pageserver_bin = 
pathlib.Path(neon_binpath) / "pageserver" + pageserver_bin = neon_binpath / "pageserver" new_pageserver_http = PageserverHttpClient( port=new_pageserver_http_port, @@ -365,6 +365,7 @@ def test_tenant_relocation( new_pageserver_pg_port, new_pageserver_http_port, neon_env_builder.broker, + neon_env_builder.pg_distrib_dir, ): # Migrate either by attaching from s3 or import/export basebackup @@ -373,7 +374,7 @@ def test_tenant_relocation( "poetry", "run", "python", - os.path.join(base_dir, "scripts/export_import_between_pageservers.py"), + str(base_dir / "scripts/export_import_between_pageservers.py"), "--tenant-id", str(tenant_id), "--from-host", @@ -389,9 +390,9 @@ def test_tenant_relocation( "--to-pg-port", str(new_pageserver_pg_port), "--pg-distrib-dir", - pg_distrib_dir, + str(neon_env_builder.pg_distrib_dir), "--work-dir", - os.path.join(test_output_dir), + str(test_output_dir), "--tmp-pg-port", str(port_distributor.get_port()), ] diff --git a/test_runner/regress/test_timeline_size.py b/test_runner/regress/test_timeline_size.py index c87e9a6720..ec2bed7fee 100644 --- a/test_runner/regress/test_timeline_size.py +++ b/test_runner/regress/test_timeline_size.py @@ -338,6 +338,7 @@ def test_timeline_size_metrics( neon_simple_env: NeonEnv, test_output_dir: Path, port_distributor: PortDistributor, + pg_distrib_dir: Path, pg_version: str, ): env = neon_simple_env @@ -382,7 +383,7 @@ def test_timeline_size_metrics( tl_logical_size_metric = int(matches.group(1)) pgdatadir = test_output_dir / "pgdata-vanilla" - pg_bin = PgBin(test_output_dir, pg_version) + pg_bin = PgBin(test_output_dir, pg_distrib_dir, pg_version) port = port_distributor.get_port() with VanillaPostgres(pgdatadir, pg_bin, port) as vanilla_pg: vanilla_pg.configure([f"port={port}"]) diff --git a/test_runner/regress/test_wal_acceptor.py b/test_runner/regress/test_wal_acceptor.py index 09f6f4b9f9..8ef7f27752 100644 --- a/test_runner/regress/test_wal_acceptor.py +++ b/test_runner/regress/test_wal_acceptor.py @@ 
-30,7 +30,6 @@ from fixtures.neon_fixtures import ( SafekeeperHttpClient, SafekeeperPort, available_remote_storages, - neon_binpath, wait_for_last_record_lsn, wait_for_upload, ) @@ -797,6 +796,7 @@ class SafekeeperEnv: repo_dir: Path, port_distributor: PortDistributor, pg_bin: PgBin, + neon_binpath: Path, num_safekeepers: int = 1, ): self.repo_dir = repo_dir @@ -808,7 +808,7 @@ class SafekeeperEnv: ) self.pg_bin = pg_bin self.num_safekeepers = num_safekeepers - self.bin_safekeeper = os.path.join(str(neon_binpath), "safekeeper") + self.bin_safekeeper = str(neon_binpath / "safekeeper") self.safekeepers: Optional[List[subprocess.CompletedProcess[Any]]] = None self.postgres: Optional[ProposerPostgres] = None self.tenant_id: Optional[TenantId] = None @@ -911,7 +911,10 @@ class SafekeeperEnv: def test_safekeeper_without_pageserver( - test_output_dir: str, port_distributor: PortDistributor, pg_bin: PgBin + test_output_dir: str, + port_distributor: PortDistributor, + pg_bin: PgBin, + neon_binpath: Path, ): # Create the environment in the test-specific output dir repo_dir = Path(os.path.join(test_output_dir, "repo")) @@ -920,6 +923,7 @@ def test_safekeeper_without_pageserver( repo_dir, port_distributor, pg_bin, + neon_binpath, ) with env: diff --git a/test_runner/regress/test_wal_restore.py b/test_runner/regress/test_wal_restore.py index db6f1e5137..e1b1e03515 100644 --- a/test_runner/regress/test_wal_restore.py +++ b/test_runner/regress/test_wal_restore.py @@ -1,14 +1,6 @@ -import os from pathlib import Path -from fixtures.neon_fixtures import ( - NeonEnvBuilder, - PgBin, - PortDistributor, - VanillaPostgres, - base_dir, - pg_distrib_dir, -) +from fixtures.neon_fixtures import NeonEnvBuilder, PgBin, PortDistributor, VanillaPostgres from fixtures.types import TenantId @@ -17,6 +9,8 @@ def test_wal_restore( pg_bin: PgBin, test_output_dir: Path, port_distributor: PortDistributor, + base_dir: Path, + pg_distrib_dir: Path, ): env = neon_env_builder.init_start() 
env.neon_cli.create_branch("test_wal_restore") @@ -26,11 +20,13 @@ def test_wal_restore( env.neon_cli.pageserver_stop() port = port_distributor.get_port() data_dir = test_output_dir / "pgsql.restored" - with VanillaPostgres(data_dir, PgBin(test_output_dir, env.pg_version), port) as restored: + with VanillaPostgres( + data_dir, PgBin(test_output_dir, env.pg_distrib_dir, env.pg_version), port + ) as restored: pg_bin.run_capture( [ - os.path.join(base_dir, "libs/utils/scripts/restore_from_wal.sh"), - os.path.join(pg_distrib_dir, "v{}".format(env.pg_version), "bin"), + str(base_dir / "libs/utils/scripts/restore_from_wal.sh"), + str(pg_distrib_dir / f"v{env.pg_version}/bin"), str(test_output_dir / "repo" / "safekeepers" / "sk1" / str(tenant_id) / "*"), str(data_dir), str(port),