The CI times out after 10 minutes of no output. It's annoying if a test hangs and is killed by the CI timeout, because you don't get any information about which test was running. Try to avoid that by adding a slightly smaller timeout in pytest itself. You can override it on a per-test basis if needed, but let's try to keep our tests shorter than that. For the Postgres regression tests, use a longer 30-minute timeout. They're not really a single test, but many tests wrapped in a single pytest test. It's OK for them to run longer in aggregate; each individual Postgres test is still fairly short.
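The mechanics are small. A minimal sketch of the scheme, assuming the pytest-timeout plugin supplies the default and is configured through an ini-style file; the file name and the exact default value below are illustrative, not taken from the repository, and the per-test override uses the same @pytest.mark.timeout marker as the isolation test below.

# pytest.ini (illustrative): keep the default just under the CI's
# 10-minute no-output limit, so pytest names the hanging test before
# the CI kills the whole job.
[pytest]
timeout = 540

# In an individual test file: override for suites that legitimately run
# longer, such as the Postgres regression wrappers (30 minutes).
# The test name here is hypothetical.
import pytest

@pytest.mark.timeout(1800)
def test_long_running_suite():
    ...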
import os

import pytest
from fixtures.utils import mkdir_if_needed
from fixtures.zenith_fixtures import ZenithEnv, base_dir, pg_distrib_dir


# The isolation tests run for a long time, especially in debug mode,
# so use a larger-than-default timeout.
@pytest.mark.timeout(1800)
def test_isolation(zenith_simple_env: ZenithEnv, test_output_dir, pg_bin, capsys):
    env = zenith_simple_env

    env.zenith_cli.create_branch("test_isolation", "empty")

    # Connect to postgres and create the "isolation_regression" database.
    # The isolation tests use prepared transactions, so enable them.
    pg = env.postgres.create_start('test_isolation', config_lines=['max_prepared_transactions=100'])
    pg.safe_psql('CREATE DATABASE isolation_regression')

    # Create some local directories for pg_isolation_regress to run in.
    runpath = os.path.join(test_output_dir, 'regress')
    mkdir_if_needed(runpath)
    mkdir_if_needed(os.path.join(runpath, 'testtablespace'))

    # Compute all the file locations that pg_isolation_regress will need.
    build_path = os.path.join(pg_distrib_dir, 'build/src/test/isolation')
    src_path = os.path.join(base_dir, 'vendor/postgres/src/test/isolation')
    bindir = os.path.join(pg_distrib_dir, 'bin')
    schedule = os.path.join(src_path, 'isolation_schedule')
    pg_isolation_regress = os.path.join(build_path, 'pg_isolation_regress')

    pg_isolation_regress_command = [
        pg_isolation_regress,
        '--use-existing',
        '--bindir={}'.format(bindir),
        '--dlpath={}'.format(build_path),
        '--inputdir={}'.format(src_path),
        '--schedule={}'.format(schedule),
    ]

    env_vars = {
        'PGPORT': str(pg.default_options['port']),
        'PGUSER': pg.default_options['user'],
        'PGHOST': pg.default_options['host'],
    }

    # Run the command.
    # We don't capture the output. It's not too chatty, and it always
    # logs the exact same data to `regression.out` anyway.
    with capsys.disabled():
        pg_bin.run(pg_isolation_regress_command, env=env_vars, cwd=runpath)