mirror of https://github.com/neondatabase/neon.git
synced 2025-12-22 21:59:59 +00:00
Reorganize python tests.

Merge batch_others and batch_pg_regress. The original idea was to split all the python tests into multiple "batches" and run each batch in parallel as a separate CI job. However, the batch_pg_regress batch was pretty short compared to all the tests in batch_others. We could split batch_others into multiple batches, but it seems better to treat them as one big pool of tests and let pytest handle the parallelism on its own. If we need to split them across multiple nodes in the future, we could use pytest-shard or something similar, instead of managing the batches ourselves.

Merge test_neon_regress.py, test_pg_regress.py and test_isolation.py into one file, test_pg_regress.py. It seems clearer to group all pg_regress-based tests into one file, now that they all live in the same directory.
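If we do end up sharding across CI nodes later, the idea behind pytest-shard fits in a few lines of conftest.py code. The sketch below is an illustration only, not part of this commit; the SHARD_INDEX/SHARD_COUNT variable names are made up here:

# conftest.py (sketch): deterministically assign each collected test to one
# of N shards, so node i runs: SHARD_INDEX=i SHARD_COUNT=n pytest ...
import os
import zlib


def pytest_collection_modifyitems(config, items):
    shard_count = int(os.environ.get("SHARD_COUNT", "1"))
    if shard_count <= 1:
        return
    shard_index = int(os.environ.get("SHARD_INDEX", "0"))
    selected = []
    deselected = []
    for item in items:
        # Hash the stable node id so the split is identical on every node.
        if zlib.crc32(item.nodeid.encode()) % shard_count == shard_index:
            selected.append(item)
        else:
            deselected.append(item)
    config.hook.pytest_deselected(items=deselected)
    items[:] = selected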
@@ -149,7 +149,7 @@ runs:
       fi

     - name: Upload Allure results
-      if: ${{ always() && (inputs.test_selection == 'batch_others' || inputs.test_selection == 'batch_pg_regress') }}
+      if: ${{ always() && (inputs.test_selection == 'regress') }}
       uses: ./.github/actions/allure-report
       with:
         action: store
.github/workflows/build_and_test.yml | 43 (vendored)
@@ -206,7 +206,7 @@ jobs:
       if: matrix.build_type == 'debug'
       uses: ./.github/actions/save-coverage-data

-  pg_regress-tests:
+  regress-tests:
     runs-on: dev
     container:
       image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
@@ -224,42 +224,13 @@ jobs:
         submodules: true
         fetch-depth: 2

-    - name: Pytest regress tests
+    - name: Pytest regression tests
       uses: ./.github/actions/run-python-test-set
       with:
         build_type: ${{ matrix.build_type }}
         rust_toolchain: ${{ matrix.rust_toolchain }}
-        test_selection: batch_pg_regress
+        test_selection: regress
         needs_postgres_source: true
-
-    - name: Merge and upload coverage data
-      if: matrix.build_type == 'debug'
-      uses: ./.github/actions/save-coverage-data
-
-  other-tests:
-    runs-on: dev
-    container:
-      image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
-      options: --init
-    needs: [ build-neon ]
-    strategy:
-      fail-fast: false
-      matrix:
-        build_type: [ debug, release ]
-        rust_toolchain: [ 1.58 ]
-    steps:
-    - name: Checkout
-      uses: actions/checkout@v3
-      with:
-        submodules: true
-        fetch-depth: 2
-
-    - name: Pytest other tests
-      uses: ./.github/actions/run-python-test-set
-      with:
-        build_type: ${{ matrix.build_type }}
-        rust_toolchain: ${{ matrix.rust_toolchain }}
-        test_selection: batch_others
         run_with_real_s3: true
         real_s3_bucket: ci-tests-s3
         real_s3_region: us-west-2
@@ -307,7 +278,7 @@ jobs:
     container:
       image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
       options: --init
-    needs: [ other-tests, pg_regress-tests ]
+    needs: [ regress-tests ]
     if: always()
     strategy:
       fail-fast: false
@@ -330,7 +301,7 @@ jobs:
     container:
       image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
       options: --init
-    needs: [ other-tests, pg_regress-tests ]
+    needs: [ regress-tests ]
     strategy:
       fail-fast: false
       matrix:
@@ -587,7 +558,7 @@ jobs:
     #container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:latest
     # We need both storage **and** compute images for deploy, because control plane picks the compute version based on the storage version.
     # If it notices a fresh storage it may bump the compute version. And if compute image failed to build it may break things badly
-    needs: [ push-docker-hub, calculate-deploy-targets, tag, other-tests, pg_regress-tests ]
+    needs: [ push-docker-hub, calculate-deploy-targets, tag, regress-tests ]
     if: |
       (github.ref_name == 'main' || github.ref_name == 'release') &&
       github.event_name != 'workflow_dispatch'
@@ -642,7 +613,7 @@ jobs:
     runs-on: dev
     container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:latest
     # Compute image isn't strictly required for proxy deploy, but let's still wait for it to run all deploy jobs consistently.
-    needs: [ push-docker-hub, calculate-deploy-targets, tag, other-tests, pg_regress-tests ]
+    needs: [ push-docker-hub, calculate-deploy-targets, tag, regress-tests ]
     if: |
       (github.ref_name == 'main' || github.ref_name == 'release') &&
       github.event_name != 'workflow_dispatch'
@@ -1077,7 +1077,7 @@ impl postgres_backend::Handler for PageServerHandler {
                 .write_message(&BeMessage::CommandComplete(b"SELECT 1"))?;
         } else if query_string.starts_with("do_gc ") {
             // Run GC immediately on given timeline.
-            // FIXME: This is just for tests. See test_runner/batch_others/test_gc.py.
+            // FIXME: This is just for tests. See test_runner/regress/test_gc.py.
             // This probably should require special authentication or a global flag to
             // enable, I don't think we want to or need to allow regular clients to invoke
             // GC.
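As the FIXME says, do_gc exists only so that tests such as test_runner/regress/test_gc.py can trigger garbage collection on demand. From a test, the call looks roughly like the sketch below; the pageserver.connect() helper and the exact do_gc argument layout are assumptions, not taken from this diff:

# Sketch: ask the pageserver to run GC on one timeline, over its
# postgres-protocol management interface (helper names are assumptions).
from contextlib import closing


def run_gc(env, tenant_hex, timeline_hex, gc_horizon=0):
    with closing(env.pageserver.connect()) as conn:
        with conn.cursor() as cur:
            # The handler above matches on the "do_gc " prefix; the
            # argument layout used here is assumed for illustration.
            cur.execute(f"do_gc {tenant_hex} {timeline_hex} {gc_horizon}")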
@@ -15,12 +15,22 @@ Prerequisites:

 ### Test Organization

-The tests are divided into a few batches, such that each batch takes roughly
-the same amount of time. The batches can be run in parallel, to minimize total
-runtime. Currently, there are only two batches:
-
-- test_batch_pg_regress: Runs PostgreSQL regression tests
-- test_others: All other tests
+Regression tests are in the 'regress' directory. They can be run in
+parallel to minimize total runtime. Most regression tests set up their
+environment with their own pageservers and safekeepers (but see
+`TEST_SHARED_FIXTURES`).
+
+'pg_clients' contains tests for connecting with various client
+libraries. Each client test uses a Dockerfile that pulls an image that
+contains the client, and connects to PostgreSQL with it. The client
+tests can be run against an existing PostgreSQL or Neon installation.
+
+'performance' contains performance regression tests. Each test
+exercises a particular scenario or workload, and outputs
+measurements. They should be run serially, to avoid the tests
+interfering with each other's performance. Some performance tests
+set up their own Neon environment, while others can be run against an
+existing PostgreSQL or Neon environment.

 ### Running the tests
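For reference, this is the shape of a typical test in the 'regress' directory, pieced together from the fixtures used throughout this diff (the test name here is made up):

from fixtures.neon_fixtures import NeonEnv


def test_example(neon_simple_env: NeonEnv):
    # neon_simple_env provides a Neon environment with its own pageserver
    # and safekeeper for this test (see TEST_SHARED_FIXTURES for sharing).
    env = neon_simple_env
    env.neon_cli.create_branch("test_example", "empty")
    pg = env.postgres.create_start("test_example")
    assert pg.safe_psql("SELECT 1")[0][0] == 1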
@@ -1,50 +0,0 @@
-import os
-from pathlib import Path
-
-import pytest
-from fixtures.neon_fixtures import NeonEnv, base_dir, pg_distrib_dir
-
-
-# The isolation tests run for a long time, especially in debug mode,
-# so use a larger-than-default timeout.
-@pytest.mark.timeout(1800)
-def test_isolation(neon_simple_env: NeonEnv, test_output_dir: Path, pg_bin, capsys):
-    env = neon_simple_env
-
-    env.neon_cli.create_branch("test_isolation", "empty")
-    # Connect to postgres and create a database called "regression".
-    # isolation tests use prepared transactions, so enable them
-    pg = env.postgres.create_start("test_isolation", config_lines=["max_prepared_transactions=100"])
-    pg.safe_psql("CREATE DATABASE isolation_regression")
-
-    # Create some local directories for pg_isolation_regress to run in.
-    runpath = test_output_dir / "regress"
-    (runpath / "testtablespace").mkdir(parents=True)
-
-    # Compute all the file locations that pg_isolation_regress will need.
-    build_path = os.path.join(pg_distrib_dir, "build/src/test/isolation")
-    src_path = os.path.join(base_dir, "vendor/postgres/src/test/isolation")
-    bindir = os.path.join(pg_distrib_dir, "bin")
-    schedule = os.path.join(src_path, "isolation_schedule")
-    pg_isolation_regress = os.path.join(build_path, "pg_isolation_regress")
-
-    pg_isolation_regress_command = [
-        pg_isolation_regress,
-        "--use-existing",
-        "--bindir={}".format(bindir),
-        "--dlpath={}".format(build_path),
-        "--inputdir={}".format(src_path),
-        "--schedule={}".format(schedule),
-    ]
-
-    env_vars = {
-        "PGPORT": str(pg.default_options["port"]),
-        "PGUSER": pg.default_options["user"],
-        "PGHOST": pg.default_options["host"],
-    }
-
-    # Run the command.
-    # We don't capture the output. It's not too chatty, and it always
-    # logs the exact same data to `regression.out` anyway.
-    with capsys.disabled():
-        pg_bin.run(pg_isolation_regress_command, env=env_vars, cwd=runpath)
@@ -1,55 +0,0 @@
-import os
-from pathlib import Path
-
-from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonEnv, base_dir, check_restored_datadir_content, pg_distrib_dir
-
-
-def test_neon_regress(neon_simple_env: NeonEnv, test_output_dir: Path, pg_bin, capsys):
-    env = neon_simple_env
-
-    env.neon_cli.create_branch("test_neon_regress", "empty")
-    # Connect to postgres and create a database called "regression".
-    pg = env.postgres.create_start("test_neon_regress")
-    pg.safe_psql("CREATE DATABASE regression")
-
-    # Create some local directories for pg_regress to run in.
-    runpath = test_output_dir / "regress"
-    (runpath / "testtablespace").mkdir(parents=True)
-
-    # Compute all the file locations that pg_regress will need.
-    # This test runs neon specific tests
-    build_path = os.path.join(pg_distrib_dir, "build/src/test/regress")
-    src_path = os.path.join(base_dir, "test_runner/neon_regress")
-    bindir = os.path.join(pg_distrib_dir, "bin")
-    schedule = os.path.join(src_path, "parallel_schedule")
-    pg_regress = os.path.join(build_path, "pg_regress")
-
-    pg_regress_command = [
-        pg_regress,
-        "--use-existing",
-        "--bindir={}".format(bindir),
-        "--dlpath={}".format(build_path),
-        "--schedule={}".format(schedule),
-        "--inputdir={}".format(src_path),
-    ]
-
-    log.info(pg_regress_command)
-    env_vars = {
-        "PGPORT": str(pg.default_options["port"]),
-        "PGUSER": pg.default_options["user"],
-        "PGHOST": pg.default_options["host"],
-    }
-
-    # Run the command.
-    # We don't capture the output. It's not too chatty, and it always
-    # logs the exact same data to `regression.out` anyway.
-    with capsys.disabled():
-        pg_bin.run(pg_regress_command, env=env_vars, cwd=runpath)
-
-    # checkpoint one more time to ensure that the lsn we get is the latest one
-    pg.safe_psql("CHECKPOINT")
-    pg.safe_psql("select pg_current_wal_insert_lsn()")[0][0]
-
-    # Check that we restore the content of the datadir correctly
-    check_restored_datadir_content(test_output_dir, env, pg)
@@ -1,56 +0,0 @@
-import os
-import pathlib
-
-import pytest
-from fixtures.neon_fixtures import NeonEnv, base_dir, check_restored_datadir_content, pg_distrib_dir
-
-
-# The pg_regress tests run for a long time, especially in debug mode,
-# so use a larger-than-default timeout.
-@pytest.mark.timeout(1800)
-def test_pg_regress(neon_simple_env: NeonEnv, test_output_dir: pathlib.Path, pg_bin, capsys):
-    env = neon_simple_env
-
-    env.neon_cli.create_branch("test_pg_regress", "empty")
-    # Connect to postgres and create a database called "regression".
-    pg = env.postgres.create_start("test_pg_regress")
-    pg.safe_psql("CREATE DATABASE regression")
-
-    # Create some local directories for pg_regress to run in.
-    runpath = test_output_dir / "regress"
-    (runpath / "testtablespace").mkdir(parents=True)
-
-    # Compute all the file locations that pg_regress will need.
-    build_path = os.path.join(pg_distrib_dir, "build/src/test/regress")
-    src_path = os.path.join(base_dir, "vendor/postgres/src/test/regress")
-    bindir = os.path.join(pg_distrib_dir, "bin")
-    schedule = os.path.join(src_path, "parallel_schedule")
-    pg_regress = os.path.join(build_path, "pg_regress")
-
-    pg_regress_command = [
-        pg_regress,
-        '--bindir=""',
-        "--use-existing",
-        "--bindir={}".format(bindir),
-        "--dlpath={}".format(build_path),
-        "--schedule={}".format(schedule),
-        "--inputdir={}".format(src_path),
-    ]
-
-    env_vars = {
-        "PGPORT": str(pg.default_options["port"]),
-        "PGUSER": pg.default_options["user"],
-        "PGHOST": pg.default_options["host"],
-    }
-
-    # Run the command.
-    # We don't capture the output. It's not too chatty, and it always
-    # logs the exact same data to `regression.out` anyway.
-    with capsys.disabled():
-        pg_bin.run(pg_regress_command, env=env_vars, cwd=runpath)
-
-    # checkpoint one more time to ensure that the lsn we get is the latest one
-    pg.safe_psql("CHECKPOINT")
-
-    # Check that we restore the content of the datadir correctly
-    check_restored_datadir_content(test_output_dir, env, pg)
@@ -1,8 +0,0 @@
-To add a new SQL test
-
-- add sql script to run to neon_regress/sql/testname.sql
-- add expected output to neon_regress/expected/testname.out
-- add testname to parallel_schedule
-
-That's it.
-For more complex tests see PostgreSQL regression tests. These works basically the same.
@@ -62,10 +62,11 @@ def test_branching_with_pgbench(
         time.sleep(delay)
         log.info(f"Sleep {delay}s")

-        # If the number of concurrent threads exceeds a threshold,
-        # wait for all the threads to finish before spawning a new one.
-        # Because tests defined in `batch_others` are run concurrently in CI,
-        # we want to avoid the situation that one test exhausts resources for other tests.
+        # If the number of concurrent threads exceeds a threshold, wait for
+        # all the threads to finish before spawning a new one. Because the
+        # regression tests in this directory are run concurrently in CI, we
+        # want to avoid the situation that one test exhausts resources for
+        # other tests.
         if len(threads) >= thread_limit:
             for thread in threads:
                 thread.join()
test_runner/regress/test_pg_regress.py | 159 (new file)
@@ -0,0 +1,159 @@
+#
+# This file runs pg_regress-based tests.
+#
+import os
+from pathlib import Path
+
+import pytest
+from fixtures.neon_fixtures import NeonEnv, base_dir, check_restored_datadir_content, pg_distrib_dir
+
+
+# Run the main PostgreSQL regression tests, in src/test/regress.
+#
+# This runs for a long time, especially in debug mode, so use a larger-than-default
+# timeout.
+@pytest.mark.timeout(1800)
+def test_pg_regress(neon_simple_env: NeonEnv, test_output_dir: Path, pg_bin, capsys):
+    env = neon_simple_env
+
+    env.neon_cli.create_branch("test_pg_regress", "empty")
+    # Connect to postgres and create a database called "regression".
+    pg = env.postgres.create_start("test_pg_regress")
+    pg.safe_psql("CREATE DATABASE regression")
+
+    # Create some local directories for pg_regress to run in.
+    runpath = test_output_dir / "regress"
+    (runpath / "testtablespace").mkdir(parents=True)
+
+    # Compute all the file locations that pg_regress will need.
+    build_path = os.path.join(pg_distrib_dir, "build/src/test/regress")
+    src_path = os.path.join(base_dir, "vendor/postgres/src/test/regress")
+    bindir = os.path.join(pg_distrib_dir, "bin")
+    schedule = os.path.join(src_path, "parallel_schedule")
+    pg_regress = os.path.join(build_path, "pg_regress")
+
+    pg_regress_command = [
+        pg_regress,
+        '--bindir=""',
+        "--use-existing",
+        "--bindir={}".format(bindir),
+        "--dlpath={}".format(build_path),
+        "--schedule={}".format(schedule),
+        "--inputdir={}".format(src_path),
+    ]
+
+    env_vars = {
+        "PGPORT": str(pg.default_options["port"]),
+        "PGUSER": pg.default_options["user"],
+        "PGHOST": pg.default_options["host"],
+    }
+
+    # Run the command.
+    # We don't capture the output. It's not too chatty, and it always
+    # logs the exact same data to `regression.out` anyway.
+    with capsys.disabled():
+        pg_bin.run(pg_regress_command, env=env_vars, cwd=runpath)
+
+    # checkpoint one more time to ensure that the lsn we get is the latest one
+    pg.safe_psql("CHECKPOINT")
+
+    # Check that we restore the content of the datadir correctly
+    check_restored_datadir_content(test_output_dir, env, pg)
+
+
+# Run the PostgreSQL "isolation" tests, in src/test/isolation.
+#
+# This runs for a long time, especially in debug mode, so use a larger-than-default
+# timeout.
+@pytest.mark.timeout(1800)
+def test_isolation(neon_simple_env: NeonEnv, test_output_dir: Path, pg_bin, capsys):
+    env = neon_simple_env
+
+    env.neon_cli.create_branch("test_isolation", "empty")
+    # Connect to postgres and create a database called "regression".
+    # isolation tests use prepared transactions, so enable them
+    pg = env.postgres.create_start("test_isolation", config_lines=["max_prepared_transactions=100"])
+    pg.safe_psql("CREATE DATABASE isolation_regression")
+
+    # Create some local directories for pg_isolation_regress to run in.
+    runpath = test_output_dir / "regress"
+    (runpath / "testtablespace").mkdir(parents=True)
+
+    # Compute all the file locations that pg_isolation_regress will need.
+    build_path = os.path.join(pg_distrib_dir, "build/src/test/isolation")
+    src_path = os.path.join(base_dir, "vendor/postgres/src/test/isolation")
+    bindir = os.path.join(pg_distrib_dir, "bin")
+    schedule = os.path.join(src_path, "isolation_schedule")
+    pg_isolation_regress = os.path.join(build_path, "pg_isolation_regress")
+
+    pg_isolation_regress_command = [
+        pg_isolation_regress,
+        "--use-existing",
+        "--bindir={}".format(bindir),
+        "--dlpath={}".format(build_path),
+        "--inputdir={}".format(src_path),
+        "--schedule={}".format(schedule),
+    ]
+
+    env_vars = {
+        "PGPORT": str(pg.default_options["port"]),
+        "PGUSER": pg.default_options["user"],
+        "PGHOST": pg.default_options["host"],
+    }
+
+    # Run the command.
+    # We don't capture the output. It's not too chatty, and it always
+    # logs the exact same data to `regression.out` anyway.
+    with capsys.disabled():
+        pg_bin.run(pg_isolation_regress_command, env=env_vars, cwd=runpath)
+
+
+# Run extra Neon-specific pg_regress-based tests. The tests and their
+# schedule file are in the sql_regress/ directory.
+def test_sql_regress(neon_simple_env: NeonEnv, test_output_dir: Path, pg_bin, capsys):
+    env = neon_simple_env
+
+    env.neon_cli.create_branch("test_sql_regress", "empty")
+    # Connect to postgres and create a database called "regression".
+    pg = env.postgres.create_start("test_sql_regress")
+    pg.safe_psql("CREATE DATABASE regression")
+
+    # Create some local directories for pg_regress to run in.
+    runpath = test_output_dir / "regress"
+    (runpath / "testtablespace").mkdir(parents=True)
+
+    # Compute all the file locations that pg_regress will need.
+    # This test runs neon specific tests
+    build_path = os.path.join(pg_distrib_dir, "build/src/test/regress")
+    src_path = os.path.join(base_dir, "test_runner/sql_regress")
+    bindir = os.path.join(pg_distrib_dir, "bin")
+    schedule = os.path.join(src_path, "parallel_schedule")
+    pg_regress = os.path.join(build_path, "pg_regress")
+
+    pg_regress_command = [
+        pg_regress,
+        "--use-existing",
+        "--bindir={}".format(bindir),
+        "--dlpath={}".format(build_path),
+        "--schedule={}".format(schedule),
+        "--inputdir={}".format(src_path),
+    ]
+
+    env_vars = {
+        "PGPORT": str(pg.default_options["port"]),
+        "PGUSER": pg.default_options["user"],
+        "PGHOST": pg.default_options["host"],
+    }
+
+    # Run the command.
+    # We don't capture the output. It's not too chatty, and it always
+    # logs the exact same data to `regression.out` anyway.
+    with capsys.disabled():
+        pg_bin.run(pg_regress_command, env=env_vars, cwd=runpath)
+
+    # checkpoint one more time to ensure that the lsn we get is the latest one
+    pg.safe_psql("CHECKPOINT")
+    pg.safe_psql("select pg_current_wal_insert_lsn()")[0][0]
+
+    # Check that we restore the content of the datadir correctly
+    check_restored_datadir_content(test_output_dir, env, pg)
test_runner/sql_regress/README.md | 13 (new file)
@@ -0,0 +1,13 @@
+Simple tests that only need a PostgreSQL connection to run.
+These are run by the regress/test_pg_regress.py test, which uses
+the PostgreSQL pg_regress utility.
+
+To add a new SQL test:
+
+- add sql script to run to sql_regress/sql/testname.sql
+- add expected output to sql_regress/expected/testname.out
+- add testname to parallel_schedule
+
+That's it.
+For more complex tests see PostgreSQL regression tests in src/test/regress.
+These work basically the same.