From f38c2e620e7eea16389f90be9c668271237a1395 Mon Sep 17 00:00:00 2001 From: anastasia Date: Wed, 19 May 2021 19:59:52 +0300 Subject: [PATCH] Add test_zenith_regress.py that runs pg_regress styled tests from test_runner/zenith_regress. TODO: remove similar tests from vendor/postgres testset --- .../batch_pg_regress/test_zehith_regress.py | 62 ++++ test_runner/zenith_regress/.gitignore | 11 + test_runner/zenith_regress/README.md | 11 + .../zenith_regress/expected/.gitignore | 9 + .../zenith_regress/expected/zenith-cid.out | 34 ++ .../zenith_regress/expected/zenith-clog.out | 15 + .../expected/zenith-rel-truncate.out | 19 ++ .../expected/zenith-vacuum-full.out | 304 ++++++++++++++++++ test_runner/zenith_regress/parallel_schedule | 11 + test_runner/zenith_regress/serial_schedule | 6 + test_runner/zenith_regress/sql/.gitignore | 8 + test_runner/zenith_regress/sql/zenith-cid.sql | 26 ++ .../zenith_regress/sql/zenith-clog.sql | 16 + .../sql/zenith-rel-truncate.sql | 18 ++ .../zenith_regress/sql/zenith-vacuum-full.sql | 51 +++ 15 files changed, 601 insertions(+) create mode 100644 test_runner/batch_pg_regress/test_zehith_regress.py create mode 100644 test_runner/zenith_regress/.gitignore create mode 100644 test_runner/zenith_regress/README.md create mode 100644 test_runner/zenith_regress/expected/.gitignore create mode 100644 test_runner/zenith_regress/expected/zenith-cid.out create mode 100644 test_runner/zenith_regress/expected/zenith-clog.out create mode 100644 test_runner/zenith_regress/expected/zenith-rel-truncate.out create mode 100644 test_runner/zenith_regress/expected/zenith-vacuum-full.out create mode 100644 test_runner/zenith_regress/parallel_schedule create mode 100644 test_runner/zenith_regress/serial_schedule create mode 100644 test_runner/zenith_regress/sql/.gitignore create mode 100644 test_runner/zenith_regress/sql/zenith-cid.sql create mode 100644 test_runner/zenith_regress/sql/zenith-clog.sql create mode 100644 
import pytest
from fixtures.utils import mkdir_if_needed
import getpass
import os
import psycopg2

# NOTE(review): `pytest` and `getpass` are currently unused in this module;
# kept in case other chunks of the file (or fixtures) rely on them.
# Trailing comma makes this the tuple the parentheses suggest (pytest also
# accepts a bare string, so this is behavior-compatible).
pytest_plugins = ("fixtures.zenith_fixtures",)

# FIXME: put host + port in a fixture
HOST = 'localhost'
PORT = 55432


def test_zenith_regress(pageserver, postgres, pg_bin, zenith_cli,
                        test_output_dir, pg_distrib_dir, base_dir, capsys):
    """Run the pg_regress-style tests from test_runner/zenith_regress.

    Creates a fresh zenith branch, starts a postgres on it, creates the
    "regression" database that pg_regress expects, then invokes pg_regress
    with --use-existing against that server using the parallel_schedule in
    test_runner/zenith_regress.

    Args (all pytest fixtures):
        pageserver: running pageserver instance (implicit dependency).
        postgres: factory for compute postgres nodes.
        pg_bin: helper for running postgres binaries.
        zenith_cli: wrapper around the `zenith` CLI.
        test_output_dir: per-test scratch directory.
        pg_distrib_dir: postgres installation directory.
        base_dir: repository root.
        capsys: pytest capture fixture; disabled while pg_regress runs so
            its progress output reaches the console live.
    """
    # Create a branch for us
    zenith_cli.run(["branch", "test_zenith_regress", "empty"])

    # Connect to postgres and create a database called "regression".
    pg = postgres.create_start('test_zenith_regress')
    pg_conn = psycopg2.connect(pg.connstr())
    # CREATE DATABASE cannot run inside a transaction block, hence autocommit.
    pg_conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
    try:
        cur = pg_conn.cursor()
        cur.execute('CREATE DATABASE regression')
    finally:
        # Close even if CREATE DATABASE fails, so the compute node can be
        # shut down cleanly by the fixture teardown.
        pg_conn.close()

    # Create some local directories for pg_regress to run in.
    runpath = os.path.join(test_output_dir, 'regress')
    mkdir_if_needed(runpath)
    mkdir_if_needed(os.path.join(runpath, 'testtablespace'))

    # Compute all the file locations that pg_regress will need.
    # This test runs zenith specific tests
    build_path = os.path.join(pg_distrib_dir, 'build/src/test/regress')
    src_path = os.path.join(base_dir, 'test_runner/zenith_regress')
    bindir = os.path.join(pg_distrib_dir, 'bin')
    schedule = os.path.join(src_path, 'parallel_schedule')
    pg_regress = os.path.join(build_path, 'pg_regress')

    pg_regress_command = [
        pg_regress,
        '--use-existing',
        '--bindir={}'.format(bindir),
        '--dlpath={}'.format(build_path),
        '--schedule={}'.format(schedule),
        '--inputdir={}'.format(src_path),
    ]

    print(pg_regress_command)
    env = {
        'PGPORT': str(pg.port),
        'PGUSER': pg.username,
        'PGHOST': pg.host,
    }

    # Run the command.
    # We don't capture the output. It's not too chatty, and it always
    # logs the exact same data to `regression.out` anyway.
    with capsys.disabled():
        pg_bin.run(pg_regress_command, env=env, cwd=runpath)
diff --git a/test_runner/zenith_regress/expected/.gitignore b/test_runner/zenith_regress/expected/.gitignore new file mode 100644 index 0000000000..93c56c85a0 --- /dev/null +++ b/test_runner/zenith_regress/expected/.gitignore @@ -0,0 +1,9 @@ +/constraints.out +/copy.out +/create_function_1.out +/create_function_2.out +/largeobject.out +/largeobject_1.out +/misc.out +/security_label.out +/tablespace.out diff --git a/test_runner/zenith_regress/expected/zenith-cid.out b/test_runner/zenith_regress/expected/zenith-cid.out new file mode 100644 index 0000000000..f8dab98ec9 --- /dev/null +++ b/test_runner/zenith_regress/expected/zenith-cid.out @@ -0,0 +1,34 @@ +BEGIN; +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +CREATE TABLE cursor (a int); +INSERT INTO cursor VALUES (1); +DECLARE c1 NO SCROLL CURSOR FOR SELECT * FROM cursor FOR UPDATE; +UPDATE cursor SET a = 2; +FETCH ALL FROM c1; + a +--- +(0 rows) + +COMMIT; +DROP TABLE cursor; +create table to_be_evicted(x bigint); +begin; +insert into to_be_evicted values (1); +insert into to_be_evicted select x*10 from to_be_evicted; +insert into to_be_evicted select x*10 from to_be_evicted; +insert into to_be_evicted select x*10 from to_be_evicted; +insert into to_be_evicted select x*10 from to_be_evicted; +insert into to_be_evicted select x*10 from to_be_evicted; +insert into to_be_evicted select x*10 from to_be_evicted; +insert into to_be_evicted select x*10 from to_be_evicted; +insert into to_be_evicted select x*10 from to_be_evicted; +insert into to_be_evicted select x*10 from to_be_evicted; +insert into to_be_evicted select x*10 from to_be_evicted; +select sum(x) from to_be_evicted; + sum +------------- + 25937424601 +(1 row) + +end; +drop table to_be_evicted; diff --git a/test_runner/zenith_regress/expected/zenith-clog.out b/test_runner/zenith_regress/expected/zenith-clog.out new file mode 100644 index 0000000000..f73186065d --- /dev/null +++ b/test_runner/zenith_regress/expected/zenith-clog.out @@ -0,0 +1,15 @@ +create or 
replace procedure do_commits() as $$ +declare + xid xid8; + i integer; +begin + for i in 1..1000000 loop + xid = txid_current(); + commit; + if (pg_xact_status(xid) <> 'committed') then + raise exception 'CLOG corruption'; + end if; + end loop; +end; +$$ language plpgsql; +call do_commits(); diff --git a/test_runner/zenith_regress/expected/zenith-rel-truncate.out b/test_runner/zenith_regress/expected/zenith-rel-truncate.out new file mode 100644 index 0000000000..ed53357c4a --- /dev/null +++ b/test_runner/zenith_regress/expected/zenith-rel-truncate.out @@ -0,0 +1,19 @@ +-- +-- Test that when a relation is truncated by VACUUM, the next smgrnblocks() +-- query to get the relation's size returns the new size. +-- (This isn't related to the TRUNCATE command, which works differently, +-- by creating a new relation file) +-- +CREATE TABLE truncatetest (i int); +INSERT INTO truncatetest SELECT g FROM generate_series(1, 10000) g; +-- Remove all the rows, and run VACUUM to remove the dead tuples and +-- truncate the physical relation to 0 blocks. 
+DELETE FROM truncatetest; +VACUUM truncatetest; +-- Check that a SeqScan sees correct relation size (which is now 0) +SELECT * FROM truncatetest; + i +--- +(0 rows) + +DROP TABLE truncatetest; diff --git a/test_runner/zenith_regress/expected/zenith-vacuum-full.out b/test_runner/zenith_regress/expected/zenith-vacuum-full.out new file mode 100644 index 0000000000..17e3e459a6 --- /dev/null +++ b/test_runner/zenith_regress/expected/zenith-vacuum-full.out @@ -0,0 +1,304 @@ +create table foo(a int primary key, b int, c int); +insert into foo values (generate_series(1,10000), generate_series(1,10000), generate_series(1,10000)); +create index concurrently on foo(b); +create index concurrently on foo(c); +vacuum full foo; +\d foo + Table "public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) + "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +vacuum full foo; +\d foo + Table "public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) + "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +vacuum full foo; +\d foo + Table "public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) + "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +vacuum full foo; +\d foo + Table "public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) + "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +vacuum full foo; +\d foo + Table 
"public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) + "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +vacuum full foo; +\d foo + Table "public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) + "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +vacuum full foo; +\d foo + Table "public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) + "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +vacuum full foo; +\d foo + Table "public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) + "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +vacuum full foo; +\d foo + Table "public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) + "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +vacuum full foo; +\d foo + Table "public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) + "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +vacuum full foo; +\d foo + Table "public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- 
+ a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) + "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +vacuum full foo; +\d foo + Table "public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) + "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +vacuum full foo; +\d foo + Table "public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) + "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +vacuum full foo; +\d foo + Table "public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) + "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +vacuum full foo; +\d foo + Table "public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) + "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +vacuum full foo; +\d foo + Table "public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) + "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +vacuum full foo; +\d foo + Table "public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) 
+ "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +vacuum full foo; +\d foo + Table "public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) + "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +vacuum full foo; +\d foo + Table "public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) + "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +vacuum full foo; +\d foo + Table "public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) + "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +vacuum full foo; +\d foo + Table "public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) + "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +vacuum full foo; +\d foo + Table "public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) + "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +vacuum full foo; +\d foo + Table "public.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "foo_pkey" PRIMARY KEY, btree (a) + "foo_b_idx" btree (b) + "foo_c_idx" btree (c) + +drop table foo; diff --git 
a/test_runner/zenith_regress/parallel_schedule b/test_runner/zenith_regress/parallel_schedule new file mode 100644 index 0000000000..fb9686c08e --- /dev/null +++ b/test_runner/zenith_regress/parallel_schedule @@ -0,0 +1,11 @@ +# ---------- +# src/test/regress/parallel_schedule +# +# By convention, we put no more than twenty tests in any one parallel group; +# this limits the number of connections needed to run the tests. +# ---------- + +test: zenith-cid +test: zenith-rel-truncate +test: zenith-clog +test: zenith-vacuum-full diff --git a/test_runner/zenith_regress/serial_schedule b/test_runner/zenith_regress/serial_schedule new file mode 100644 index 0000000000..ce7c693b99 --- /dev/null +++ b/test_runner/zenith_regress/serial_schedule @@ -0,0 +1,6 @@ +# src/test/regress/serial_schedule +# This should probably be in an order similar to parallel_schedule. +test: zenith-cid +test: zenith-rel-truncate +test: zenith-clog +test: zenith-vacuum-full diff --git a/test_runner/zenith_regress/sql/.gitignore b/test_runner/zenith_regress/sql/.gitignore new file mode 100644 index 0000000000..46c8112094 --- /dev/null +++ b/test_runner/zenith_regress/sql/.gitignore @@ -0,0 +1,8 @@ +/constraints.sql +/copy.sql +/create_function_1.sql +/create_function_2.sql +/largeobject.sql +/misc.sql +/security_label.sql +/tablespace.sql diff --git a/test_runner/zenith_regress/sql/zenith-cid.sql b/test_runner/zenith_regress/sql/zenith-cid.sql new file mode 100644 index 0000000000..14accfe1ad --- /dev/null +++ b/test_runner/zenith_regress/sql/zenith-cid.sql @@ -0,0 +1,26 @@ +BEGIN; +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +CREATE TABLE cursor (a int); +INSERT INTO cursor VALUES (1); +DECLARE c1 NO SCROLL CURSOR FOR SELECT * FROM cursor FOR UPDATE; +UPDATE cursor SET a = 2; +FETCH ALL FROM c1; +COMMIT; +DROP TABLE cursor; + +create table to_be_evicted(x bigint); +begin; +insert into to_be_evicted values (1); +insert into to_be_evicted select x*10 from to_be_evicted; +insert into to_be_evicted 
select x*10 from to_be_evicted; +insert into to_be_evicted select x*10 from to_be_evicted; +insert into to_be_evicted select x*10 from to_be_evicted; +insert into to_be_evicted select x*10 from to_be_evicted; +insert into to_be_evicted select x*10 from to_be_evicted; +insert into to_be_evicted select x*10 from to_be_evicted; +insert into to_be_evicted select x*10 from to_be_evicted; +insert into to_be_evicted select x*10 from to_be_evicted; +insert into to_be_evicted select x*10 from to_be_evicted; +select sum(x) from to_be_evicted; +end; +drop table to_be_evicted; diff --git a/test_runner/zenith_regress/sql/zenith-clog.sql b/test_runner/zenith_regress/sql/zenith-clog.sql new file mode 100644 index 0000000000..9eadfaa747 --- /dev/null +++ b/test_runner/zenith_regress/sql/zenith-clog.sql @@ -0,0 +1,16 @@ +create or replace procedure do_commits() as $$ +declare + xid xid8; + i integer; +begin + for i in 1..1000000 loop + xid = txid_current(); + commit; + if (pg_xact_status(xid) <> 'committed') then + raise exception 'CLOG corruption'; + end if; + end loop; +end; +$$ language plpgsql; + +call do_commits(); diff --git a/test_runner/zenith_regress/sql/zenith-rel-truncate.sql b/test_runner/zenith_regress/sql/zenith-rel-truncate.sql new file mode 100644 index 0000000000..7a35cad3ab --- /dev/null +++ b/test_runner/zenith_regress/sql/zenith-rel-truncate.sql @@ -0,0 +1,18 @@ +-- +-- Test that when a relation is truncated by VACUUM, the next smgrnblocks() +-- query to get the relation's size returns the new size. +-- (This isn't related to the TRUNCATE command, which works differently, +-- by creating a new relation file) +-- +CREATE TABLE truncatetest (i int); +INSERT INTO truncatetest SELECT g FROM generate_series(1, 10000) g; + +-- Remove all the rows, and run VACUUM to remove the dead tuples and +-- truncate the physical relation to 0 blocks. 
+DELETE FROM truncatetest; +VACUUM truncatetest; + +-- Check that a SeqScan sees correct relation size (which is now 0) +SELECT * FROM truncatetest; + +DROP TABLE truncatetest; diff --git a/test_runner/zenith_regress/sql/zenith-vacuum-full.sql b/test_runner/zenith_regress/sql/zenith-vacuum-full.sql new file mode 100644 index 0000000000..a2625a65a9 --- /dev/null +++ b/test_runner/zenith_regress/sql/zenith-vacuum-full.sql @@ -0,0 +1,51 @@ +create table foo(a int primary key, b int, c int); +insert into foo values (generate_series(1,10000), generate_series(1,10000), generate_series(1,10000)); +create index concurrently on foo(b); +create index concurrently on foo(c); +vacuum full foo; +\d foo +vacuum full foo; +\d foo +vacuum full foo; +\d foo +vacuum full foo; +\d foo +vacuum full foo; +\d foo +vacuum full foo; +\d foo +vacuum full foo; +\d foo +vacuum full foo; +\d foo +vacuum full foo; +\d foo +vacuum full foo; +\d foo +vacuum full foo; +\d foo +vacuum full foo; +\d foo +vacuum full foo; +\d foo +vacuum full foo; +\d foo +vacuum full foo; +\d foo +vacuum full foo; +\d foo +vacuum full foo; +\d foo +vacuum full foo; +\d foo +vacuum full foo; +\d foo +vacuum full foo; +\d foo +vacuum full foo; +\d foo +vacuum full foo; +\d foo +vacuum full foo; +\d foo +drop table foo;