Add test_zenith_regress.py that runs pg_regress styled tests from test_runner/zenith_regress.

TODO: remove similar tests from vendor/postgres testset
This commit is contained in:
anastasia
2021-05-19 19:59:52 +03:00
committed by lubennikovaav
parent 86056abd0e
commit f38c2e620e
15 changed files with 601 additions and 0 deletions

View File

@@ -0,0 +1,62 @@
import pytest
from fixtures.utils import mkdir_if_needed
import getpass
import os
import psycopg2
# Register the shared zenith fixtures (pageserver, postgres, zenith_cli, ...).
# NOTE(review): the parentheses without a trailing comma make this a plain
# string, not a 1-tuple; pytest accepts either form, so behavior is unchanged.
pytest_plugins = ("fixtures.zenith_fixtures")
# FIXME: put host + port in a fixture
# Currently unused below; pg_regress connects via the PGHOST/PGPORT env vars
# derived from the started compute node instead.
HOST = 'localhost'
PORT = 55432
def test_zenith_regress(pageserver, postgres, pg_bin, zenith_cli, test_output_dir, pg_distrib_dir, base_dir, capsys):
    """Run the zenith-specific pg_regress schedule from test_runner/zenith_regress.

    Creates a fresh branch, starts a compute node on it, creates the
    "regression" database that pg_regress expects, and then invokes
    pg_regress with --use-existing against that node.

    All parameters are pytest fixtures supplied by fixtures.zenith_fixtures.
    """
    # Create a dedicated branch so the regression run does not disturb others.
    zenith_cli.run(["branch", "test_zenith_regress", "empty"])

    # Connect to postgres and create a database called "regression";
    # pg_regress --use-existing connects to it instead of running initdb.
    pg = postgres.create_start('test_zenith_regress')
    pg_conn = psycopg2.connect(pg.connstr())
    # CREATE DATABASE cannot run inside a transaction block.
    pg_conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
    try:
        with pg_conn.cursor() as cur:
            cur.execute('CREATE DATABASE regression')
    finally:
        # Close the connection even if CREATE DATABASE fails, so the compute
        # node is not left with a dangling session.
        pg_conn.close()

    # Create some local directories for pg_regress to run in.
    runpath = os.path.join(test_output_dir, 'regress')
    mkdir_if_needed(runpath)
    mkdir_if_needed(os.path.join(runpath, 'testtablespace'))

    # Compute all the file locations that pg_regress will need.
    # This test runs zenith-specific tests from test_runner/zenith_regress.
    build_path = os.path.join(pg_distrib_dir, 'build/src/test/regress')
    src_path = os.path.join(base_dir, 'test_runner/zenith_regress')
    bindir = os.path.join(pg_distrib_dir, 'bin')
    schedule = os.path.join(src_path, 'parallel_schedule')
    pg_regress = os.path.join(build_path, 'pg_regress')

    pg_regress_command = [
        pg_regress,
        '--use-existing',
        '--bindir={}'.format(bindir),
        '--dlpath={}'.format(build_path),
        '--schedule={}'.format(schedule),
        '--inputdir={}'.format(src_path),
    ]
    print(pg_regress_command)

    # Point pg_regress at the already-running compute node.
    env = {
        'PGPORT': str(pg.port),
        'PGUSER': pg.username,
        'PGHOST': pg.host,
    }

    # Run the command.
    # We don't capture the output. It's not too chatty, and it always
    # logs the exact same data to `regression.out` anyway.
    with capsys.disabled():
        pg_bin.run(pg_regress_command, env=env, cwd=runpath)

11
test_runner/zenith_regress/.gitignore vendored Normal file
View File

@@ -0,0 +1,11 @@
# Local binaries
/pg_regress
# Generated subdirectories
/tmp_check/
/results/
/log/
# Note: regression.* are only left behind on a failure; that's why they're not ignored
#/regression.diffs
#/regression.out

View File

@@ -0,0 +1,11 @@
To add a new SQL test
- add sql script to run to zenith_regress/sql/testname.sql
- add expected output to zenith_regress/expected/testname.out
- add testname to both parallel_schedule and serial_schedule files*
That's it.
For more complex tests see the PostgreSQL regression tests. These work basically the same way.
* this was changed recently in PostgreSQL upstream - there is no longer a separate serial_schedule.
Someday we'll catch up with these changes.

View File

@@ -0,0 +1,9 @@
/constraints.out
/copy.out
/create_function_1.out
/create_function_2.out
/largeobject.out
/largeobject_1.out
/misc.out
/security_label.out
/tablespace.out

View File

@@ -0,0 +1,34 @@
BEGIN;
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
CREATE TABLE cursor (a int);
INSERT INTO cursor VALUES (1);
DECLARE c1 NO SCROLL CURSOR FOR SELECT * FROM cursor FOR UPDATE;
UPDATE cursor SET a = 2;
FETCH ALL FROM c1;
a
---
(0 rows)
COMMIT;
DROP TABLE cursor;
create table to_be_evicted(x bigint);
begin;
insert into to_be_evicted values (1);
insert into to_be_evicted select x*10 from to_be_evicted;
insert into to_be_evicted select x*10 from to_be_evicted;
insert into to_be_evicted select x*10 from to_be_evicted;
insert into to_be_evicted select x*10 from to_be_evicted;
insert into to_be_evicted select x*10 from to_be_evicted;
insert into to_be_evicted select x*10 from to_be_evicted;
insert into to_be_evicted select x*10 from to_be_evicted;
insert into to_be_evicted select x*10 from to_be_evicted;
insert into to_be_evicted select x*10 from to_be_evicted;
insert into to_be_evicted select x*10 from to_be_evicted;
select sum(x) from to_be_evicted;
sum
-------------
25937424601
(1 row)
end;
drop table to_be_evicted;

View File

@@ -0,0 +1,15 @@
create or replace procedure do_commits() as $$
declare
xid xid8;
i integer;
begin
for i in 1..1000000 loop
xid = txid_current();
commit;
if (pg_xact_status(xid) <> 'committed') then
raise exception 'CLOG corruption';
end if;
end loop;
end;
$$ language plpgsql;
call do_commits();

View File

@@ -0,0 +1,19 @@
--
-- Test that when a relation is truncated by VACUUM, the next smgrnblocks()
-- query to get the relation's size returns the new size.
-- (This isn't related to the TRUNCATE command, which works differently,
-- by creating a new relation file)
--
CREATE TABLE truncatetest (i int);
INSERT INTO truncatetest SELECT g FROM generate_series(1, 10000) g;
-- Remove all the rows, and run VACUUM to remove the dead tuples and
-- truncate the physical relation to 0 blocks.
DELETE FROM truncatetest;
VACUUM truncatetest;
-- Check that a SeqScan sees correct relation size (which is now 0)
SELECT * FROM truncatetest;
i
---
(0 rows)
DROP TABLE truncatetest;

View File

@@ -0,0 +1,304 @@
create table foo(a int primary key, b int, c int);
insert into foo values (generate_series(1,10000), generate_series(1,10000), generate_series(1,10000));
create index concurrently on foo(b);
create index concurrently on foo(c);
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
vacuum full foo;
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
a | integer | | not null |
b | integer | | |
c | integer | | |
Indexes:
"foo_pkey" PRIMARY KEY, btree (a)
"foo_b_idx" btree (b)
"foo_c_idx" btree (c)
drop table foo;

View File

@@ -0,0 +1,11 @@
# ----------
# src/test/regress/parallel_schedule
#
# By convention, we put no more than twenty tests in any one parallel group;
# this limits the number of connections needed to run the tests.
# ----------
test: zenith-cid
test: zenith-rel-truncate
test: zenith-clog
test: zenith-vacuum-full

View File

@@ -0,0 +1,6 @@
# src/test/regress/serial_schedule
# This should probably be in an order similar to parallel_schedule.
test: zenith-cid
test: zenith-rel-truncate
test: zenith-clog
test: zenith-vacuum-full

View File

@@ -0,0 +1,8 @@
/constraints.sql
/copy.sql
/create_function_1.sql
/create_function_2.sql
/largeobject.sql
/misc.sql
/security_label.sql
/tablespace.sql

View File

@@ -0,0 +1,26 @@
BEGIN;
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
CREATE TABLE cursor (a int);
INSERT INTO cursor VALUES (1);
DECLARE c1 NO SCROLL CURSOR FOR SELECT * FROM cursor FOR UPDATE;
UPDATE cursor SET a = 2;
FETCH ALL FROM c1;
COMMIT;
DROP TABLE cursor;
create table to_be_evicted(x bigint);
begin;
insert into to_be_evicted values (1);
insert into to_be_evicted select x*10 from to_be_evicted;
insert into to_be_evicted select x*10 from to_be_evicted;
insert into to_be_evicted select x*10 from to_be_evicted;
insert into to_be_evicted select x*10 from to_be_evicted;
insert into to_be_evicted select x*10 from to_be_evicted;
insert into to_be_evicted select x*10 from to_be_evicted;
insert into to_be_evicted select x*10 from to_be_evicted;
insert into to_be_evicted select x*10 from to_be_evicted;
insert into to_be_evicted select x*10 from to_be_evicted;
insert into to_be_evicted select x*10 from to_be_evicted;
select sum(x) from to_be_evicted;
end;
drop table to_be_evicted;

View File

@@ -0,0 +1,16 @@
create or replace procedure do_commits() as $$
declare
xid xid8;
i integer;
begin
for i in 1..1000000 loop
xid = txid_current();
commit;
if (pg_xact_status(xid) <> 'committed') then
raise exception 'CLOG corruption';
end if;
end loop;
end;
$$ language plpgsql;
call do_commits();

View File

@@ -0,0 +1,18 @@
--
-- Test that when a relation is truncated by VACUUM, the next smgrnblocks()
-- query to get the relation's size returns the new size.
-- (This isn't related to the TRUNCATE command, which works differently,
-- by creating a new relation file)
--
CREATE TABLE truncatetest (i int);
INSERT INTO truncatetest SELECT g FROM generate_series(1, 10000) g;
-- Remove all the rows, and run VACUUM to remove the dead tuples and
-- truncate the physical relation to 0 blocks.
DELETE FROM truncatetest;
VACUUM truncatetest;
-- Check that a SeqScan sees correct relation size (which is now 0)
SELECT * FROM truncatetest;
DROP TABLE truncatetest;

View File

@@ -0,0 +1,51 @@
create table foo(a int primary key, b int, c int);
insert into foo values (generate_series(1,10000), generate_series(1,10000), generate_series(1,10000));
create index concurrently on foo(b);
create index concurrently on foo(c);
vacuum full foo;
\d foo
vacuum full foo;
\d foo
vacuum full foo;
\d foo
vacuum full foo;
\d foo
vacuum full foo;
\d foo
vacuum full foo;
\d foo
vacuum full foo;
\d foo
vacuum full foo;
\d foo
vacuum full foo;
\d foo
vacuum full foo;
\d foo
vacuum full foo;
\d foo
vacuum full foo;
\d foo
vacuum full foo;
\d foo
vacuum full foo;
\d foo
vacuum full foo;
\d foo
vacuum full foo;
\d foo
vacuum full foo;
\d foo
vacuum full foo;
\d foo
vacuum full foo;
\d foo
vacuum full foo;
\d foo
vacuum full foo;
\d foo
vacuum full foo;
\d foo
vacuum full foo;
\d foo
drop table foo;