Mirror of https://github.com/neondatabase/neon.git, synced 2026-01-05 20:42:54 +00:00
Force yapf (Python code formatter) in CI (#772)
* Add yapf run to CircleCI
* Pin yapf version
* Enable `SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES` setting
* Reformat all existing code with slight manual adjustments
* test_runner/README: note that yapf is forced
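For context, the `SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES` knob is what produces the one-argument-per-line style visible throughout the diff below. A minimal sketch of its effect (not part of the commit; `create_start` here is a stub standing in for the real fixture method):

```python
# Sketch of yapf's SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES setting.
# Both call styles below are valid Python, so this file runs as-is.


def create_start(node_name, wal_acceptors=None, config_lines=None):
    """Stub standing in for the real fixture method."""
    return (node_name, wal_acceptors, config_lines)


# Without the setting, yapf packs as many comma-separated values per
# wrapped line as fit within the column limit:
pg = create_start('test_restart_compute',
                  wal_acceptors='host=a host=b', config_lines=None)

# With the setting enabled, once a call has to wrap at all, every
# top-level comma-separated value gets a line of its own:
pg = create_start('test_restart_compute',
                  wal_acceptors='host=a host=b',
                  config_lines=None)
```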
@@ -182,6 +182,21 @@ jobs:
paths:
- "*"

check-python:
executor: python/default
steps:
- checkout
- run:
name: Install pipenv & deps
working_directory: test_runner
command: |
pip install pipenv
pipenv install --dev
- run:
name: Run yapf to ensure code format
working_directory: test_runner
command: pipenv run yapf --recursive --diff .

run-pytest:
#description: "Run pytest"
executor: python/default

@@ -333,6 +348,7 @@ workflows:
build_and_test:
jobs:
- check-codestyle
- check-python
- build-postgres:
name: build-postgres-<< matrix.build_type >>
matrix:

@@ -14,9 +14,11 @@ asyncpg = "*"
cached-property = "*"

[dev-packages]
yapf = "*"
flake8 = "*"
mypy = "*"
# Behavior may change slightly between versions. These are run continuously,
# so we pin exact versions to avoid surprising breaks. Update if comfortable.
yapf = "==0.31.0"

[requires]
# we need at least 3.6, but pipenv doesn't allow to say this directly

test_runner/Pipfile.lock (generated, 2 lines changed)
@@ -1,7 +1,7 @@
{
"_meta": {
"hash": {
"sha256": "b3ebe8fa70f41f9f79a8727ff47131b9e30772548749c85587987dcbb7bed336"
"sha256": "3645ae8d2dcf55bd2a54963c44cfeedf577f3b289d1077365214a80a7f36e643"
},
"pipfile-spec": 6,
"requires": {

@@ -95,11 +95,13 @@ Python destructors, e.g. `__del__()` aren't recommended for cleanup.

### Code quality

We force code formatting via yapf:

1. Install `yapf` and other tools (`flake8`, `mypy`) with `pipenv install --dev`.
1. Reformat all your code by running `pipenv run yapf -ri .` in the `test_runner/` directory.

Before submitting a patch, please consider:

* Writing a couple of docstrings to clarify the reasoning behind a new test.
* Running `flake8` (or a linter of your choice, e.g. `pycodestyle`) and fixing possible defects, if any.
* Formatting the code with `yapf -r -i .` (TODO: implement an opt-in pre-commit hook for that; a hypothetical sketch follows below).
* (Optional) Typechecking the code with `mypy .`. Currently this mostly affects `fixtures/zenith_fixtures.py`.

The tools can be installed with `pipenv install --dev`.
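The pre-commit hook is still a TODO in this commit; purely as an illustration, an opt-in hook could reuse the same check the CI job runs (`pipenv run yapf --recursive --diff .`). A hypothetical sketch (not part of the commit), to be saved as `.git/hooks/pre-commit` and marked executable:

```python
#!/usr/bin/env python3
"""Hypothetical opt-in pre-commit hook; not part of this commit."""
import subprocess
import sys


def main() -> int:
    # Same check as the CI job: in --diff mode yapf prints the changes it
    # would make and exits with a non-zero status if any file needs
    # reformatting. Git runs hooks from the repo root, so cwd works here.
    result = subprocess.run(['pipenv', 'run', 'yapf', '--recursive', '--diff', '.'],
                            cwd='test_runner',
                            universal_newlines=True,
                            stdout=subprocess.PIPE)
    if result.returncode != 0:
        sys.stderr.write(result.stdout)
        sys.stderr.write('yapf check failed; run `pipenv run yapf -ri .` in test_runner/\n')
    return result.returncode


if __name__ == '__main__':
    sys.exit(main())
```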
@@ -1,4 +1,3 @@

from contextlib import closing
from typing import Iterator
from uuid import uuid4
@@ -6,7 +5,6 @@ import psycopg2
from fixtures.zenith_fixtures import PortDistributor, Postgres, ZenithCli, ZenithPageserver, PgBin
import pytest


pytest_plugins = ("fixtures.zenith_fixtures")


@@ -35,7 +33,9 @@ def test_pageserver_auth(pageserver_auth_enabled: ZenithPageserver):
ps.safe_psql(f"tenant_create {uuid4().hex}", password=management_token)

# fail to create tenant using tenant token
with pytest.raises(psycopg2.DatabaseError, match='Attempt to access management api with tenant scope. Permission denied'):
with pytest.raises(
psycopg2.DatabaseError,
match='Attempt to access management api with tenant scope. Permission denied'):
ps.safe_psql(f"tenant_create {uuid4().hex}", password=tenant_token)


@@ -60,14 +60,14 @@ def test_compute_auth_to_pageserver(
wa_factory.start_n_new(3, management_token)

with Postgres(
zenith_cli=zenith_cli,
repo_dir=repo_dir,
pg_bin=pg_bin,
tenant_id=ps.initial_tenant,
port=port_distributor.get_port(),
zenith_cli=zenith_cli,
repo_dir=repo_dir,
pg_bin=pg_bin,
tenant_id=ps.initial_tenant,
port=port_distributor.get_port(),
).create_start(
branch,
wal_acceptors=wa_factory.get_connstrs() if with_wal_acceptors else None,
branch,
wal_acceptors=wa_factory.get_connstrs() if with_wal_acceptors else None,
) as pg:
with closing(pg.connect()) as conn:
with conn.cursor() as cur:

@@ -18,9 +18,12 @@ def test_clog_truncate(zenith_cli, pageserver: ZenithPageserver, postgres: Postg

# set aggressive autovacuum to make sure that truncation will happen
config = [
'autovacuum_max_workers=10', 'autovacuum_vacuum_threshold=0',
'autovacuum_vacuum_insert_threshold=0', 'autovacuum_vacuum_cost_delay=0',
'autovacuum_vacuum_cost_limit=10000', 'autovacuum_naptime =1s',
'autovacuum_max_workers=10',
'autovacuum_vacuum_threshold=0',
'autovacuum_vacuum_insert_threshold=0',
'autovacuum_vacuum_cost_delay=0',
'autovacuum_vacuum_cost_limit=10000',
'autovacuum_naptime =1s',
'autovacuum_freeze_max_age=100000'
]

@@ -41,6 +41,7 @@ def test_createdb(
for db in (pg, pg2):
db.connect(dbname='foodb').close()


#
# Test DROP DATABASE
#
@@ -49,7 +50,7 @@ def test_dropdb(
pageserver: ZenithPageserver,
postgres: PostgresFactory,
pg_bin,
test_output_dir
test_output_dir,
):
zenith_cli.run(["branch", "test_dropdb", "empty"])

@@ -66,7 +67,6 @@ def test_dropdb(
cur.execute("SELECT oid FROM pg_database WHERE datname='foodb';")
dboid = cur.fetchone()[0]


with closing(pg.connect()) as conn:
with conn.cursor() as cur:
cur.execute('DROP DATABASE foodb')
@@ -76,7 +76,6 @@ def test_dropdb(
cur.execute('SELECT pg_current_wal_insert_lsn()')
lsn_after_drop = cur.fetchone()[0]


# Create two branches before and after database drop.
zenith_cli.run(["branch", "test_before_dropdb", "test_dropdb@" + lsn_before_drop])
pg_before = postgres.create_start('test_before_dropdb')

@@ -10,8 +10,12 @@ pytest_plugins = ("fixtures.zenith_fixtures")
# it only checks next_multixact_id field in restored pg_control,
# since we don't have functions to check multixact internals.
#
def test_multixact(pageserver: ZenithPageserver, postgres: PostgresFactory,
pg_bin, zenith_cli, base_dir, test_output_dir):
def test_multixact(pageserver: ZenithPageserver,
postgres: PostgresFactory,
pg_bin,
zenith_cli,
base_dir,
test_output_dir):
# Create a branch for us
zenith_cli.run(["branch", "test_multixact", "empty"])
pg = postgres.create_start('test_multixact')

@@ -5,6 +5,7 @@ from fixtures.log_helper import log

pytest_plugins = ("fixtures.zenith_fixtures")


#
# Test where Postgres generates a lot of WAL, and it's garbage collected away, but
# no pages are evicted so that Postgres uses an old LSN in a GetPage request.
@@ -15,7 +16,10 @@ pytest_plugins = ("fixtures.zenith_fixtures")
# just a hint that the page hasn't been modified since that LSN, and the page
# server should return the latest page version regardless of the LSN.
#
def test_old_request_lsn(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, pg_bin):
def test_old_request_lsn(zenith_cli,
pageserver: ZenithPageserver,
postgres: PostgresFactory,
pg_bin):
# Create a branch for us
zenith_cli.run(["branch", "test_old_request_lsn", "empty"])
pg = postgres.create_start('test_old_request_lsn')
@@ -47,20 +51,20 @@ def test_old_request_lsn(zenith_cli, pageserver: ZenithPageserver, postgres: Pos
from pg_settings where name = 'shared_buffers'
''')
row = cur.fetchone()
log.info(f'shared_buffers is {row[0]}, table size {row[1]}');
log.info(f'shared_buffers is {row[0]}, table size {row[1]}')
assert int(row[0]) < int(row[1])

cur.execute('VACUUM foo');
cur.execute('VACUUM foo')

# Make a lot of updates on a single row, generating a lot of WAL. Trigger
# garbage collections so that the page server will remove old page versions.
for i in range(10):
pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
for j in range(100):
cur.execute('UPDATE foo SET val = val + 1 WHERE id = 1;');
cur.execute('UPDATE foo SET val = val + 1 WHERE id = 1;')

# All (or at least most of) the updates should've been on the same page, so
# that we haven't had to evict any dirty pages for a long time. Now run
# a query that sends GetPage@LSN requests with the old LSN.
cur.execute("SELECT COUNT(*), SUM(val) FROM foo");
cur.execute("SELECT COUNT(*), SUM(val) FROM foo")
assert cur.fetchone() == (100000, 101000)

@@ -63,7 +63,8 @@ def test_tenant_list_psql(pageserver: ZenithPageserver, zenith_cli):
cur = conn.cursor()

# check same tenant cannot be created twice
with pytest.raises(psycopg2.DatabaseError, match=f'tenant {pageserver.initial_tenant} already exists'):
with pytest.raises(psycopg2.DatabaseError,
match=f'tenant {pageserver.initial_tenant} already exists'):
cur.execute(f'tenant_create {pageserver.initial_tenant}')

# create one more tenant
@@ -102,5 +103,6 @@ def test_pageserver_http_api_client(pageserver: ZenithPageserver):


def test_pageserver_http_api_client_auth_enabled(pageserver_auth_enabled: ZenithPageserver):
client = pageserver_auth_enabled.http_client(auth_token=pageserver_auth_enabled.auth_keys.generate_management_token())
client = pageserver_auth_enabled.http_client(
auth_token=pageserver_auth_enabled.auth_keys.generate_management_token())
check_client(client, pageserver_auth_enabled.initial_tenant)

@@ -9,17 +9,20 @@ from fixtures.log_helper import log

pytest_plugins = ("fixtures.zenith_fixtures")


# Check that dead minority doesn't prevent the commits: execute insert n_inserts
# times, with fault_probability chance of getting a wal acceptor down or up
# along the way. 2 of 3 are always alive, so the work keeps going.
def test_pageserver_restart(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, wa_factory: WalAcceptorFactory):
def test_pageserver_restart(zenith_cli,
pageserver: ZenithPageserver,
postgres: PostgresFactory,
wa_factory: WalAcceptorFactory):

# One safekeeper is enough for this test.
wa_factory.start_n_new(1)

zenith_cli.run(["branch", "test_pageserver_restart", "empty"])
pg = postgres.create_start('test_pageserver_restart',
wal_acceptors=wa_factory.get_connstrs())
pg = postgres.create_start('test_pageserver_restart', wal_acceptors=wa_factory.get_connstrs())

pg_conn = pg.connect()
cur = pg_conn.cursor()
@@ -41,14 +44,14 @@ def test_pageserver_restart(zenith_cli, pageserver: ZenithPageserver, postgres:
from pg_settings where name = 'shared_buffers'
''')
row = cur.fetchone()
log.info(f"shared_buffers is {row[0]}, table size {row[1]}");
log.info(f"shared_buffers is {row[0]}, table size {row[1]}")
assert int(row[0]) < int(row[1])

# Stop and restart pageserver. This is a more or less graceful shutdown, although
# the page server doesn't currently have a shutdown routine so there's no difference
# between stopping and crashing.
pageserver.stop();
pageserver.start();
pageserver.stop()
pageserver.start()

# Stopping the pageserver breaks the connection from the postgres backend to
# the page server, and causes the next query on the connection to fail. Start a new
@@ -62,6 +65,5 @@ def test_pageserver_restart(zenith_cli, pageserver: ZenithPageserver, postgres:
assert cur.fetchone() == (100000, )

# Stop the page server by force, and restart it
pageserver.stop();
pageserver.start();

pageserver.stop()
pageserver.start()

@@ -3,6 +3,7 @@ from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver

pytest_plugins = ("fixtures.zenith_fixtures")


#
# Create read-only compute nodes, anchored at historical points in time.
#
@@ -51,7 +52,8 @@ def test_readonly_node(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
print('LSN after 400100 rows: ' + lsn_c)

# Create first read-only node at the point where only 100 rows were inserted
pg_hundred = postgres.create_start("test_readonly_node_hundred", branch=f'test_readonly_node@{lsn_a}')
pg_hundred = postgres.create_start("test_readonly_node_hundred",
branch=f'test_readonly_node@{lsn_a}')

# And another at the point where 200100 rows were inserted
pg_more = postgres.create_start("test_readonly_node_more", branch=f'test_readonly_node@{lsn_b}')
@@ -73,7 +75,8 @@ def test_readonly_node(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
assert main_cur.fetchone() == (400100, )

# Check creating a node at segment boundary
pg = postgres.create_start("test_branch_segment_boundary", branch="test_readonly_node@0/3000000")
pg = postgres.create_start("test_branch_segment_boundary",
branch="test_readonly_node@0/3000000")
cur = pg.connect().cursor()
cur.execute('SELECT 1')
assert cur.fetchone() == (1, )

@@ -12,13 +12,13 @@ pytest_plugins = ("fixtures.zenith_fixtures")
#
@pytest.mark.parametrize('with_wal_acceptors', [False, True])
def test_restart_compute(
zenith_cli,
pageserver: ZenithPageserver,
postgres: PostgresFactory,
pg_bin,
wa_factory,
with_wal_acceptors: bool,
):
zenith_cli,
pageserver: ZenithPageserver,
postgres: PostgresFactory,
pg_bin,
wa_factory,
with_wal_acceptors: bool,
):
wal_acceptor_connstrs = None
zenith_cli.run(["branch", "test_restart_compute", "empty"])

@@ -26,8 +26,7 @@ def test_restart_compute(
wa_factory.start_n_new(3)
wal_acceptor_connstrs = wa_factory.get_connstrs()

pg = postgres.create_start('test_restart_compute',
wal_acceptors=wal_acceptor_connstrs)
pg = postgres.create_start('test_restart_compute', wal_acceptors=wal_acceptor_connstrs)
log.info("postgres is running on 'test_restart_compute' branch")

with closing(pg.connect()) as conn:
@@ -40,9 +39,7 @@ def test_restart_compute(
log.info(f"res = {r}")

# Remove data directory and restart
pg.stop_and_destroy().create_start('test_restart_compute',
wal_acceptors=wal_acceptor_connstrs)

pg.stop_and_destroy().create_start('test_restart_compute', wal_acceptors=wal_acceptor_connstrs)

with closing(pg.connect()) as conn:
with conn.cursor() as cur:
@@ -61,8 +58,7 @@ def test_restart_compute(
log.info(f"res = {r}")

# Again remove data directory and restart
pg.stop_and_destroy().create_start('test_restart_compute',
wal_acceptors=wal_acceptor_connstrs)
pg.stop_and_destroy().create_start('test_restart_compute', wal_acceptors=wal_acceptor_connstrs)

# That select causes lots of FPI's and increases probability of walkeepers
# lagging behind after query completion
@@ -76,8 +72,7 @@ def test_restart_compute(
log.info(f"res = {r}")

# And again remove data directory and restart
pg.stop_and_destroy().create_start('test_restart_compute',
wal_acceptors=wal_acceptor_connstrs)
pg.stop_and_destroy().create_start('test_restart_compute', wal_acceptors=wal_acceptor_connstrs)

with closing(pg.connect()) as conn:
with conn.cursor() as cur:

@@ -5,10 +5,15 @@ from fixtures.log_helper import log

pytest_plugins = ("fixtures.zenith_fixtures")


def print_gc_result(row):
log.info("GC duration {elapsed} ms".format_map(row));
log.info(" REL total: {layer_relfiles_total}, needed_by_cutoff {layer_relfiles_needed_by_cutoff}, needed_by_branches: {layer_relfiles_needed_by_branches}, not_updated: {layer_relfiles_not_updated}, needed_as_tombstone {layer_relfiles_needed_as_tombstone}, removed: {layer_relfiles_removed}, dropped: {layer_relfiles_dropped}".format_map(row))
log.info(" NONREL total: {layer_nonrelfiles_total}, needed_by_cutoff {layer_nonrelfiles_needed_by_cutoff}, needed_by_branches: {layer_nonrelfiles_needed_by_branches}, not_updated: {layer_nonrelfiles_not_updated}, needed_as_tombstone {layer_nonrelfiles_needed_as_tombstone}, removed: {layer_nonrelfiles_removed}, dropped: {layer_nonrelfiles_dropped}".format_map(row))
log.info("GC duration {elapsed} ms".format_map(row))
log.info(
" REL total: {layer_relfiles_total}, needed_by_cutoff {layer_relfiles_needed_by_cutoff}, needed_by_branches: {layer_relfiles_needed_by_branches}, not_updated: {layer_relfiles_not_updated}, needed_as_tombstone {layer_relfiles_needed_as_tombstone}, removed: {layer_relfiles_removed}, dropped: {layer_relfiles_dropped}"
.format_map(row))
log.info(
" NONREL total: {layer_nonrelfiles_total}, needed_by_cutoff {layer_nonrelfiles_needed_by_cutoff}, needed_by_branches: {layer_nonrelfiles_needed_by_branches}, not_updated: {layer_nonrelfiles_not_updated}, needed_as_tombstone {layer_nonrelfiles_needed_as_tombstone}, removed: {layer_nonrelfiles_removed}, dropped: {layer_nonrelfiles_dropped}"
.format_map(row))


#
@@ -24,7 +29,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
with closing(pg.connect()) as conn:
with conn.cursor() as cur:
with closing(pageserver.connect()) as psconn:
with psconn.cursor(cursor_factory = psycopg2.extras.DictCursor) as pscur:
with psconn.cursor(cursor_factory=psycopg2.extras.DictCursor) as pscur:

# Get the timeline ID of our branch. We need it for the 'do_gc' command
cur.execute("SHOW zenith.zenith_timeline")
@@ -34,9 +39,9 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
cur.execute("CREATE TABLE foo(x integer)")
cur.execute("INSERT INTO foo VALUES (1)")

cur.execute("select relfilenode from pg_class where oid = 'foo'::regclass");
row = cur.fetchone();
log.info(f"relfilenode is {row[0]}");
cur.execute("select relfilenode from pg_class where oid = 'foo'::regclass")
row = cur.fetchone()
log.info(f"relfilenode is {row[0]}")

# Run GC, to clear out any garbage left behind in the catalogs by
# the CREATE TABLE command. We want to have a clean slate with no garbage
@@ -54,9 +59,10 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
log.info("Running GC before test")
pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
row = pscur.fetchone()
print_gc_result(row);
print_gc_result(row)
# remember the number of files
layer_relfiles_remain = row['layer_relfiles_total'] - row['layer_relfiles_removed']
layer_relfiles_remain = (row['layer_relfiles_total'] -
row['layer_relfiles_removed'])
assert layer_relfiles_remain > 0

# Insert a row and run GC. Checkpoint should freeze the layer
@@ -66,7 +72,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
cur.execute("INSERT INTO foo VALUES (1)")
pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
row = pscur.fetchone()
print_gc_result(row);
print_gc_result(row)
assert row['layer_relfiles_total'] == layer_relfiles_remain + 2
assert row['layer_relfiles_removed'] == 2
assert row['layer_relfiles_dropped'] == 0
@@ -80,7 +86,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):

pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
row = pscur.fetchone()
print_gc_result(row);
print_gc_result(row)
assert row['layer_relfiles_total'] == layer_relfiles_remain + 2
assert row['layer_relfiles_removed'] == 2
assert row['layer_relfiles_dropped'] == 0
@@ -92,7 +98,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):

pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
row = pscur.fetchone()
print_gc_result(row);
print_gc_result(row)
assert row['layer_relfiles_total'] == layer_relfiles_remain + 2
assert row['layer_relfiles_removed'] == 2
assert row['layer_relfiles_dropped'] == 0
@@ -101,7 +107,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
log.info("Run GC again, with nothing to do")
pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
row = pscur.fetchone()
print_gc_result(row);
print_gc_result(row)
assert row['layer_relfiles_total'] == layer_relfiles_remain
assert row['layer_relfiles_removed'] == 0
assert row['layer_relfiles_dropped'] == 0
@@ -109,12 +115,12 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
#
# Test DROP TABLE checks that relation data and metadata was deleted by GC from object storage
#
log.info("Drop table and run GC again");
log.info("Drop table and run GC again")
cur.execute("DROP TABLE foo")

pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
row = pscur.fetchone()
print_gc_result(row);
print_gc_result(row)

# We still cannot remove the latest layers
# because they serve as tombstones for earlier layers.

@@ -21,8 +21,18 @@ def test_tenants_normal_work(
tenant_1 = tenant_factory.create()
tenant_2 = tenant_factory.create()

zenith_cli.run(["branch", f"test_tenants_normal_work_with_wal_acceptors{with_wal_acceptors}", "main", f"--tenantid={tenant_1}"])
zenith_cli.run(["branch", f"test_tenants_normal_work_with_wal_acceptors{with_wal_acceptors}", "main", f"--tenantid={tenant_2}"])
zenith_cli.run([
"branch",
f"test_tenants_normal_work_with_wal_acceptors{with_wal_acceptors}",
"main",
f"--tenantid={tenant_1}"
])
zenith_cli.run([
"branch",
f"test_tenants_normal_work_with_wal_acceptors{with_wal_acceptors}",
"main",
f"--tenantid={tenant_2}"
])
if with_wal_acceptors:
wa_factory.start_n_new(3)

@@ -47,4 +57,4 @@ def test_tenants_normal_work(
cur.execute("CREATE TABLE t(key int primary key, value text)")
cur.execute("INSERT INTO t SELECT generate_series(1,100000), 'payload'")
cur.execute("SELECT sum(key) FROM t")
assert cur.fetchone() == (5000050000,)
assert cur.fetchone() == (5000050000, )

@@ -4,9 +4,8 @@ import psycopg2.extras
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
from fixtures.log_helper import log

def test_timeline_size(
zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, pg_bin
):

def test_timeline_size(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, pg_bin):
# Branch at the point where only 100 rows were inserted
zenith_cli.run(["branch", "test_timeline_size", "empty"])

@@ -23,13 +22,11 @@ def test_timeline_size(

# Create table, and insert the first 100 rows
cur.execute("CREATE TABLE foo (t text)")
cur.execute(
"""
cur.execute("""
INSERT INTO foo
SELECT 'long string to consume some space' || g
FROM generate_series(1, 10) g
"""
)
""")

res = client.branch_detail(UUID(pageserver.initial_tenant), "test_timeline_size")
assert res["current_logical_size"] == res["current_logical_size_non_incremental"]

@@ -9,7 +9,10 @@ pytest_plugins = ("fixtures.zenith_fixtures")
#
# Test branching, when a transaction is in prepared state
#
def test_twophase(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, pg_bin: PgBin):
def test_twophase(zenith_cli,
pageserver: ZenithPageserver,
postgres: PostgresFactory,
pg_bin: PgBin):
zenith_cli.run(["branch", "test_twophase", "empty"])

pg = postgres.create_start('test_twophase', config_lines=['max_prepared_transactions=5'])
@@ -79,8 +82,8 @@ def test_twophase(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFa
cur2.execute("ROLLBACK PREPARED 'insert_two'")

cur2.execute('SELECT * FROM foo')
assert cur2.fetchall() == [('one',), ('three',)]
assert cur2.fetchall() == [('one', ), ('three', )]

# Only one committed insert is visible on the original branch
cur.execute('SELECT * FROM foo')
assert cur.fetchall() == [('three',)]
assert cur.fetchall() == [('three', )]

@@ -3,11 +3,16 @@ from fixtures.log_helper import log

pytest_plugins = ("fixtures.zenith_fixtures")


#
# Test that the VM bit is cleared correctly at a HEAP_DELETE and
# HEAP_UPDATE record.
#
def test_vm_bit_clear(pageserver: ZenithPageserver, postgres: PostgresFactory, pg_bin, zenith_cli, base_dir):
def test_vm_bit_clear(pageserver: ZenithPageserver,
postgres: PostgresFactory,
pg_bin,
zenith_cli,
base_dir):
# Create a branch for us
zenith_cli.run(["branch", "test_vm_bit_clear", "empty"])
pg = postgres.create_start('test_vm_bit_clear')
@@ -49,13 +54,12 @@ def test_vm_bit_clear(pageserver: ZenithPageserver, postgres: PostgresFactory, p
''')

cur.execute('SELECT * FROM vmtest_delete WHERE id = 1')
assert(cur.fetchall() == []);
assert (cur.fetchall() == [])
cur.execute('SELECT * FROM vmtest_update WHERE id = 1')
assert(cur.fetchall() == []);
assert (cur.fetchall() == [])

cur.close()


# Check the same thing on the branch that we created right after the DELETE
#
# As of this writing, the code in smgrwrite() creates a full-page image whenever
@@ -75,6 +79,6 @@ def test_vm_bit_clear(pageserver: ZenithPageserver, postgres: PostgresFactory, p
''')

cur_new.execute('SELECT * FROM vmtest_delete WHERE id = 1')
assert(cur_new.fetchall() == []);
assert (cur_new.fetchall() == [])
cur_new.execute('SELECT * FROM vmtest_update WHERE id = 1')
assert(cur_new.fetchall() == []);
assert (cur_new.fetchall() == [])

@@ -16,7 +16,10 @@ pytest_plugins = ("fixtures.zenith_fixtures")

# basic test, write something in setup with wal acceptors, ensure that commits
# succeed and data is written
def test_normal_work(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, wa_factory):
def test_normal_work(zenith_cli,
pageserver: ZenithPageserver,
postgres: PostgresFactory,
wa_factory):
zenith_cli.run(["branch", "test_wal_acceptors_normal_work", "empty"])
wa_factory.start_n_new(3)
pg = postgres.create_start('test_wal_acceptors_normal_work',
@@ -34,7 +37,10 @@ def test_normal_work(zenith_cli, pageserver: ZenithPageserver, postgres: Postgre

# Run page server and multiple acceptors, and multiple compute nodes running
# against different timelines.
def test_many_timelines(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, wa_factory):
def test_many_timelines(zenith_cli,
pageserver: ZenithPageserver,
postgres: PostgresFactory,
wa_factory):
n_timelines = 2

wa_factory.start_n_new(3)
@@ -66,7 +72,10 @@ def test_many_timelines(zenith_cli, pageserver: ZenithPageserver, postgres: Post
# Check that dead minority doesn't prevent the commits: execute insert n_inserts
# times, with fault_probability chance of getting a wal acceptor down or up
# along the way. 2 of 3 are always alive, so the work keeps going.
def test_restarts(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, wa_factory: WalAcceptorFactory):
def test_restarts(zenith_cli,
pageserver: ZenithPageserver,
postgres: PostgresFactory,
wa_factory: WalAcceptorFactory):
fault_probability = 0.01
n_inserts = 1000
n_acceptors = 3
@@ -177,7 +186,11 @@ def stop_value():


# do inserts while concurrently getting up/down subsets of acceptors
def test_race_conditions(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, wa_factory, stop_value):
def test_race_conditions(zenith_cli,
pageserver: ZenithPageserver,
postgres: PostgresFactory,
wa_factory,
stop_value):

wa_factory.start_n_new(3)

@@ -318,6 +331,5 @@ def test_timeline_status(zenith_cli, pageserver, postgres, wa_factory: WalAccept
pg.stop().start()
pg.safe_psql("insert into t values(10)")

epoch_after_reboot = wa_http_cli.timeline_status(tenant_id,
timeline_id).acceptor_epoch
epoch_after_reboot = wa_http_cli.timeline_status(tenant_id, timeline_id).acceptor_epoch
assert epoch_after_reboot > epoch

@@ -19,13 +19,16 @@ class BankClient(object):
async def initdb(self):
await self.conn.execute('DROP TABLE IF EXISTS bank_accs')
await self.conn.execute('CREATE TABLE bank_accs(uid int primary key, amount int)')
await self.conn.execute('''
await self.conn.execute(
'''
INSERT INTO bank_accs
SELECT *, $1 FROM generate_series(0, $2)
''', self.init_amount, self.n_accounts - 1)
''',
self.init_amount,
self.n_accounts - 1)
await self.conn.execute('DROP TABLE IF EXISTS bank_log')
await self.conn.execute('CREATE TABLE bank_log(from_uid int, to_uid int, amount int)')


# TODO: Remove when https://github.com/zenithdb/zenith/issues/644 is fixed
await self.conn.execute('ALTER TABLE bank_accs SET (autovacuum_enabled = false)')
await self.conn.execute('ALTER TABLE bank_log SET (autovacuum_enabled = false)')
@@ -34,6 +37,7 @@ class BankClient(object):
row = await self.conn.fetchrow('SELECT sum(amount) AS sum FROM bank_accs')
assert row['sum'] == self.n_accounts * self.init_amount


async def bank_transfer(conn: asyncpg.Connection, from_uid, to_uid, amount):
# avoid deadlocks by sorting uids
if from_uid > to_uid:
@@ -42,16 +46,22 @@ async def bank_transfer(conn: asyncpg.Connection, from_uid, to_uid, amount):
async with conn.transaction():
await conn.execute(
'UPDATE bank_accs SET amount = amount + ($1) WHERE uid = $2',
amount, to_uid,
amount,
to_uid,
)
await conn.execute(
'UPDATE bank_accs SET amount = amount - ($1) WHERE uid = $2',
amount, from_uid,
amount,
from_uid,
)
await conn.execute('INSERT INTO bank_log VALUES ($1, $2, $3)',
from_uid, to_uid, amount,
await conn.execute(
'INSERT INTO bank_log VALUES ($1, $2, $3)',
from_uid,
to_uid,
amount,
)


class WorkerStats(object):
def __init__(self, n_workers):
self.counters = [0] * n_workers
@@ -114,7 +124,6 @@ async def run_restarts_under_load(pg: Postgres, acceptors: List[WalAcceptor], n_
worker = run_random_worker(stats, pg, worker_id, bank.n_accounts, max_transfer)
workers.append(asyncio.create_task(worker))


for it in range(iterations):
victim = acceptors[it % len(acceptors)]
victim.stop()
@@ -122,10 +131,7 @@ async def run_restarts_under_load(pg: Postgres, acceptors: List[WalAcceptor], n_
# Wait till previous victim recovers so it is ready for the next
# iteration by making any writing xact.
conn = await pg.connect_async()
await conn.execute(
'UPDATE bank_accs SET amount = amount WHERE uid = 1',
timeout=120
)
await conn.execute('UPDATE bank_accs SET amount = amount WHERE uid = 1', timeout=120)
await conn.close()

stats.reset()
@@ -145,7 +151,9 @@ async def run_restarts_under_load(pg: Postgres, acceptors: List[WalAcceptor], n_


# restart acceptors one by one, while executing and validating bank transactions
def test_restarts_under_load(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory,
def test_restarts_under_load(zenith_cli,
pageserver: ZenithPageserver,
postgres: PostgresFactory,
wa_factory: WalAcceptorFactory):

wa_factory.start_n_new(3)

@@ -23,8 +23,11 @@ def helper_compare_branch_list(page_server_cur, zenith_cli, initial_tenant: str)

res = zenith_cli.run(["branch", f"--tenantid={initial_tenant}"])
res.check_returncode()
branches_cli_with_tenant_arg = sorted(map(lambda b: b.split(':')[-1].strip(), res.stdout.strip().split("\n")))
branches_cli_with_tenant_arg = [b for b in branches_cli if b.startswith('test_cli_') or b in ('empty', 'main')]
branches_cli_with_tenant_arg = sorted(
map(lambda b: b.split(':')[-1].strip(), res.stdout.strip().split("\n")))
branches_cli_with_tenant_arg = [
b for b in branches_cli if b.startswith('test_cli_') or b in ('empty', 'main')
]

assert branches_api == branches_cli == branches_cli_with_tenant_arg

@@ -54,6 +57,7 @@ def test_cli_branch_list(pageserver: ZenithPageserver, zenith_cli):
assert 'test_cli_branch_list_main' in branches_cli
assert 'test_cli_branch_list_nested' in branches_cli


def helper_compare_tenant_list(page_server_cur, zenith_cli: ZenithCli):
page_server_cur.execute(f'tenant_list')
tenants_api = sorted(json.loads(page_server_cur.fetchone()[0]))

@@ -6,8 +6,14 @@ from fixtures.zenith_fixtures import ZenithPageserver, PostgresFactory
pytest_plugins = ("fixtures.zenith_fixtures")


def test_isolation(pageserver: ZenithPageserver, postgres: PostgresFactory, pg_bin, zenith_cli, test_output_dir, pg_distrib_dir,
base_dir, capsys):
def test_isolation(pageserver: ZenithPageserver,
postgres: PostgresFactory,
pg_bin,
zenith_cli,
test_output_dir,
pg_distrib_dir,
base_dir,
capsys):

# Create a branch for us
zenith_cli.run(["branch", "test_isolation", "empty"])

@@ -6,8 +6,14 @@ from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver, check_re
pytest_plugins = ("fixtures.zenith_fixtures")


def test_pg_regress(pageserver: ZenithPageserver, postgres: PostgresFactory, pg_bin, zenith_cli, test_output_dir, pg_distrib_dir,
base_dir, capsys):
def test_pg_regress(pageserver: ZenithPageserver,
postgres: PostgresFactory,
pg_bin,
zenith_cli,
test_output_dir,
pg_distrib_dir,
base_dir,
capsys):

# Create a branch for us
zenith_cli.run(["branch", "test_pg_regress", "empty"])

@@ -7,8 +7,14 @@ from fixtures.log_helper import log
pytest_plugins = ("fixtures.zenith_fixtures")


def test_zenith_regress(postgres: PostgresFactory, pg_bin, zenith_cli, test_output_dir, pg_distrib_dir,
base_dir, capsys, pageserver_port: PageserverPort):
def test_zenith_regress(postgres: PostgresFactory,
pg_bin,
zenith_cli,
test_output_dir,
pg_distrib_dir,
base_dir,
capsys,
pageserver_port: PageserverPort):

# Create a branch for us
zenith_cli.run(["branch", "test_zenith_regress", "empty"])

@@ -24,7 +24,6 @@ from typing import Any, Callable, Dict, Iterator, List, Optional, TypeVar, cast
from typing_extensions import Literal

from .utils import (get_self_dir, mkdir_if_needed, subprocess_capture)

"""
This file contains fixtures for micro-benchmarks.

@@ -55,7 +54,6 @@ in the test initialization, or measure disk usage after the test query.

"""


# All the results are collected in this list, as a tuple:
# (test_name: str, metric_name: str, metric_value: float, unit: str)
#
@@ -65,6 +63,7 @@ in the test initialization, or measure disk usage after the test query.
global zenbenchmark_results
zenbenchmark_results = []


class ZenithBenchmarkResults:
""" An object for recording benchmark results. """
def __init__(self):
@@ -77,6 +76,7 @@ class ZenithBenchmarkResults:

self.results.append((test_name, metric_name, metric_value, unit))


# Session scope fixture that initializes the results object
@pytest.fixture(autouse=True, scope='session')
def zenbenchmark_global(request) -> Iterator[ZenithBenchmarkResults]:
@@ -88,6 +88,7 @@ def zenbenchmark_global(request) -> Iterator[ZenithBenchmarkResults]:

yield zenbenchmark_results


class ZenithBenchmarker:
"""
An object for recording benchmark results. This is created for each test
@@ -103,7 +104,6 @@ class ZenithBenchmarker:
"""
self.results.record(self.request.node.name, metric_name, metric_value, unit)


@contextmanager
def record_duration(self, metric_name):
"""
@@ -134,7 +134,8 @@ class ZenithBenchmarker:
# The metric should be an integer, as it's a number of bytes. But in general
# all prometheus metrics are floats. So to be pedantic, read it as a float
# and round to integer.
matches = re.search(r'^pageserver_disk_io_bytes{io_operation="write"} (\S+)$', all_metrics,
matches = re.search(r'^pageserver_disk_io_bytes{io_operation="write"} (\S+)$',
all_metrics,
re.MULTILINE)
return int(round(float(matches.group(1))))

@@ -145,8 +146,7 @@ class ZenithBenchmarker:
# Fetch all the exposed prometheus metrics from page server
all_metrics = pageserver.http_client().get_metrics()
# See comment in get_io_writes()
matches = re.search(r'^pageserver_maxrss_kb (\S+)$', all_metrics,
re.MULTILINE)
matches = re.search(r'^pageserver_maxrss_kb (\S+)$', all_metrics, re.MULTILINE)
return int(round(float(matches.group(1))))

def get_timeline_size(self, repo_dir: str, tenantid: str, timelineid: str):
@@ -171,7 +171,11 @@ class ZenithBenchmarker:
yield
after = self.get_io_writes(pageserver)

self.results.record(self.request.node.name, metric_name, round((after - before) / (1024 * 1024)), 'MB')
self.results.record(self.request.node.name,
metric_name,
round((after - before) / (1024 * 1024)),
'MB')


@pytest.fixture(scope='function')
def zenbenchmark(zenbenchmark_global, request) -> Iterator[ZenithBenchmarker]:
@@ -185,9 +189,7 @@ def zenbenchmark(zenbenchmark_global, request) -> Iterator[ZenithBenchmarker]:

# Hook to print the results at the end
@pytest.hookimpl(hookwrapper=True)
def pytest_terminal_summary(
terminalreporter: TerminalReporter, exitstatus: int, config: Config
):
def pytest_terminal_summary(terminalreporter: TerminalReporter, exitstatus: int, config: Config):
yield

global zenbenchmark_results

@@ -1,6 +1,5 @@
import logging
import logging.config

"""
This file configures logging to use in python tests.
Logs are automatically captured and shown in their
@@ -27,17 +26,19 @@ LOGGING = {
"level": "INFO"
},
"root.wal_acceptor_async": {
"level": "INFO" # a lot of logs on DEBUG level
"level": "INFO"  # a lot of logs on DEBUG level
}
}
}


def getLogger(name='root') -> logging.Logger:
"""Method to get logger for tests.

Should be used to get correctly initialized logger. """
return logging.getLogger(name)


# default logger for tests
log = getLogger()

@@ -4,6 +4,7 @@ import subprocess
from typing import Any, List
from fixtures.log_helper import log


def get_self_dir() -> str:
""" Get the path to the directory where this script lives. """
return os.path.dirname(os.path.abspath(__file__))
@@ -58,6 +59,7 @@ def global_counter() -> int:
_global_counter += 1
return _global_counter


def lsn_to_hex(num: int) -> str:
""" Convert lsn from int to standard hex notation. """
return "{:X}/{:X}".format(num >> 32, num & 0xffffffff)

@@ -27,7 +27,6 @@ import requests

from .utils import (get_self_dir, mkdir_if_needed, subprocess_capture)
from fixtures.log_helper import log

"""
This file contains pytest fixtures. A fixture is a test resource that can be
summoned by placing its name in the test's arguments.
@@ -55,14 +54,15 @@ DEFAULT_POSTGRES_DIR = 'tmp_install'
BASE_PORT = 15000
WORKER_PORT_NUM = 100


def pytest_configure(config):
"""
Ensure that no unwanted daemons are running before we start testing.
Check that we do not overflow available ports range.
"""
numprocesses = config.getoption('numprocesses')
if numprocesses is not None and BASE_PORT + numprocesses * WORKER_PORT_NUM > 32768: # do not use ephemeral ports
raise Exception('Too many workers configured. Cannot distribute ports for services.')
if numprocesses is not None and BASE_PORT + numprocesses * WORKER_PORT_NUM > 32768:  # do not use ephemeral ports
raise Exception('Too many workers configured. Cannot distribute ports for services.')

# does not use -c as it is not supported on macOS
cmd = ['pgrep', 'pageserver|postgres|safekeeper']
@@ -106,7 +106,11 @@ class PgProtocol:
self.port = port
self.username = username or "zenith_admin"

def connstr(self, *, dbname: str = 'postgres', username: Optional[str] = None, password: Optional[str] = None) -> str:
def connstr(self,
*,
dbname: str = 'postgres',
username: Optional[str] = None,
password: Optional[str] = None) -> str:
"""
Build a libpq connection string for the Postgres instance.
"""
@@ -118,7 +122,12 @@ class PgProtocol:
return f'{res} password={password}'

# autocommit=True here by default because that's what we need most of the time
def connect(self, *, autocommit=True, dbname: str = 'postgres', username: Optional[str] = None, password: Optional[str] = None) -> PgConnection:
def connect(self,
*,
autocommit=True,
dbname: str = 'postgres',
username: Optional[str] = None,
password: Optional[str] = None) -> PgConnection:
"""
Connect to the node.
Returns psycopg2's connection object.
@@ -134,7 +143,11 @@ class PgProtocol:
conn.autocommit = autocommit
return conn

async def connect_async(self, *, dbname: str = 'postgres', username: Optional[str] = None, password: Optional[str] = None) -> asyncpg.Connection:
async def connect_async(self,
*,
dbname: str = 'postgres',
username: Optional[str] = None,
password: Optional[str] = None) -> asyncpg.Connection:
"""
Connect to the node from async python.
Returns asyncpg's connection object.
@@ -200,11 +213,11 @@ class ZenithCli:
# Intercept CalledProcessError and print more info
try:
res = subprocess.run(args,
env=self.env,
check=True,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
env=self.env,
check=True,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except subprocess.CalledProcessError as exc:
# this way command output will be recorded and shown in CI in failure message
msg = f"""\
@@ -242,21 +255,17 @@ class ZenithPageserverHttpClient(requests.Session):
return res.json()

def branch_create(self, tenant_id: uuid.UUID, name: str, start_point: str) -> Dict:
res = self.post(
f"http://localhost:{self.port}/v1/branch",
json={
'tenant_id': tenant_id.hex,
'name': name,
'start_point': start_point,
}
)
res = self.post(f"http://localhost:{self.port}/v1/branch",
json={
'tenant_id': tenant_id.hex,
'name': name,
'start_point': start_point,
})
res.raise_for_status()
return res.json()

def branch_detail(self, tenant_id: uuid.UUID, name: str) -> Dict:
res = self.get(
f"http://localhost:{self.port}/v1/branch/{tenant_id.hex}/{name}",
)
res = self.get(f"http://localhost:{self.port}/v1/branch/{tenant_id.hex}/{name}", )
res.raise_for_status()
return res.json()

@@ -298,7 +307,11 @@ class AuthKeys:
return token

def generate_tenant_token(self, tenant_id):
token = jwt.encode({"scope": "tenant", "tenant_id": tenant_id}, self.priv, algorithm="RS256")
token = jwt.encode({
"scope": "tenant", "tenant_id": tenant_id
},
self.priv,
algorithm="RS256")

if isinstance(token, bytes):
token = token.decode()
@@ -323,6 +336,7 @@ def worker_base_port(worker_seq_no: int):
# so workers have disjoint set of ports for services
return BASE_PORT + worker_seq_no * WORKER_PORT_NUM


class PortDistributor:
def __init__(self, base_port: int, port_number: int) -> None:
self.iterator = iter(range(base_port, base_port + port_number))
@@ -331,13 +345,15 @@ class PortDistributor:
try:
return next(self.iterator)
except StopIteration:
raise RuntimeError('port range configured for test is exhausted, consider enlarging the range')
raise RuntimeError(
'port range configured for test is exhausted, consider enlarging the range')


@zenfixture
def port_distributor(worker_base_port):
return PortDistributor(base_port=worker_base_port, port_number=WORKER_PORT_NUM)


@dataclass
class PageserverPort:
pg: int
@@ -352,14 +368,18 @@ class ZenithPageserver(PgProtocol):
self.running = False
self.initial_tenant = None
self.repo_dir = repo_dir
self.service_port = port # do not shadow PgProtocol.port which is just int
self.service_port = port  # do not shadow PgProtocol.port which is just int

def init(self, enable_auth: bool = False) -> 'ZenithPageserver':
"""
Initialize the repository, i.e. run "zenith init".
Returns self.
"""
cmd = ['init', f'--pageserver-pg-port={self.service_port.pg}', f'--pageserver-http-port={self.service_port.http}']
cmd = [
'init',
f'--pageserver-pg-port={self.service_port.pg}',
f'--pageserver-http-port={self.service_port.http}'
]
if enable_auth:
cmd.append('--enable-auth')
self.zenith_cli.run(cmd)
@@ -419,8 +439,6 @@ class ZenithPageserver(PgProtocol):
)



@zenfixture
def pageserver_port(port_distributor: PortDistributor) -> PageserverPort:
pg = port_distributor.get_port()
@@ -430,7 +448,8 @@ def pageserver_port(port_distributor: PortDistributor) -> PageserverPort:


@zenfixture
def pageserver(zenith_cli: ZenithCli, repo_dir: str, pageserver_port: PageserverPort) -> Iterator[ZenithPageserver]:
def pageserver(zenith_cli: ZenithCli, repo_dir: str,
pageserver_port: PageserverPort) -> Iterator[ZenithPageserver]:
"""
The 'pageserver' fixture provides a Page Server that's up and running.

@@ -442,7 +461,8 @@ def pageserver(zenith_cli: ZenithCli, repo_dir: str, pageserver_port: Pageserver
By convention, the test branches are named after the tests. For example,
test called 'test_foo' would create and use branches with the 'test_foo' prefix.
"""
ps = ZenithPageserver(zenith_cli=zenith_cli, repo_dir=repo_dir, port=pageserver_port).init().start()
ps = ZenithPageserver(zenith_cli=zenith_cli, repo_dir=repo_dir,
port=pageserver_port).init().start()
# For convenience in tests, create a branch from the freshly-initialized cluster.
zenith_cli.run(["branch", "empty", "main"])

@@ -452,6 +472,7 @@ def pageserver(zenith_cli: ZenithCli, repo_dir: str, pageserver_port: Pageserver
log.info('Starting pageserver cleanup')
ps.stop(True)


class PgBin:
""" A helper class for executing postgres binaries """
def __init__(self, log_dir: str, pg_distrib_dir: str):
@@ -513,9 +534,11 @@ class PgBin:
def pg_bin(test_output_dir: str, pg_distrib_dir: str) -> PgBin:
return PgBin(test_output_dir, pg_distrib_dir)


@pytest.fixture
def pageserver_auth_enabled(zenith_cli: ZenithCli, repo_dir: str, pageserver_port: PageserverPort):
with ZenithPageserver(zenith_cli=zenith_cli, repo_dir=repo_dir, port=pageserver_port).init(enable_auth=True).start() as ps:
with ZenithPageserver(zenith_cli=zenith_cli, repo_dir=repo_dir,
port=pageserver_port).init(enable_auth=True).start() as ps:
# For convenience in tests, create a branch from the freshly-initialized cluster.
zenith_cli.run(["branch", "empty", "main"])
yield ps
@@ -523,14 +546,19 @@ def pageserver_auth_enabled(zenith_cli: ZenithCli, repo_dir: str, pageserver_por

class Postgres(PgProtocol):
""" An object representing a running postgres daemon. """
def __init__(self, zenith_cli: ZenithCli, repo_dir: str, pg_bin: PgBin, tenant_id: str, port: int):
def __init__(self,
zenith_cli: ZenithCli,
repo_dir: str,
pg_bin: PgBin,
tenant_id: str,
port: int):
super().__init__(host='localhost', port=port)

self.zenith_cli = zenith_cli
self.running = False
self.repo_dir = repo_dir
self.node_name: Optional[str] = None # dubious, see asserts below
self.pgdata_dir: Optional[str] = None # Path to computenode PGDATA
self.pgdata_dir: Optional[str] = None  # Path to computenode PGDATA
self.tenant_id = tenant_id
self.pg_bin = pg_bin
# path to conf is <repo_dir>/pgdatadirs/tenants/<tenant_id>/<node_name>/postgresql.conf
@@ -555,7 +583,14 @@ class Postgres(PgProtocol):
if branch is None:
branch = node_name

self.zenith_cli.run(['pg', 'create', f'--tenantid={self.tenant_id}', f'--port={self.port}', node_name, branch])
self.zenith_cli.run([
'pg',
'create',
f'--tenantid={self.tenant_id}',
f'--port={self.port}',
node_name,
branch
])
self.node_name = node_name
path = pathlib.Path('pgdatadirs') / 'tenants' / self.tenant_id / self.node_name
self.pgdata_dir = os.path.join(self.repo_dir, path)
@@ -578,7 +613,8 @@ class Postgres(PgProtocol):

log.info(f"Starting postgres node {self.node_name}")

run_result = self.zenith_cli.run(['pg', 'start', f'--tenantid={self.tenant_id}', f'--port={self.port}', self.node_name])
run_result = self.zenith_cli.run(
['pg', 'start', f'--tenantid={self.tenant_id}', f'--port={self.port}', self.node_name])
self.running = True

log.info(f"stdout: {run_result.stdout}")
@@ -658,7 +694,8 @@ class Postgres(PgProtocol):

assert self.node_name is not None
assert self.tenant_id is not None
self.zenith_cli.run(['pg', 'stop', '--destroy', self.node_name, f'--tenantid={self.tenant_id}'])
self.zenith_cli.run(
['pg', 'stop', '--destroy', self.node_name, f'--tenantid={self.tenant_id}'])

return self

@@ -690,9 +727,15 @@ class Postgres(PgProtocol):
def __exit__(self, exc_type, exc, tb):
self.stop()


class PostgresFactory:
""" An object representing multiple running postgres daemons. """
def __init__(self, zenith_cli: ZenithCli, repo_dir: str, pg_bin: PgBin, initial_tenant: str, port_distributor: PortDistributor):
def __init__(self,
zenith_cli: ZenithCli,
repo_dir: str,
pg_bin: PgBin,
initial_tenant: str,
port_distributor: PortDistributor):
self.zenith_cli = zenith_cli
self.repo_dir = repo_dir
self.num_instances = 0
@@ -701,14 +744,12 @@ class PostgresFactory:
self.port_distributor = port_distributor
self.pg_bin = pg_bin

def create_start(
self,
node_name: str = "main",
branch: Optional[str] = None,
tenant_id: Optional[str] = None,
wal_acceptors: Optional[str] = None,
config_lines: Optional[List[str]] = None
) -> Postgres:
def create_start(self,
node_name: str = "main",
branch: Optional[str] = None,
tenant_id: Optional[str] = None,
wal_acceptors: Optional[str] = None,
config_lines: Optional[List[str]] = None) -> Postgres:

pg = Postgres(
zenith_cli=self.zenith_cli,
@@ -727,14 +768,12 @@ class PostgresFactory:
config_lines=config_lines,
)

def create(
self,
node_name: str = "main",
branch: Optional[str] = None,
tenant_id: Optional[str] = None,
wal_acceptors: Optional[str] = None,
config_lines: Optional[List[str]] = None
) -> Postgres:
def create(self,
node_name: str = "main",
branch: Optional[str] = None,
tenant_id: Optional[str] = None,
wal_acceptors: Optional[str] = None,
config_lines: Optional[List[str]] = None) -> Postgres:

pg = Postgres(
zenith_cli=self.zenith_cli,
@@ -754,13 +793,11 @@ class PostgresFactory:
config_lines=config_lines,
)

def config(
self,
node_name: str = "main",
tenant_id: Optional[str] = None,
wal_acceptors: Optional[str] = None,
config_lines: Optional[List[str]] = None
) -> Postgres:
def config(self,
node_name: str = "main",
tenant_id: Optional[str] = None,
wal_acceptors: Optional[str] = None,
config_lines: Optional[List[str]] = None) -> Postgres:

pg = Postgres(
zenith_cli=self.zenith_cli,
@@ -785,13 +822,18 @@ class PostgresFactory:

return self


@zenfixture
def initial_tenant(pageserver: ZenithPageserver):
return pageserver.initial_tenant


@zenfixture
def postgres(zenith_cli: ZenithCli, initial_tenant: str, repo_dir: str, pg_bin: PgBin, port_distributor: PortDistributor) -> Iterator[PostgresFactory]:
def postgres(zenith_cli: ZenithCli,
initial_tenant: str,
repo_dir: str,
pg_bin: PgBin,
port_distributor: PortDistributor) -> Iterator[PostgresFactory]:
pgfactory = PostgresFactory(
zenith_cli=zenith_cli,
repo_dir=repo_dir,
@@ -806,6 +848,7 @@ def postgres(zenith_cli: ZenithCli, initial_tenant: str, repo_dir: str, pg_bin:
log.info('Starting postgres cleanup')
pgfactory.stop_all()


def read_pid(path: Path):
""" Read content of file into number """
return int(path.read_text())
@@ -816,13 +859,14 @@ class WalAcceptorPort:
|
||||
pg: int
|
||||
http: int
|
||||
|
||||
|
||||
@dataclass
|
||||
class WalAcceptor:
|
||||
""" An object representing a running wal acceptor daemon. """
|
||||
wa_bin_path: Path
|
||||
data_dir: Path
|
||||
port: WalAcceptorPort
|
||||
num: int # identifier for logging
|
||||
num: int # identifier for logging
|
||||
pageserver_port: int
|
||||
auth_token: Optional[str] = None
|
||||
|
||||
@@ -887,10 +931,11 @@ class WalAcceptor:
|
||||
os.kill(pid, signal.SIGTERM)
|
||||
except Exception:
|
||||
# TODO: cleanup pid file on exit in wal acceptor
|
||||
pass # pidfile might be obsolete
|
||||
pass # pidfile might be obsolete
|
||||
return self
|
||||
|
||||
def append_logical_message(self, tenant_id: str, timeline_id: str, request: Dict[str, Any]) -> Dict[str, Any]:
|
||||
def append_logical_message(self, tenant_id: str, timeline_id: str,
|
||||
request: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""
|
||||
Send JSON_CTRL query to append LogicalMessage to WAL and modify
|
||||
safekeeper state. It will construct LogicalMessage from provided
|
||||
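For context, a hedged sketch of how a test might invoke this method; the request payload keys below are placeholders, since the actual JSON_CTRL schema is defined by the safekeeper and is not shown in this diff:

    result = wal_acceptor.append_logical_message(
        tenant_id=tenant,
        timeline_id=timeline,
        request={'lm_prefix': 'prefix', 'lm_message': 'message'})  # placeholder fields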
@@ -918,7 +963,11 @@ class WalAcceptor:

class WalAcceptorFactory:
    """ An object representing multiple running wal acceptors. """
    def __init__(self, zenith_binpath: Path, data_dir: Path, pageserver_port: int, port_distributor: PortDistributor):
    def __init__(self,
                 zenith_binpath: Path,
                 data_dir: Path,
                 pageserver_port: int,
                 port_distributor: PortDistributor):
        self.wa_bin_path = zenith_binpath / 'safekeeper'
        self.data_dir = data_dir
        self.instances: List[WalAcceptor] = []
@@ -964,7 +1013,10 @@ class WalAcceptorFactory:


@zenfixture
def wa_factory(zenith_binpath: str, repo_dir: str, pageserver_port: PageserverPort, port_distributor: PortDistributor) -> Iterator[WalAcceptorFactory]:
def wa_factory(zenith_binpath: str,
               repo_dir: str,
               pageserver_port: PageserverPort,
               port_distributor: PortDistributor) -> Iterator[WalAcceptorFactory]:
    """ Gives WalAcceptorFactory providing wal acceptors. """
    wafactory = WalAcceptorFactory(
        zenith_binpath=Path(zenith_binpath),
@@ -977,10 +1029,12 @@ def wa_factory(zenith_binpath: str, repo_dir: str, pageserver_port: PageserverPo
    log.info('Starting wal acceptors cleanup')
    wafactory.stop_all()


@dataclass
class PageserverTimelineStatus:
    acceptor_epoch: int


class WalAcceptorHttpClient(requests.Session):
    def __init__(self, port: int) -> None:
        super().__init__()
@@ -1094,6 +1148,7 @@ class TenantFactory:
def tenant_factory(zenith_cli: ZenithCli):
    return TenantFactory(zenith_cli)


#
# Test helpers
#
@@ -1104,8 +1159,15 @@ def list_files_to_compare(pgdata_dir: str):
        rel_dir = os.path.relpath(root, pgdata_dir)
        # Skip some dirs and files we don't want to compare
        skip_dirs = ['pg_wal', 'pg_stat', 'pg_stat_tmp', 'pg_subtrans', 'pg_logical']
        skip_files = ['pg_internal.init', 'pg.log', 'zenith.signal', 'postgresql.conf',
                      'postmaster.opts', 'postmaster.pid', 'pg_control']
        skip_files = [
            'pg_internal.init',
            'pg.log',
            'zenith.signal',
            'postgresql.conf',
            'postmaster.opts',
            'postmaster.pid',
            'pg_control'
        ]
        if rel_dir not in skip_dirs and filename not in skip_files:
            rel_file = os.path.join(rel_dir, filename)
            pgdata_files.append(rel_file)
@@ -1114,8 +1176,12 @@ def list_files_to_compare(pgdata_dir: str):
    log.info(pgdata_files)
    return pgdata_files

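Since list_files_to_compare returns paths relative to the data directory, the same list can drive a pairwise comparison of two datadirs. A minimal sketch using only the standard library (the two directory names are assumptions for illustration):

    import filecmp
    files = list_files_to_compare(pgdata_orig)
    match, mismatch, errors = filecmp.cmpfiles(pgdata_orig,
                                               pgdata_restored,
                                               files,
                                               shallow=False)
    assert not mismatch and not errors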
# pg is the existing and running compute node that we want to compare with a basebackup
def check_restored_datadir_content(zenith_cli: ZenithCli, test_output_dir: str, pg: Postgres, pageserver_pg_port: int):
def check_restored_datadir_content(zenith_cli: ZenithCli,
                                   test_output_dir: str,
                                   pg: Postgres,
                                   pageserver_pg_port: int):

    # Get the timeline ID of our branch. We need it for the 'basebackup' command
    with closing(pg.connect()) as conn:

@@ -5,6 +5,7 @@ from fixtures.log_helper import log

pytest_plugins = ("fixtures.zenith_fixtures", "fixtures.benchmark_fixture")


#
# Run bulk INSERT test.
#
@@ -15,7 +16,12 @@ pytest_plugins = ("fixtures.zenith_fixtures", "fixtures.benchmark_fixture")
# 3. Disk space used
# 4. Peak memory usage
#
def test_bulk_insert(postgres: PostgresFactory, pageserver: ZenithPageserver, pg_bin, zenith_cli, zenbenchmark, repo_dir: str):
def test_bulk_insert(postgres: PostgresFactory,
                     pageserver: ZenithPageserver,
                     pg_bin,
                     zenith_cli,
                     zenbenchmark,
                     repo_dir: str):
    # Create a branch for us
    zenith_cli.run(["branch", "test_bulk_insert", "empty"])

@@ -24,7 +30,7 @@ def test_bulk_insert(postgres: PostgresFactory, pageserver: ZenithPageserver, pg

    # Open a connection directly to the page server that we'll use to force
    # flushing the layers to disk
    psconn = pageserver.connect();
    psconn = pageserver.connect()
    pscur = psconn.cursor()

    # Get the timeline ID of our branch. We need it for the 'do_gc' command
@@ -48,5 +54,7 @@ def test_bulk_insert(postgres: PostgresFactory, pageserver: ZenithPageserver, pg
    zenbenchmark.record("peak_mem", zenbenchmark.get_peak_mem(pageserver) / 1024, 'MB')

    # Report disk space used by the repository
    timeline_size = zenbenchmark.get_timeline_size(repo_dir, pageserver.initial_tenant, timeline)
    zenbenchmark.record('size', timeline_size / (1024*1024), 'MB')
    timeline_size = zenbenchmark.get_timeline_size(repo_dir,
                                                   pageserver.initial_tenant,
                                                   timeline)
    zenbenchmark.record('size', timeline_size / (1024 * 1024), 'MB')

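The benchmarks in this commit all share one measurement pattern: open a direct pageserver connection, force layers to disk via do_gc, then record peak memory and on-disk timeline size. Condensed from lines that appear in the surrounding hunks:

    pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
    zenbenchmark.record("peak_mem", zenbenchmark.get_peak_mem(pageserver) / 1024, 'MB')
    timeline_size = zenbenchmark.get_timeline_size(repo_dir,
                                                   pageserver.initial_tenant,
                                                   timeline)
    zenbenchmark.record('size', timeline_size / (1024 * 1024), 'MB')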
@@ -37,7 +37,9 @@ def test_bulk_tenant_create(

    tenant = tenant_factory.create()
    zenith_cli.run([
        "branch", f"test_bulk_tenant_create_{tenants_count}_{i}_{use_wal_acceptors}", "main",
        "branch",
        f"test_bulk_tenant_create_{tenants_count}_{i}_{use_wal_acceptors}",
        "main",
        f"--tenantid={tenant}"
    ])


@@ -5,12 +5,18 @@ from fixtures.log_helper import log

pytest_plugins = ("fixtures.zenith_fixtures", "fixtures.benchmark_fixture")


#
# Test buffering GiST build. It WAL-logs the whole relation, in 32-page chunks.
# As of this writing, we duplicate those giant WAL records for each page,
# which makes the delta layer about 32x larger than it needs to be.
#
def test_gist_buffering_build(postgres: PostgresFactory, pageserver: ZenithPageserver, pg_bin, zenith_cli, zenbenchmark, repo_dir: str):
def test_gist_buffering_build(postgres: PostgresFactory,
                              pageserver: ZenithPageserver,
                              pg_bin,
                              zenith_cli,
                              zenbenchmark,
                              repo_dir: str):
    # Create a branch for us
    zenith_cli.run(["branch", "test_gist_buffering_build", "empty"])

@@ -19,7 +25,7 @@ def test_gist_buffering_build(postgres: PostgresFactory, pageserver: ZenithPages

    # Open a connection directly to the page server that we'll use to force
    # flushing the layers to disk
    psconn = pageserver.connect();
    psconn = pageserver.connect()
    pscur = psconn.cursor()

    # Get the timeline ID of our branch. We need it for the 'do_gc' command
@@ -29,13 +35,17 @@ def test_gist_buffering_build(postgres: PostgresFactory, pageserver: ZenithPages
    timeline = cur.fetchone()[0]

    # Create test table.
    cur.execute("create table gist_point_tbl(id int4, p point)");
    cur.execute("insert into gist_point_tbl select g, point(g, g) from generate_series(1, 1000000) g;");
    cur.execute("create table gist_point_tbl(id int4, p point)")
    cur.execute(
        "insert into gist_point_tbl select g, point(g, g) from generate_series(1, 1000000) g;"
    )

    # Build the index.
    with zenbenchmark.record_pageserver_writes(pageserver, 'pageserver_writes'):
        with zenbenchmark.record_duration('build'):
            cur.execute("create index gist_pointidx2 on gist_point_tbl using gist(p) with (buffering = on)");
            cur.execute(
                "create index gist_pointidx2 on gist_point_tbl using gist(p) with (buffering = on)"
            )

    # Flush the layers from memory to disk. This is included in the reported
    # time and I/O
@@ -45,5 +55,7 @@ def test_gist_buffering_build(postgres: PostgresFactory, pageserver: ZenithPages
    zenbenchmark.record("peak_mem", zenbenchmark.get_peak_mem(pageserver) / 1024, 'MB')

    # Report disk space used by the repository
    timeline_size = zenbenchmark.get_timeline_size(repo_dir, pageserver.initial_tenant, timeline)
    zenbenchmark.record('size', timeline_size / (1024*1024), 'MB')
    timeline_size = zenbenchmark.get_timeline_size(repo_dir,
                                                   pageserver.initial_tenant,
                                                   timeline)
    zenbenchmark.record('size', timeline_size / (1024 * 1024), 'MB')

@@ -5,6 +5,7 @@ from fixtures.log_helper import log

pytest_plugins = ("fixtures.zenith_fixtures", "fixtures.benchmark_fixture")


#
# Run a very short pgbench test.
#
@@ -14,7 +15,12 @@ pytest_plugins = ("fixtures.zenith_fixtures", "fixtures.benchmark_fixture")
# 2. Time to run 5000 pgbench transactions
# 3. Disk space used
#
def test_pgbench(postgres: PostgresFactory, pageserver: ZenithPageserver, pg_bin, zenith_cli, zenbenchmark, repo_dir: str):
def test_pgbench(postgres: PostgresFactory,
                 pageserver: ZenithPageserver,
                 pg_bin,
                 zenith_cli,
                 zenbenchmark,
                 repo_dir: str):
    # Create a branch for us
    zenith_cli.run(["branch", "test_pgbench_perf", "empty"])

@@ -23,7 +29,7 @@ def test_pgbench(postgres: PostgresFactory, pageserver: ZenithPageserver, pg_bin

    # Open a connection directly to the page server that we'll use to force
    # flushing the layers to disk
    psconn = pageserver.connect();
    psconn = pageserver.connect()
    pscur = psconn.cursor()

    # Get the timeline ID of our branch. We need it for the 'do_gc' command
@@ -53,4 +59,4 @@ def test_pgbench(postgres: PostgresFactory, pageserver: ZenithPageserver, pg_bin

    # Report disk space used by the repository
    timeline_size = zenbenchmark.get_timeline_size(repo_dir, pageserver.initial_tenant, timeline)
    zenbenchmark.record('size', timeline_size / (1024*1024), 'MB')
    zenbenchmark.record('size', timeline_size / (1024 * 1024), 'MB')

@@ -17,7 +17,13 @@ from fixtures.log_helper import log

pytest_plugins = ("fixtures.zenith_fixtures", "fixtures.benchmark_fixture")

def test_write_amplification(postgres: PostgresFactory, pageserver: ZenithPageserver, pg_bin, zenith_cli, zenbenchmark, repo_dir: str):

def test_write_amplification(postgres: PostgresFactory,
                             pageserver: ZenithPageserver,
                             pg_bin,
                             zenith_cli,
                             zenbenchmark,
                             repo_dir: str):
    # Create a branch for us
    zenith_cli.run(["branch", "test_write_amplification", "empty"])

@@ -26,7 +32,7 @@ def test_write_amplification(postgres: PostgresFactory, pageserver: ZenithPagese

    # Open a connection directly to the page server that we'll use to force
    # flushing the layers to disk
    psconn = pageserver.connect();
    psconn = pageserver.connect()
    pscur = psconn.cursor()

    with closing(pg.connect()) as conn:
@@ -71,5 +77,7 @@ def test_write_amplification(postgres: PostgresFactory, pageserver: ZenithPagese
    pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")

    # Report disk space used by the repository
    timeline_size = zenbenchmark.get_timeline_size(repo_dir, pageserver.initial_tenant, timeline)
    zenbenchmark.record('size', timeline_size / (1024*1024), 'MB')
    timeline_size = zenbenchmark.get_timeline_size(repo_dir,
                                                   pageserver.initial_tenant,
                                                   timeline)
    zenbenchmark.record('size', timeline_size / (1024 * 1024), 'MB')

@@ -10,6 +10,7 @@ max-line-length = 100
[yapf]
based_on_style = pep8
column_limit = 100
split_all_top_level_comma_separated_values = true

[mypy]
# some tests don't typecheck when this flag is set

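With these settings, whenever yapf has to wrap a line that exceeds 100 columns, it places every top-level comma-separated value on its own line instead of packing as many as fit, which is exactly the shape of the signature rewrites above. A small illustration (the function is invented for the example):

    # before
    def frobnicate(self, node_name: str = "main", branch: Optional[str] = None, tenant_id: Optional[str] = None) -> Postgres:
        ...

    # after yapf with split_all_top_level_comma_separated_values
    def frobnicate(self,
                   node_name: str = "main",
                   branch: Optional[str] = None,
                   tenant_id: Optional[str] = None) -> Postgres:
        ...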