Add slow seqscan perf test (#1283)

Author:    bojanserafimov
Date:      2022-02-16 10:59:51 -05:00
Committer: GitHub
Parent:    afb3342e46
Commit:    335abfcc28

4 changed files with 59 additions and 7 deletions
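This change parametrizes test_small_seqscans over the row count and gates the 1,000,000-row case behind pytest.mark.slow, so the heavy variant only runs when slow tests are explicitly selected. For readers unfamiliar with the pattern: `slow` is not a built-in pytest marker; it has to be registered and filtered by the test harness. Below is a minimal, self-contained sketch of one common way to wire that up — the hook names are pytest's own, but the `--run-slow` flag and the conftest placement are illustrative assumptions, not taken from this repo:

# conftest.py -- illustrative sketch of registering and gating a `slow` marker.
import pytest


def pytest_configure(config):
    # Register the marker so pytest doesn't warn (and --strict-markers accepts it).
    config.addinivalue_line("markers", "slow: marks tests as slow to run")


def pytest_addoption(parser):
    # Hypothetical opt-in flag; this repo may gate slow tests differently.
    parser.addoption("--run-slow", action="store_true", default=False,
                     help="also run tests marked with @pytest.mark.slow")


def pytest_collection_modifyitems(config, items):
    if config.getoption("--run-slow"):
        return
    skip_slow = pytest.mark.skip(reason="needs --run-slow to run")
    for item in items:
        if "slow" in item.keywords:
            item.add_marker(skip_slow)

Alternatively, slow tests can simply be deselected at invocation time with `pytest -m "not slow"`; marker-based selection works out of the box once the marker is registered.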


@@ -4,10 +4,12 @@
 # cache, so the seqscans go to the page server. But small enough that it fits
 # into memory in the page server.
 from contextlib import closing
 from dataclasses import dataclass
 from fixtures.zenith_fixtures import ZenithEnv
 from fixtures.log_helper import log
+from fixtures.benchmark_fixture import MetricReport, ZenithBenchmarker
 from fixtures.compare_fixtures import PgCompare
+import pytest
 
 pytest_plugins = (
     "fixtures.zenith_fixtures",
@@ -16,13 +18,17 @@ pytest_plugins = (
 )
 
 
-def test_small_seqscans(zenith_with_baseline: PgCompare):
+@pytest.mark.parametrize('rows', [
+    pytest.param(100000),
+    pytest.param(1000000, marks=pytest.mark.slow),
+])
+def test_small_seqscans(zenith_with_baseline: PgCompare, rows: int):
     env = zenith_with_baseline
 
     with closing(env.pg.connect()) as conn:
         with conn.cursor() as cur:
             cur.execute('create table t (i integer);')
-            cur.execute('insert into t values (generate_series(1,100000));')
+            cur.execute(f'insert into t values (generate_series(1,{rows}));')
 
             # Verify that the table is larger than shared_buffers
             cur.execute('''
@@ -30,8 +36,11 @@ def test_small_seqscans(zenith_with_baseline: PgCompare):
             from pg_settings where name = 'shared_buffers'
             ''')
             row = cur.fetchone()
-            log.info(f"shared_buffers is {row[0]}, table size {row[1]}")
-            assert int(row[0]) < int(row[1])
+            shared_buffers = row[0]
+            table_size = row[1]
+            log.info(f"shared_buffers is {shared_buffers}, table size {table_size}")
+            assert int(shared_buffers) < int(table_size)
+            env.zenbenchmark.record("table_size", table_size, 'bytes', MetricReport.TEST_PARAM)
 
             with env.record_duration('run'):
                 for i in range(1000):
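
One added line worth a note: `env.zenbenchmark.record("table_size", ...)` reports the table size with `MetricReport.TEST_PARAM`, tagging it as a description of the workload rather than a measured result, so benchmark tooling can tell parameters apart from timings such as the `record_duration('run')` block that follows. A rough sketch of what such a recorder boils down to — a hypothetical simplification for illustration, not the actual `fixtures.benchmark_fixture` implementation:

import enum
from typing import Any, Dict, List


class MetricReport(enum.Enum):
    # Simplified stand-in for the enum imported in the diff above.
    TEST_PARAM = "test_param"            # describes the workload, e.g. table_size
    LOWER_IS_BETTER = "lower_is_better"  # a measured result, e.g. a duration


class Benchmarker:
    """Hypothetical sketch of a zenbenchmark-style recorder."""

    def __init__(self) -> None:
        self.results: List[Dict[str, Any]] = []

    def record(self, name: str, value: Any, unit: str, report: MetricReport) -> None:
        # A real fixture would attach this to the pytest report; here we just
        # accumulate entries so parameters and results stay distinguishable.
        self.results.append({
            "name": name,
            "value": value,
            "unit": unit,
            "report": report.value,
        })

With the metric recorded per parametrization, the fast (100k rows) and slow (1M rows) runs presumably show up as separate, comparable benchmark entries rather than one mixed result.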