Add more variants of the sequential scan performance tests.

More rows, and test with serial and parallel plans. But fewer iterations,
so that the tests run in < 1 minute, and we don't need to mark them as
"slow".
Author: Heikki Linnakangas
Date:   2022-03-25 23:42:13 +02:00
parent b8cba059a5
commit 5e04dad360

@@ -1,8 +1,5 @@
 # Test sequential scan speed
 #
-# The test table is large enough (3-4 MB) that it doesn't fit in the compute node
-# cache, so the seqscans go to the page server. But small enough that it fits
-# into memory in the page server.
 from contextlib import closing
 from dataclasses import dataclass
 from fixtures.zenith_fixtures import ZenithEnv
@@ -12,11 +9,18 @@ from fixtures.compare_fixtures import PgCompare
 import pytest
 
 
-@pytest.mark.parametrize('rows', [
-    pytest.param(100000),
-    pytest.param(1000000, marks=pytest.mark.slow),
-])
-def test_small_seqscans(zenith_with_baseline: PgCompare, rows: int):
+@pytest.mark.parametrize(
+    'rows,iters,workers',
+    [
+        # The test table is large enough (3-4 MB) that it doesn't fit in the compute node
+        # cache, so the seqscans go to the page server. But small enough that it fits
+        # into memory in the page server.
+        pytest.param(100000, 100, 0),
+        # Also test with a larger table, with and without parallelism
+        pytest.param(10000000, 1, 0),
+        pytest.param(10000000, 1, 4)
+    ])
+def test_seqscans(zenith_with_baseline: PgCompare, rows: int, iters: int, workers: int):
     env = zenith_with_baseline
 
     with closing(env.pg.connect()) as conn:
@@ -36,6 +40,8 @@ def test_small_seqscans(zenith_with_baseline: PgCompare, rows: int):
         assert int(shared_buffers) < int(table_size)
         env.zenbenchmark.record("table_size", table_size, 'bytes', MetricReport.TEST_PARAM)
 
+        cur.execute(f"set max_parallel_workers_per_gather = {workers}")
+
         with env.record_duration('run'):
-            for i in range(1000):
+            for i in range(iters):
                 cur.execute('select count(*) from t;')
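
The new "workers" parameter drives the max_parallel_workers_per_gather
setting added above. A hedged sketch of checking which plan shape each
value yields, assuming a stock PostgreSQL instance and the test's table
"t" (the psycopg2 connection string is illustrative; the real test goes
through its own fixtures):

    from contextlib import closing

    import psycopg2  # assumed driver, standing in for the test fixtures

    with closing(psycopg2.connect("dbname=postgres")) as conn:  # hypothetical DSN
        with conn.cursor() as cur:
            for workers in (0, 4):
                cur.execute(f"set max_parallel_workers_per_gather = {workers}")
                cur.execute("explain select count(*) from t")
                plan = "\n".join(row[0] for row in cur.fetchall())
                # workers = 0 forces a serial Seq Scan. With workers = 4 the
                # planner can choose a Parallel Seq Scan under a Gather node,
                # but only when the table exceeds min_parallel_table_scan_size
                # (8MB by default), so the 3-4 MB table stays serial anyway.
                print(f"workers={workers}:\n{plan}")

That size threshold is plausibly why only the 10000000-row cases are
exercised with workers = 4.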