Mirror of https://github.com/neondatabase/neon.git (synced 2026-01-13 16:32:56 +00:00)
## Problem

Statistics are saved in a local file and are therefore lost on compute restart. Persist them in the pageserver using the same AUX file mechanism used for replication slots. See more about the motivation in https://neondb.slack.com/archives/C04DGM6SMTM/p1703077676522789

## Summary of changes

Persist the pgstat file using the AUX mechanism.

Postgres PRs:
https://github.com/neondatabase/postgres/pull/547
https://github.com/neondatabase/postgres/pull/446
https://github.com/neondatabase/postgres/pull/445

Related to #6684 and #6228

Co-authored-by: Konstantin Knizhnik <knizhnik@neon.tech>
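In short, the behavior under test: once pending statistics are flushed, the per-table counters in `pg_stat_user_tables` survive a clean endpoint restart instead of resetting to zero. Below is a minimal sketch of that check, reusing the `NeonEnv` endpoint API exercised by the test that follows (the helper name and row count are illustrative only, not part of this change):

```python
# Illustrative sketch, not part of this PR: counters should survive a clean restart.
def check_pgstat_survives_restart(env):
    endpoint = env.endpoints.create_start(
        "main", config_lines=["neon_pgstat_file_size_limit=100kB"]
    )
    cur = endpoint.connect().cursor()
    cur.execute("create table t(x integer)")
    cur.execute("insert into t values (generate_series(1,100))")
    cur.execute("select pg_stat_force_next_flush()")  # flush pending backend statistics
    cur.execute("select n_tup_ins from pg_stat_user_tables where relname = 't'")
    before = cur.fetchall()[0][0]

    endpoint.stop()  # clean shutdown persists the pgstat file via the AUX mechanism
    endpoint.start()

    cur = endpoint.connect().cursor()
    cur.execute("select n_tup_ins from pg_stat_user_tables where relname = 't'")
    assert cur.fetchall()[0][0] == before  # counters restored, not reset to zero
```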
import pytest
from fixtures.neon_fixtures import NeonEnv
from fixtures.pg_version import PgVersion


#
# Test that pgstat statistics are preserved across sessions
#
def test_pgstat(neon_simple_env: NeonEnv):
    env = neon_simple_env
    if env.pg_version == PgVersion.V14:
        pytest.skip("PG14 doesn't support pgstat statistic persistence")

    n = 10000
    endpoint = env.endpoints.create_start(
        "main", config_lines=["neon_pgstat_file_size_limit=100kB", "autovacuum=off"]
    )

    con = endpoint.connect()
    cur = con.cursor()

    cur.execute("create table t(x integer)")
    cur.execute(f"insert into t values (generate_series(1,{n}))")
    cur.execute("vacuum analyze t")
    cur.execute("select sum(x) from t")
    cur.execute("update t set x=x+1")

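    # force pending backend statistics to be flushed so the counters below reflect the work done above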
cur.execute("select pg_stat_force_next_flush()")
|
|
|
|
cur.execute(
|
|
"select seq_scan,seq_tup_read,n_tup_ins,n_tup_upd,n_live_tup,n_dead_tup, vacuum_count,analyze_count from pg_stat_user_tables"
|
|
)
|
|
rec = cur.fetchall()[0]
|
|
assert rec == (2, n * 2, n, n, n * 2, n, 1, 1)
|
|
|
|
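    # clean shutdown: the pgstat file is persisted via the AUX file mechanism
    # and should be restored when the endpoint starts again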
    endpoint.stop()
    endpoint.start()

    con = endpoint.connect()
    cur = con.cursor()

    cur.execute(
        "select seq_scan,seq_tup_read,n_tup_ins,n_tup_upd,n_live_tup,n_dead_tup, vacuum_count,analyze_count from pg_stat_user_tables"
    )
    rec = cur.fetchall()[0]
    assert rec == (2, n * 2, n, n, n * 2, n, 1, 1)

cur.execute("update t set x=x+1")
|
|
|
|
# stop without checkpoint
|
|
endpoint.stop(mode="immediate")
|
|
endpoint.start()
|
|
|
|
con = endpoint.connect()
|
|
cur = con.cursor()
|
|
|
|
cur.execute(
|
|
"select seq_scan,seq_tup_read,n_tup_ins,n_tup_upd,n_live_tup,n_dead_tup, vacuum_count,analyze_count from pg_stat_user_tables"
|
|
)
|
|
rec = cur.fetchall()[0]
|
|
# pgstat information should be discarded in case of abnormal termination
|
|
assert rec == (0, 0, 0, 0, 0, 0, 0, 0)
|
|
|
|
cur.execute("select sum(x) from t")
|
|
|
|
# create more relations to increase size of statistics
|
|
for i in range(1, 1000):
|
|
cur.execute(f"create table t{i}(pk integer primary key)")
|
|
|
|
cur.execute("select pg_stat_force_next_flush()")
|
|
|
|
endpoint.stop()
|
|
endpoint.start()
|
|
|
|
con = endpoint.connect()
|
|
cur = con.cursor()
|
|
|
|
cur.execute(
|
|
"select seq_scan,seq_tup_read,n_tup_ins,n_tup_upd,n_live_tup,n_dead_tup, vacuum_count,analyze_count from pg_stat_user_tables"
|
|
)
|
|
rec = cur.fetchall()[0]
|
|
# pgstat information is not restored because its size exeeds 100k threshold
|
|
assert rec == (0, 0, 0, 0, 0, 0, 0, 0)
|