mirror of
https://github.com/neondatabase/neon.git
synced 2025-12-24 22:59:59 +00:00
## Problem We started to store test results in a new format in https://github.com/neondatabase/neon/pull/4549. This PR switches the scripts to query this DB. (We can completely remove the old DB/ingestion scripts a couple of weeks after the PR is merged.) ## Summary of changes - `scripts/benchmark_durations.py` queries the new database - `scripts/flaky_tests.py` queries the new database
111 lines
3.1 KiB
Python
Executable File
111 lines
3.1 KiB
Python
Executable File
#! /usr/bin/env python3
|
|
|
|
import argparse
|
|
import json
|
|
import logging
|
|
from collections import defaultdict
|
|
from typing import DefaultDict, Dict
|
|
|
|
import psycopg2
|
|
import psycopg2.extras
|
|
|
|
# Tests considered flaky over the last N days: any test that failed or broke
# on the main branch, plus anything already flagged as flaky in the DB.
# The single %s placeholder is filled in by psycopg2 (see cur.execute in
# main()) with the number of days to look back.
FLAKY_TESTS_QUERY = """
    SELECT
        DISTINCT parent_suite, suite, name
    FROM results
    WHERE
        started_at > CURRENT_DATE - INTERVAL '%s' day
        AND (
            (status IN ('failed', 'broken') AND reference = 'refs/heads/main')
            OR flaky
        )
    ;
"""
|
def main(args: argparse.Namespace):
    """Fetch flaky tests from the results DB and dump them as JSON.

    Writes to ``args.output`` a mapping of
    ``parent_suite -> suite -> parametrized test name -> True``.

    Parameters (via ``args``):
        connstr: connection string for the test results database.
        days: how many days back to look for flaky tests.
        output: writable file object for the resulting JSON.
        build_type: build type ("debug" or "release") to parametrize test names with.
        pg_version: Postgres major version to parametrize test names with.

    On a connection error the script degrades gracefully: it logs the error
    and produces an empty result instead of crashing.
    """
    connstr = args.connstr
    interval_days = args.days
    output = args.output

    build_type = args.build_type
    pg_version = args.pg_version

    res: DefaultDict[str, DefaultDict[str, Dict[str, bool]]]
    res = defaultdict(lambda: defaultdict(dict))

    try:
        logging.info("connecting to the database...")
        # NOTE(review): `with psycopg2.connect(...)` only wraps the
        # transaction, it does not close the connection — acceptable for a
        # short-lived script, but worth knowing.
        with psycopg2.connect(connstr, connect_timeout=30) as conn:
            with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
                logging.info("fetching flaky tests...")
                cur.execute(FLAKY_TESTS_QUERY, (interval_days,))
                rows = cur.fetchall()
    except psycopg2.OperationalError as exc:
        # Bug fix: the original passed `exc` as a lazy-format argument
        # without a %s placeholder, so the actual error was never printed.
        logging.error("cannot fetch flaky tests from the DB due to an error: %s", exc)
        rows = []

    for row in rows:
        # We don't want to automatically rerun tests in a performance suite
        if row["parent_suite"] != "test_runner.regress":
            continue

        if row["name"].endswith("]"):
            # Already-parametrized test: splice the build type and Postgres
            # version in front of the existing parameters.
            parametrized_test = row["name"].replace(
                "[",
                f"[{build_type}-pg{pg_version}-",
            )
        else:
            # Non-parametrized test: append build type / Postgres version
            # as its parameter list.
            parametrized_test = f"{row['name']}[{build_type}-pg{pg_version}]"

        res[row["parent_suite"]][row["suite"]][parametrized_test] = True

        logging.info(
            f"\t{row['parent_suite'].replace('.', '/')}/{row['suite']}.py::{parametrized_test}"
        )

    logging.info(f"saving results to {output.name}")
    json.dump(res, output, indent=2)
|
if __name__ == "__main__":
    # CLI entry point: build the argument parser, configure bare-message
    # logging, then hand the parsed arguments over to main().
    cli = argparse.ArgumentParser(description="Detect flaky tests in the last N days")
    cli.add_argument(
        "--output",
        type=argparse.FileType("w"),
        default="flaky.json",
        help="path to output json file (default: flaky.json)",
    )
    cli.add_argument(
        "--days",
        required=False,
        default=10,
        type=int,
        help="how many days to look back for flaky tests (default: 10)",
    )
    cli.add_argument(
        "--build-type",
        required=True,
        type=str,
        help="for which build type to create list of flaky tests (debug or release)",
    )
    cli.add_argument(
        "--pg-version",
        required=True,
        type=int,
        help="for which Postgres version to create list of flaky tests (14, 15, etc.)",
    )
    cli.add_argument(
        "connstr",
        help="connection string to the test results database",
    )

    # Plain-message format at INFO level so the per-test lines are visible.
    logging.basicConfig(
        format="%(message)s",
        level=logging.INFO,
    )

    main(cli.parse_args())