|
| 1 | +#!/usr/bin/env python3 |
| 2 | + |
| 3 | +# This script reads a CSV file produced by the following invocation: |
| 4 | +# |
| 5 | +# veristat --emit file,prog,verdict,states \ |
| 6 | +# --output-format csv \ |
| 7 | +# --compare ... |
| 8 | +# |
| 9 | +# And produces a markdown summary for the file. |
| 10 | +# The summary is printed to standard output and appended to a file |
| 11 | +# pointed to by GITHUB_STEP_SUMMARY variable. |
| 12 | +# |
| 13 | +# Script exits with return code 1 if there are new failures in the |
| 14 | +# veristat results. |
| 15 | +# |
| 16 | +# For testing purposes invoke as follows: |
| 17 | +# |
| 18 | +# GITHUB_STEP_SUMMARY=/dev/null python3 veristat-compare.py test.csv |
| 19 | +# |
| 20 | +# File format (columns): |
| 21 | +# 0. file_name |
| 22 | +# 1. prog_name |
| 23 | +# 2. verdict_base |
| 24 | +# 3. verdict_comp |
| 25 | +# 4. verdict_diff |
| 26 | +# 5. total_states_base |
| 27 | +# 6. total_states_comp |
| 28 | +# 7. total_states_diff |
| 29 | +# |
| 30 | +# Records sample: |
| 31 | +# file-a,a,success,failure,MISMATCH,12,12,+0 (+0.00%) |
| 32 | +# file-b,b,success,success,MATCH,67,67,+0 (+0.00%) |
| 33 | +# |
| 34 | +# For better readability suffixes '_OLD' and '_NEW' |
| 35 | +# are used instead of '_base' and '_comp' for variable |
| 36 | +# names etc. |
| 37 | + |
| 38 | +import io |
| 39 | +import os |
| 40 | +import sys |
| 41 | +import csv |
| 42 | +import logging |
| 43 | +import argparse |
| 44 | +from functools import reduce |
| 45 | +from dataclasses import dataclass |
| 46 | + |
# Relative change (in percent) of processed states above which a row is
# reported even when the verdict did not change.
# NOTE(review): the 'TRESHOLD' (sic) spelling is kept as-is because other
# code in this file refers to the constant by this name.
TRESHOLD_PCT = 0

# Expected CSV header row, in order.
HEADERS = ['file_name', 'prog_name', 'verdict_base', 'verdict_comp',
           'verdict_diff', 'total_states_base', 'total_states_comp',
           'total_states_diff']

# Column indices into a parsed CSV record.
FILE = 0
PROG = 1
VERDICT_OLD = 2
VERDICT_NEW = 3
STATES_OLD = 5
STATES_NEW = 6

# Given a table row, compute relative increase in the number of
# processed states.
def compute_diff(v):
    """Relative change in processed verifier states for record `v`.

    'N/A' cells count as zero; a zero baseline is reported as 1
    (i.e. a +100% increase), regardless of the new value.
    """
    def states(cell):
        # 'N/A' appears when the program is present on only one side.
        return 0 if cell == 'N/A' else int(cell)

    old = states(v[STATES_OLD])
    new = states(v[STATES_NEW])
    if old == 0:
        return 1
    return (new - old) / old
| 68 | + |
@dataclass
class VeristatInfo:
    """Aggregated outcome of parsing a veristat comparison CSV.

    Instances are produced by parse_table().
    """
    # Rows to report: [file_name, prog_name, verdict, states-diff-%].
    table: list
    # True if any verdict changed or a states diff crossed the threshold.
    changes: bool
    # True if some program's new verdict is 'failure' (makes CI fail).
    new_failures: bool
| 74 | + |
# Read CSV table expecting the above described format.
# Return VeristatInfo instance.
def parse_table(csv_filename):
    """Parse a veristat comparison CSV and collect rows worth reporting.

    Parameters:
      csv_filename: path to a CSV produced by 'veristat --compare'.

    Returns a VeristatInfo with:
      - table: rows [file, prog, verdict, diff-%] for programs whose
        verdict changed or whose processed-states delta exceeds
        TRESHOLD_PCT;
      - changes: True if any such row was collected;
      - new_failures: True if some program's new verdict is 'failure'.

    Raises ValueError if the header row does not match HEADERS.
    """
    new_failures = False
    changes = False
    table = []

    with open(csv_filename, newline='') as file:
        reader = csv.reader(file)
        headers = next(reader)
        if headers != HEADERS:
            # Name the offending file so CI logs point at the right
            # artifact (the original message printed a placeholder).
            raise ValueError(f'Unexpected table header for {csv_filename}: {headers}')

        for v in reader:
            add = False
            verdict = v[VERDICT_NEW]
            diff = compute_diff(v)

            if v[VERDICT_OLD] != v[VERDICT_NEW]:
                changes = True
                add = True
                verdict = f'{v[VERDICT_OLD]} -> {v[VERDICT_NEW]}'
                if v[VERDICT_NEW] == 'failure':
                    # A regression: mark it loudly and fail the job later.
                    new_failures = True
                    verdict += ' (!!)'

            if abs(diff * 100) > TRESHOLD_PCT:
                changes = True
                add = True

            if not add:
                continue

            diff_txt = '{:+.1f} %'.format(diff * 100)
            table.append([v[FILE], v[PROG], verdict, diff_txt])

    return VeristatInfo(table=table,
                        changes=changes,
                        new_failures=new_failures)
| 114 | + |
def format_table(headers, rows, html_mode):
    """Render `headers` and `rows` as a markdown table string.

    Each column is padded to the width of its longest cell. When
    html_mode is True, ' -> ' and ' (!!)' markers are replaced with
    GitHub-friendly '&rarr;' / ':bangbang:' decorations.
    """
    def decorate(val, width):
        s = str(val)
        if html_mode:
            # Prettier arrow / emphasis when rendered by GitHub.
            s = s.replace(' -> ', ' &rarr; ')
            s = s.replace(' (!!)', ' :bangbang: ')
        return s.ljust(width)

    # Per-column width: the longest cell in that column, headers included.
    # Note: widths are computed before the HTML substitutions above.
    column_widths = list(reduce(lambda acc, row: map(max, map(len, row), acc),
                                rows,
                                map(len, headers)))

    with io.StringIO() as out:
        def print_row(row):
            out.write('| ')
            out.write(' | '.join(map(decorate, row, column_widths)))
            out.write(' |\n')

        print_row(headers)

        # Separator line: each cell is padded by one space on both sides.
        out.write('|')
        out.write('|'.join(map(lambda w: '-' * (w + 2), column_widths)))
        out.write('|\n')

        for row in rows:
            print_row(row)

        return out.getvalue()
| 143 | + |
def format_section_name(info):
    """Pick the summary headline for `info`, most severe case first:
    new failures beat performance changes beat no changes at all."""
    if info.new_failures:
        return 'There are new veristat failures'
    return ('There are changes in verification performance'
            if info.changes
            else 'No changes in verification performance')
| 150 | + |
# Column titles for the rows collected in VeristatInfo.table.
SUMMARY_HEADERS = ['File', 'Program', 'Verdict', 'States Diff (%)']
| 152 | + |
def format_html_summary(info):
    """Render the summary for the GitHub step-summary file.

    Returns only the headline when there is nothing to report;
    otherwise wraps the table in a collapsible <details> element.
    """
    section_name = format_section_name(info)
    if not info.table:
        return f'# {section_name}\n'

    # html_mode=True: use &rarr; / :bangbang: decorations in cells.
    table = format_table(SUMMARY_HEADERS, info.table, True)
    return f'''
# {section_name}

<details>
<summary>Click to expand</summary>

{table}
</details>
'''.lstrip()
| 168 | + |
def format_text_summary(info):
    """Render the summary as plain markdown for standard output.

    Returns only the headline when there is nothing to report,
    mirroring format_html_summary.
    """
    section_name = format_section_name(info)
    if not info.table:
        return f'# {section_name}\n'

    # Only build the table when there is something to show (the original
    # formatted it before the early return above — wasted work).
    table = format_table(SUMMARY_HEADERS, info.table, False)
    return f'''
# {section_name}

{table}
'''.lstrip()
| 180 | + |
def main(compare_csv_filename, summary_filename):
    """Print the text summary to stdout, append the HTML summary to
    `summary_filename`, and return the process exit code:
    1 when new failures are present, 0 otherwise."""
    info = parse_table(compare_csv_filename)
    sys.stdout.write(format_text_summary(info))
    with open(summary_filename, 'a') as out:
        out.write(format_html_summary(info))
    return 1 if info.new_failures else 0
| 191 | + |
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="""Print veristat comparison output as markdown step summary"""
    )
    parser.add_argument('filename')
    args = parser.parse_args()
    # The step-summary file is mandatory: this script is meant to run
    # inside a GitHub Actions step.
    summary_filename = os.getenv('GITHUB_STEP_SUMMARY')
    if summary_filename:
        sys.exit(main(args.filename, summary_filename))
    logging.error('GITHUB_STEP_SUMMARY environment variable is not set')
    sys.exit(1)
0 commit comments