added: scripts for generating failure reports on Jenkins

This creates a PDF for each failed case with plots of the summary curves.
Arne Morten Kvarving 2024-09-06 13:12:15 +02:00
parent 03795d5b23
commit 2166ec6328
3 changed files with 164 additions and 0 deletions

CMakeLists.txt

@@ -435,6 +435,19 @@ include (${CMAKE_CURRENT_SOURCE_DIR}/modelTests.cmake)
if (HAVE_OPM_TESTS)
  include (${CMAKE_CURRENT_SOURCE_DIR}/compareECLFiles.cmake)
  if(OPM_ENABLE_PYTHON)
    # When opm-simulators is built as part of a larger build tree its test setup
    # lives in an opm-simulators subdirectory of the binary dir; a standalone
    # build uses the binary dir directly.
    if(${CMAKE_BINARY_DIR} STREQUAL ${PROJECT_BINARY_DIR})
      set(sim_dir ${CMAKE_BINARY_DIR})
    else()
      set(sim_dir ${CMAKE_BINARY_DIR}/opm-simulators)
    endif()
    add_custom_target(failure_report
                      USES_TERMINAL
                      COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${opm-common_DIR}/python"
                              ${PROJECT_SOURCE_DIR}/tests/make_failure_report.sh
                              ${OPM_TESTS_ROOT} ${CMAKE_BINARY_DIR} ${sim_dir})
  endif()
endif()
if(MPI_FOUND)
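
For reference, a minimal invocation sketch (the session below is illustrative, not part of this commit): since add_custom_target registers failure_report with the generated build system, the report can be produced from the build tree after a test run, roughly like this:

    # hypothetical session in the opm-simulators build directory
    ctest -R compareECLFiles || true         # run the regression tests; some may fail
    cmake --build . --target failure_report  # generate PDFs for the failed ones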

tests/make_failure_report.sh Executable file

@@ -0,0 +1,31 @@
#!/bin/bash
# Generates a summary plot comparison for each failed test case
OPM_TESTS_ROOT=$1  # checkout of the opm-tests data repository
BUILD_DIR=$2       # top-level build directory (holds Testing/Temporary/LastTestsFailed*.log)
RESULT_DIR=$3      # opm-simulators build directory (holds CTestTestfile.cmake and tests/results)
SOURCE_DIR=`dirname "$0"`
FAILED_TESTS=`cat $BUILD_DIR/Testing/Temporary/LastTestsFailed*.log`
mkdir -p $BUILD_DIR/failure_report
cd $BUILD_DIR/failure_report || exit 1
rm -f *
for failed_test in $FAILED_TESTS
do
  # Only the regression tests set up by compareECLFiles.cmake are processed
  grep -q -E "compareECLFiles" <<< $failed_test
  test $? -ne 0 && continue
  # Strip the "<number>:" prefix from the LastTestsFailed entry; '+' in the test
  # name is replaced with '.' so the name can be used in the awk search pattern below
  failed_test=`echo $failed_test | sed -e 's/.*://g' -e 's/\+/./g'`
  # Extract test properties
  binary=$(awk -v search="set_tests_properties\\\($failed_test\$" -v prop="SIMULATOR" -f ${SOURCE_DIR}/getprop.awk $RESULT_DIR/CTestTestfile.cmake)
  dir_name=$(awk -v search="set_tests_properties\\\($failed_test\$" -v prop="DIRNAME" -f ${SOURCE_DIR}/getprop.awk $RESULT_DIR/CTestTestfile.cmake)
  file_name=$(awk -v search="set_tests_properties\\\($failed_test\$" -v prop="FILENAME" -f ${SOURCE_DIR}/getprop.awk $RESULT_DIR/CTestTestfile.cmake)
  test_name=$(awk -v search="set_tests_properties\\\($failed_test\$" -v prop="TESTNAME" -f ${SOURCE_DIR}/getprop.awk $RESULT_DIR/CTestTestfile.cmake)
  echo "Processing ${test_name}"
  $SOURCE_DIR/plot_well_comparison.py -r $OPM_TESTS_ROOT/$dir_name/opm-simulation-reference/$binary/$file_name -s $RESULT_DIR/tests/results/$binary+$test_name/$file_name -c $test_name -o plot
done
# Finally rename the PDFs so they are ordered by maximum deviation
$SOURCE_DIR/plot_well_comparison.py -o rename
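
The script can also be run by hand outside CMake; a hedged sketch with placeholder paths (PYTHONPATH must point at the opm-common Python bindings because plot_well_comparison.py imports opm.io.ecl):

    # hypothetical standalone run; all paths depend on the local setup
    PYTHONPATH=$HOME/build/opm-common/python \
        ./tests/make_failure_report.sh \
        $HOME/opm-tests \
        $HOME/build/opm-simulators \
        $HOME/build/opm-simulators
    # the ranked PDFs end up in $HOME/build/opm-simulators/failure_report/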

tests/plot_well_comparison.py Executable file

@@ -0,0 +1,120 @@
#!/usr/bin/python3
# Generates a PDF with plots of all summary curves from a reference
# case and a 'new' simulation.
import argparse
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
from opm.io.ecl import ESmry
import os
import pickle
from scipy import integrate, stats
# Run analysis of a test.
# Calculate the deviation for each curve
# and generate a pdf with plots ordered according to deviation
def run_analysis(ref_file_name, sys_file_name, test_name):
    ref_file = ESmry(ref_file_name + '.SMSPEC')
    sim_file = ESmry(sys_file_name + '.SMSPEC')
    ref_time = ref_file.dates()
    sim_time = sim_file.dates()
    ref_time_in_secs = [(v - ref_time[0]).total_seconds() for v in ref_time]
    sim_time_in_secs = [(v - sim_time[0]).total_seconds() for v in sim_time]
    plt.rcParams['font.size'] = 8

    # Attempt at sorting the graphs in descending eyeball norm order.
    # - Normalize by inf-norm to get the same range in each graph, ie (0,1).
    # - Convert graphs to probability distributions (ie integral under curve should be 1).
    # - Use the wasserstein distance scaled by area under reference curve.
    deviation = {}
    for r in ref_file.keys():
        if r == 'TIME' or r == 'YEARS':
            continue
        try:
            ref = ref_file[r]
            sim = sim_file[r]
        except:
            continue
        if len(ref) == 0 and len(sim) == 0:
            continue
        if not (any(ref) or any(sim)):
            continue
        ref /= np.linalg.norm(ref, np.inf)
        sim /= np.linalg.norm(sim, np.inf)
        A_ref = integrate.trapezoid(ref, ref_time_in_secs, 0.0)
        A_sim = integrate.trapezoid(sim, sim_time_in_secs, 0.0)
        deviation[r] = stats.wasserstein_distance(ref / A_ref, sim / A_sim) * A_ref

    p = PdfPages(f'{test_name}.pdf')
    for r in sorted(deviation, key=lambda x: deviation[x], reverse=True):
        try:
            ref = ref_file[r]
            sim = sim_file[r]
        except:
            continue
        fig, ax = plt.subplots()
        ax.plot(ref_time, ref, linestyle='dashed', linewidth=0.5, marker='o', markersize=1.0)
        ax.plot(sim_time, sim, linewidth=0.5, marker='x', markersize=1.0)
        ax.legend(['Reference', 'New simulation'])
        plt.title(r)
        u = ref_file.units(r)
        if u:
            plt.ylabel(u)
        myFmt = DateFormatter("%Y-%b")
        ax.xaxis.set_major_formatter(myFmt)
        ax.xaxis.set_major_locator(plt.MaxNLocator(20))
        plt.grid()
        fig.autofmt_xdate()
        fig.savefig(p, format='pdf')
        plt.close(fig)
    p.close()

    if os.path.exists('max_devs.pkl'):
        with open('max_devs.pkl', 'rb') as f:
            max_deviations = pickle.load(f)
    else:
        max_deviations = {}
    max_dev = max(deviation, key=lambda x: deviation[x])
    max_deviations[test_name] = deviation[max_dev]
    with open('max_devs.pkl', 'wb') as f:
        pickle.dump(max_deviations, f)
# Rename files to rank them according to maximum deviations
def reorder_files():
    with open('max_devs.pkl', 'rb') as f:
        max_deviations = pickle.load(f)
    c = 1
    for file in sorted(max_deviations, key=lambda x: max_deviations[x], reverse=True):
        os.rename(f'{file}.pdf', f'{c:02d}_{file}.pdf')
        c += 1
# Main code
parser = argparse.ArgumentParser('plot_well_comparison.py')
parser.add_argument('-c', help='Name of test to process', dest='test_name')
parser.add_argument('-r', help='Reference file', dest='ref_file')
parser.add_argument('-s', help='Simulation file', dest='sim_file')
parser.add_argument('-o', choices=['plot', 'rename'], help='Operation to do', required=True, dest='operation')
args = parser.parse_args()
if args.operation == 'plot':
    run_analysis(args.ref_file, args.sim_file, args.test_name)
else:
    reorder_files()
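
As the argument parser indicates, the script can also be driven directly for a single case; a hedged example with placeholder paths (the .SMSPEC extension is appended by the script itself, so it is omitted here):

    # hypothetical direct use: plot one comparison, then rank all generated PDFs
    PYTHONPATH=$HOME/build/opm-common/python ./tests/plot_well_comparison.py \
        -r $HOME/opm-tests/spe1/opm-simulation-reference/flow/SPE1CASE1 \
        -s $HOME/build/opm-simulators/tests/results/flow+spe1/SPE1CASE1 \
        -c spe1 -o plot
    PYTHONPATH=$HOME/build/opm-common/python ./tests/plot_well_comparison.py -o rename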