import os
from os.path import join as pjoin
from buildutils import *

Import('env', 'build', 'install')

localenv = env.Clone()
localenv.Prepend(CPPPATH=['#include', '#src', 'shared'])
localenv.Append(CCFLAGS=env['warning_flags'])
# Turn off optimization to speed up compilation
ccflags = localenv['CCFLAGS']
for optimize_flag in ('-O3', '-O2', '/O2'):
    if optimize_flag in ccflags:
        ccflags.remove(optimize_flag)
localenv['CCFLAGS'] = ccflags
localenv['ENV']['CANTERA_DATA'] = (Dir('#build/data').abspath + os.pathsep +
                                   Dir('#samples/data').abspath + os.pathsep +
                                   Dir('#test/data').abspath)
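
# Maps each test name to its '.passed' stamp file so that tests requested
# explicitly on the command line can be forced to re-run (see the loop at the
# bottom of this file).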
PASSED_FILES = {}
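
# A Test registers one regression test with SCons: it runs a program (either a
# prebuilt sample or one compiled by the CompileAndTest subclass), optionally
# compares the output against a blessed reference file and/or CSV comparison
# pairs, and touches a '.passed-<name>' stamp file on success. Each test also
# gets its own alias, so a single test can be run as, e.g., 'scons test-CpJump'.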
class Test(object):
    def __init__(self, testName, subdir=None, programName=None,
                 blessedName='output_blessed.txt', arguments=(), options='',
                 artifacts=(), comparisons=(), profiles=(), ignoreLines=(),
                 tolerance=1e-5, threshold=1e-14,
                 dependencies=(), source_files=(), slow=False):
        if slow and env["skip_slow_tests"]:
            return

        self.subdir = subdir or testName
        self.programName = programName or testName
        if isinstance(arguments, str):
            arguments = [arguments]
        self.blessedName = blessedName
        self.artifacts = artifacts
        if isinstance(self.artifacts, str):
            self.artifacts = [self.artifacts]
        self.comparisons = comparisons
        self.profiles = profiles
        self.tolerance = tolerance  # error tolerance for CSV comparison
        self.threshold = threshold  # error threshold for CSV comparison
        self.source_files = source_files
        self.testName = testName
        self.passedFile = '.passed-%s' % testName
        PASSED_FILES[self.testName] = pjoin(self.subdir, self.passedFile)
        test_results.tests[self.testName] = self

        if source_files:
            self.program = localenv.Program(
                pjoin(self.subdir, self.programName), source_files,
                LIBS=self.libs or localenv['cantera_shared_libs'])
        else:
            if isinstance(self.programName, str):
                self.programName += '$PROGSUFFIX'
            self.program = [self.programName]

        command = self.program + [pjoin(self.subdir, arg) for arg in arguments]

        run = localenv.RegressionTest(
            pjoin(self.subdir, self.passedFile), command,
            active_test_name=testName,
            test_blessed_file=blessedName, test_command_options=options,
            test_comparisons=comparisons, test_profiles=profiles,
            test_csv_threshold=threshold, test_csv_tolerance=tolerance,
            test_ignoreLines=ignoreLines)

        localenv.Depends(env['test_results'], run)
        localenv.Depends(run, env['build_targets'])
        localenv.Alias('test-clean', self.clean(localenv))
        localenv.Alias('test-%s' % self.testName, run)
        env['testNames'].append(self.testName)

        # reset: just delete the ".passed" file so that this test will be re-run
        localenv.Alias('test-reset', self.reset(localenv))

        for dep in dependencies:
            localenv.Depends(run, dep)
        env.Depends(run, localenv.get('cantera_shlib', ()))
    def reset(self, env):
        f = pjoin(os.getcwd(), self.subdir, self.passedFile)
        if os.path.exists(f):
            uniqueName = 'reset-%s' % self.testName
            target = env.Command(uniqueName, [], [Delete(f)])
            return target
    def clean(self, env, files=None):
        # Name used for the output file
        if self.blessedName is not None and 'blessed' in self.blessedName:
            outName = self.blessedName.replace('blessed', 'output')
        else:
            outName = 'test_output.txt'

        files = files or []
        files += [self.passedFile, outName]
        files += list(self.artifacts)
        files += [comp[1] for comp in self.comparisons]
        files += [comp[1] for comp in self.profiles]
        files = [pjoin(os.getcwd(), self.subdir, name) for name in files]

        uniqueName = 'clean-%s-' % self.testName
        target = env.Command(uniqueName, [],
                             [Delete(f) for f in files
                              if os.path.exists(f)])
        return target
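
# Test variant that first compiles the test program from source files globbed
# out of the test's subdirectory and links it against the Cantera libraries.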
class CompileAndTest(Test):
    def __init__(self, testName, subdir=None, programName=None,
                 blessedName='output_blessed.txt', libs=(), extensions=('cpp',),
                 **kwargs):
        self.libs = list(libs)
        sources = multi_glob(env, subdir or testName, *extensions)
        Test.__init__(self, testName, subdir, programName, blessedName,
                      source_files=sources, **kwargs)

    def clean(self, env):
        basenames = [os.path.splitext(f.name)[0] for f in self.source_files]
        basenames.append(self.programName)
        exts = ['', '.o', '.exe', '.exe.manifest', '.ilk', '.obj', '.pdb', '.obj.pdb']
        files = [name + ext for name in set(basenames) for ext in exts]
        return Test.clean(self, env, files=files)
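
# Debye-Huckel activity model regression tests: the same program is rerun with
# a different command-line option for each model variant.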
dhGraph = CompileAndTest('DH_graph_dilute', 'cathermo/DH_graph_1', 'DH_graph_1',
                         'DH_NaCl_dilute_blessed.csv',
                         artifacts=['DH_graph_1.log'],
                         arguments=File('#test/data/debye-huckel-all.yaml').abspath,
                         options='debye-huckel-dilute')
Test('DH_graph_acommon', 'cathermo/DH_graph_1',
     dhGraph.program, 'DH_NaCl_acommon_blessed.csv',
     artifacts=['DH_graph_1.log'],
     arguments=File('#test/data/debye-huckel-all.yaml').abspath,
     options='debye-huckel-B-dot-a')
Test('DH_graph_bdotak', 'cathermo/DH_graph_1',
     dhGraph.program, 'DH_NaCl_bdotak_blessed.csv',
     artifacts=['DH_graph_1.log'],
     arguments=File('#test/data/debye-huckel-all.yaml').abspath,
     options='debye-huckel-B-dot-ak')
Test('DH_graph_NM', 'cathermo/DH_graph_1',
     dhGraph.program, 'DH_NaCl_NM_blessed.csv',
     artifacts=['DH_graph_1.log'],
     arguments=File('#test/data/debye-huckel-all.yaml').abspath,
     options='debye-huckel-beta_ij')
Test('DH_graph_Pitzer', 'cathermo/DH_graph_1',
     dhGraph.program, 'DH_NaCl_Pitzer_blessed.csv',
     artifacts=['DH_graph_1.log'],
     arguments=File('#test/data/debye-huckel-all.yaml').abspath,
     options='debye-huckel-pitzer-beta_ij')
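
# HMW electrolyte model (Pitzer parameterization) tests covering solution
# properties as functions of temperature and ionic strength.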
CompileAndTest('HMW_graph_CpvT', 'cathermo/HMW_graph_CpvT',
               extensions=['^HMW_graph_CpvT.cpp'],
               arguments=File('#test/data/HMW_NaCl_sp1977_alt.yaml').abspath)
CompileAndTest('HMW_graph_GvI', 'cathermo/HMW_graph_GvI',
               blessedName=None,
               comparisons=[('T298_blessed.csv', 'T298.csv'),
                            ('T523_blessed.csv', 'T523.csv')],
               artifacts=['T373.csv', 'T423.csv', 'T473.csv',
                          'T548.csv', 'T573.csv'])
CompileAndTest('HMW_graph_GvT', 'cathermo/HMW_graph_GvT',
               extensions=['^HMW_graph_GvT.cpp'],
               arguments=File('#test/data/HMW_NaCl_sp1977_alt.yaml').abspath)
CompileAndTest('HMW_graph_HvT', 'cathermo/HMW_graph_HvT',
               extensions=['^HMW_graph_HvT.cpp'],
               arguments=File('#test/data/HMW_NaCl_sp1977_alt.yaml').abspath)
CompileAndTest('HMW_graph_VvT', 'cathermo/HMW_graph_VvT',
               extensions=['^HMW_graph_VvT.cpp'],
               arguments=File('#test/data/HMW_NaCl_sp1977_alt.yaml').abspath)
CompileAndTest('HMW_test_1', 'cathermo/HMW_test_1',
               blessedName='output_noD_blessed.txt')
CompileAndTest('HMW_test_3', 'cathermo/HMW_test_3',
               blessedName='output_noD_blessed.txt')
CompileAndTest('IMSTester', 'cathermo/ims')
CompileAndTest('ISSPTester', 'cathermo/issp')
CompileAndTest('stoichSub', 'cathermo/stoichSub')
CompileAndTest('WaterPDSS', 'cathermo/testWaterPDSS')
CompileAndTest('WaterSSTP', 'cathermo/testWaterTP')
CompileAndTest('ISSPTester2', 'cathermo/VPissp')
CompileAndTest('ChemEquil_ionizedGas',
               comparisons=[('table_blessed.csv', 'table.csv')])
CompileAndTest('CpJump', 'CpJump', 'CpJump', 'output_blessed.txt')
CompileAndTest('diamondSurf', 'diamondSurf', 'diamondSurf', 'runDiamond_blessed.out')
CompileAndTest('dustyGasTransport', slow=True)
CompileAndTest('mixGasTransport')
CompileAndTest('multiGasTransport')
CompileAndTest('pureFluid', 'pureFluidTest')
CompileAndTest('stoichSolidKinetics')
CompileAndTest('surfSolver', 'surfSolverTest', 'surfaceSolver', None,
               comparisons=[('results_blessed.txt', 'results.txt')],
               artifacts=['results.txt'],
               extensions=['^surfaceSolver.cpp'])
CompileAndTest('surfSolver2', 'surfSolverTest', 'surfaceSolver2', None,
               comparisons=[('results2_blessed.txt', 'results2.txt')],
               artifacts=['results2.txt'],
               extensions=['^surfaceSolver2.cpp'])
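
# VCS multiphase equilibrium solver tests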
CompileAndTest('VCS-NaCl', 'VCSnonideal/NaCl_equil',
               'nacl_equil', 'good_out.txt',
               options='-d 3',
               artifacts=['vcs_equilibrate_res.csv'])  # not testing this file because it's not really csv

vcs_LiSi = CompileAndTest('VCS-LiSi', 'VCSnonideal/LatticeSolid_LiSi', 'latsol',
                          artifacts=['vcs_equilibrate_res.csv'])
Test('VCS-LiSi-verbose', 'VCSnonideal/LatticeSolid_LiSi', vcs_LiSi.program,
     'verbose_blessed.txt', options='8',
     artifacts=['vcs_equilibrate_res.csv'])
# C++ Samples
Test('cxx-bvp', 'cxx_samples', '#build/samples/cxx/bvp/blasius', None,
     profiles=[('blasius_blessed.csv', 'blasius.csv')],
     tolerance=1e-4)
Test('cxx-combustor', 'cxx_samples', '#build/samples/cxx/combustor/combustor', None,
     comparisons=[('combustor_cxx_blessed.csv', 'combustor_cxx.csv')],
     threshold=1e-10, tolerance=2e-4, slow=True)
Test('cxx-custom', 'cxx_samples', '#build/samples/cxx/custom/custom', None,
     comparisons=[('custom_cxx_blessed.csv', 'custom_cxx.csv')],
     threshold=1e-10, tolerance=2e-4, slow=True)
Test('cxx-demo', 'cxx_samples', '#build/samples/cxx/demo/demo',
     'cxx_demo_blessed.txt',
     threshold=1e-10, tolerance=2e-4, slow=True)
Test('cxx-flamespeed', 'cxx_samples', '#build/samples/cxx/flamespeed/flamespeed',
     'cxx_flamespeed_blessed.txt',
     comparisons=[('flamespeed_blessed.csv', 'flamespeed.csv')],
     options='0.9 0 0')
Test('cxx-kinetics1', 'cxx_samples', '#build/samples/cxx/kinetics1/kinetics1', None,
     comparisons=[('kin1_blessed.csv', 'kin1.csv')], slow=True, tolerance=2e-4)
Test('cxx-gas-transport', 'cxx_samples',
     '#build/samples/cxx/gas_transport/gas_transport', None, slow=True,
     comparisons=[('transport_mix_blessed.csv', 'transport_mix.csv'),
                  ('transport_multi_blessed.csv', 'transport_multi.csv')])
Test('cxx-LiC6-electrode', 'cxx_samples',
     '#build/samples/cxx/LiC6_electrode/LiC6_electrode', None, slow=True,
     comparisons=[('LiC6_electrode_blessed.csv', 'LiC6_electrode_output.csv')])
if env['HAS_OPENMP']:
    Test('cxx-openmp-ignition', 'cxx_samples',
         '#build/samples/cxx/openmp_ignition/openmp_ignition',
         'openmp_ignition_blessed.txt',
         ignoreLines=['Running on'])
Test('cxx-rankine', 'cxx_samples', '#build/samples/cxx/rankine/rankine',
     'rankine_blessed.txt')
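
# CLib sample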
Test('clib-demo', 'clib', '#build/samples/clib/demo', 'clib_demo_blessed.txt')
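
# Fortran samples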
if env['f90_interface'] == 'y':
    Test('f90-demo', 'fortran', '#build/samples/f90/demo', 'f90_demo_blessed.txt',
         threshold=1e-10)
    Test('f77-isentropic', 'fortran', '#build/samples/f77/isentropic',
         'f77_isentropic_blessed.txt')
    Test('f77-ctlib', 'fortran', '#build/samples/f77/ctlib', 'f77_ctlib_blessed.txt')
    Test('f77-demo', 'fortran', '#build/samples/f77/demo', 'f77_demo_blessed.txt')
# Force explicitly-named tests to run even if SCons thinks they're up to date
for command in COMMAND_LINE_TARGETS:
    if command.startswith('test-'):
        name = command[5:]
        if name in PASSED_FILES and os.path.exists(PASSED_FILES[name]):
            os.remove(PASSED_FILES[name])