# cantera/test_problems/SConscript
import os
import sys

from buildutils import *

Import('env', 'buildTargets', 'installTargets')
localenv = env.Clone()

localenv.Append(CPPPATH=['#include', '#src'])

os.environ['PYTHONPATH'] = pjoin(os.getcwd(), '..', 'Cantera', 'python')
os.environ['CANTERA_DATA'] = pjoin(os.getcwd(), '..', 'data', 'inputs')
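
# These two environment variables let the test programs find the in-tree
# Cantera Python module and the input data files in the source tree.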

testNames = []

class Test(object):
    def __init__(self, testName, subdir, programName, blessedName, **kwargs):
        assert set(kwargs.keys()) <= set(['arguments', 'options', 'artifacts',
                                          'comparisons', 'tolerance', 'threshold',
                                          'ignoreLines', 'extensions']), kwargs.keys()
        self.subdir = subdir
        self.programName = programName

        arguments = kwargs.get('arguments') or []
        if isinstance(arguments, str):
            arguments = [arguments]
        self.arguments = arguments  # file arguments

        self.options = kwargs.get('options') or ''
        self.blessedName = blessedName
        self.artifacts = kwargs.get('artifacts') or ()
        self.comparisons = kwargs.get('comparisons') or ()
        self.tolerance = kwargs.get('tolerance') or 1e-5  # error tolerance for CSV comparison
        self.threshold = kwargs.get('threshold') or 1e-14  # error threshold for CSV comparison
        # ignore lines starting with specified strings when comparing output files
        self.ignoreLines = kwargs.get('ignoreLines') or []

        self.testName = testName
        self.passedFile = '.passed-%s' % testName
        # testResults (pass/fail bookkeeping) is assumed to be provided by the
        # star import from buildutils
        testResults.tests[self.testName] = self

        run = self.run(localenv)
        localenv.Alias('test-run', run)
        localenv.Alias('test-clean', self.clean(localenv))
        localenv.Alias('test-%s' % self.testName, run)
        testNames.append(self.testName)

        # reset: just delete the '.passed' file so that this test will be re-run
        localenv.Alias('test-reset', self.reset(localenv))
    def run(self, env, *args):
        source = list(args)
        if not source:
            source.append(self.programName)
        source.extend(pjoin(self.subdir, arg) for arg in self.arguments)
        test = env.RegressionTest(pjoin(self.subdir, self.passedFile), source,
                                  active_test_name=self.testName,
                                  test_blessed_file=self.blessedName,
                                  test_command_options=self.options,
                                  test_comparisons=self.comparisons,
                                  test_csv_threshold=self.threshold,
                                  test_csv_tolerance=self.tolerance,
                                  test_ignoreLines=self.ignoreLines)
        return test
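
    # RegressionTest is a custom SCons builder, assumed to be installed on
    # the environment by the top-level build; it runs the program, compares
    # the output and any listed files against their blessed counterparts,
    # and creates the '.passed' marker only on success.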
    def reset(self, env, **kwargs):
        f = pjoin(os.getcwd(), self.subdir, self.passedFile)
        if os.path.exists(f):
            uniqueName = 'reset-%s' % self.testName
            target = env.Command(uniqueName, [], [Delete(f)])
            return target
    def clean(self, env, **kwargs):
        # Name used for the output file
        if self.blessedName is not None and 'blessed' in self.blessedName:
            outName = self.blessedName.replace('blessed', 'output')
        else:
            outName = 'test_output.txt'

        files = kwargs.get('files') or []
        files += [self.passedFile, 'ct2ctml.log', outName]
        files += list(self.artifacts)
        files += [comp[1] for comp in self.comparisons]
        files = [pjoin(os.getcwd(), self.subdir, name) for name in files]

        uniqueName = 'clean-%s' % self.testName
        target = env.Command(uniqueName, [],
                             [Delete(f) for f in files if os.path.exists(f)])
        return target
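
# Constructing a Test registers it with the result tracker and wires up the
# SCons aliases: 'test-run' (every test), 'test-<name>' (just this test),
# 'test-clean' (delete generated files), and 'test-reset' (delete the
# '.passed-<name>' marker so the test is re-run next time).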

class CompileAndTest(Test):
    def __init__(self, testName, subdir, programName, blessedName, **kwargs):
        self.extensions = kwargs.get('extensions') or ('cpp',)
        Test.__init__(self, testName, subdir, programName, blessedName, **kwargs)

    def run(self, env):
        prog = env.Program(pjoin(self.subdir, self.programName),
                           mglob(env, self.subdir, *self.extensions),
                           LIBS=env['cantera_libs'])
        return Test.run(self, env, prog)

    def clean(self, env):
        files = [self.programName + ext
                 for ext in ['', '.o', '.exe', '.exe.manifest', '.ilk',
                             '.obj', '.pdb']]
        return Test.clean(self, env, files=files)
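
# Test definitions. Each CompileAndTest(...) below builds the named program
# from the sources in its subdirectory (filtered by 'extensions', default
# ('cpp',)), runs it with the listed file arguments, and compares its output
# against the blessed reference file; 'comparisons' adds extra file-vs-file
# checks and 'artifacts' lists files removed by 'test-clean'.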

CompileAndTest('DH_graph_1',
               pjoin('cathermo', 'DH_graph_1'),
               'DH_graph_1', 'DH_NaCl_dilute_blessed.csv',
               artifacts=['DH_graph_1.log'],
               arguments='DH_NaCl_dilute.xml')

CompileAndTest('DH_graph_acommon',
               pjoin('cathermo', 'DH_graph_acommon'),
               'DH_graph_acommon', 'DH_NaCl_acommon_blessed.csv',
               artifacts=['DH_graph_1.log'],
               arguments='DH_NaCl_acommon.xml')

CompileAndTest('DH_graph_bdotak',
               pjoin('cathermo', 'DH_graph_bdotak'),
               'DH_graph_bdotak', 'DH_NaCl_bdotak_blessed.csv',
               artifacts=['DH_graph_1.log'],
               arguments='DH_NaCl_bdotak.xml')

CompileAndTest('DH_graph_NM',
               pjoin('cathermo', 'DH_graph_NM'),
               'DH_graph_NM', 'DH_NaCl_NM_blessed.csv',
               artifacts=['DH_graph_1.log'],
               arguments='DH_NaCl_NM.xml')

CompileAndTest('DH_graph_Pitzer',
               pjoin('cathermo', 'DH_graph_Pitzer'),
               'DH_graph_Pitzer', 'DH_NaCl_Pitzer_blessed.csv',
               artifacts=['DH_graph_1.log'],
               arguments='DH_NaCl_Pitzer.xml')

CompileAndTest('HMW_dupl_test',
               pjoin('cathermo', 'HMW_dupl_test'),
               'HMW_dupl_test', 'output_blessed.txt',
               artifacts=['DH_graph_1.log'],
               arguments='HMW_NaCl_sp1977_alt.xml')

CompileAndTest('HMW_graph_CpvT',
               pjoin('cathermo', 'HMW_graph_CpvT'),
               'HMW_graph_CpvT', 'output_blessed.txt',
               extensions=['^HMW_graph_CpvT.cpp'],
               arguments='HMW_NaCl_sp1977_alt.xml')

CompileAndTest('HMW_graph_GvI',
               pjoin('cathermo', 'HMW_graph_GvI'),
               'HMW_graph_GvI', None,
               comparisons=[('T298_blessed.csv', 'T298.csv'),
                            ('T523_blessed.csv', 'T523.csv')],
               artifacts=['T373.csv', 'T423.csv', 'T473.csv',
                          'T548.csv', 'T573.csv'])

CompileAndTest('HMW_graph_GvT',
               pjoin('cathermo', 'HMW_graph_GvT'),
               'HMW_graph_GvT', 'output_blessed.txt',
               extensions=['^HMW_graph_GvT.cpp'],
               arguments='HMW_NaCl_sp1977_alt.xml')

CompileAndTest('HMW_graph_HvT',
               pjoin('cathermo', 'HMW_graph_HvT'),
               'HMW_graph_HvT', 'output_blessed.txt',
               extensions=['^HMW_graph_HvT.cpp'],
               arguments='HMW_NaCl_sp1977_alt.xml')

CompileAndTest('HMW_graph_VvT',
               pjoin('cathermo', 'HMW_graph_VvT'),
               'HMW_graph_VvT', 'output_blessed.txt',
               extensions=['^HMW_graph_VvT.cpp'],
               arguments='HMW_NaCl_sp1977_alt.xml')

CompileAndTest('HMW_test_1',
               pjoin('cathermo', 'HMW_test_1'),
               'HMW_test_1', 'output_noD_blessed.txt')

CompileAndTest('HMW_test_3',
               pjoin('cathermo', 'HMW_test_3'),
               'HMW_test_3', 'output_noD_blessed.txt')
CompileAndTest('IMSTester',
               pjoin('cathermo', 'ims'),
               'IMSTester', 'output_blessed.txt')

CompileAndTest('ISSPTester',
               pjoin('cathermo', 'issp'),
               'ISSPTester', 'output_blessed.txt')

CompileAndTest('stoichSubSSTP',
               pjoin('cathermo', 'stoichSubSSTP'),
               'stoichSubSSTP', 'output_blessed.txt')

CompileAndTest('IAPWSphi',
               pjoin('cathermo', 'testIAPWS'),
               'testIAPWSphi', 'output_blessed.txt')

CompileAndTest('IAPWSPres',
               pjoin('cathermo', 'testIAPWSPres'),
               'testIAPWSPres', 'output_blessed.txt')

CompileAndTest('IAPWSTripP',
               pjoin('cathermo', 'testIAPWSTripP'),
               'testIAPWSTripP', 'output_blessed.txt')

CompileAndTest('WaterPDSS',
               pjoin('cathermo', 'testWaterPDSS'),
               'testWaterPDSS', 'output_blessed.txt')

CompileAndTest('WaterSSTP',
               pjoin('cathermo', 'testWaterTP'),
               'testWaterSSTP', 'output_blessed.txt')

CompileAndTest('ISSPTester2',
               pjoin('cathermo', 'VPissp'),
               'ISSPTester2', 'output_blessed.txt')

CompileAndTest('wtWater',
               pjoin('cathermo', 'wtWater'),
               'wtWater', 'output_blessed.txt')

CompileAndTest('ChemEquil_gri_matrix',
               'ChemEquil_gri_matrix', 'gri_matrix', 'output_blessed.txt')

CompileAndTest('ChemEquil_gri_pairs',
               'ChemEquil_gri_pairs', 'gri_pairs', 'output_blessed.txt')

CompileAndTest('ChemEquil_ionizedGas',
               'ChemEquil_ionizedGas', 'ionizedGasEquil',
               'output_blessed.txt',
               comparisons=[('table_blessed.csv', 'table.csv')])

CompileAndTest('ChemEquil_red1',
               'ChemEquil_red1', 'basopt_red1', 'output_blessed.txt')
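
# The ck2cti tests below exercise the already-built converter from
# #build/bin, so they use the plain Test class instead of CompileAndTest.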

Test('ck2cti-gri30',
     'ck2cti_test', '#build/bin/ck2cti$PROGSUFFIX', None,
     options='-i gri30.inp -id gri30 -tr gri30_tran.dat',
     comparisons=[('gri30a_blessed.cti', 'gri30.cti')],
     ignoreLines=['#'],
     artifacts=['ck2cti.log'])

Test('ck2cti-soot',
     'ck2cti_test', '#build/bin/ck2cti$PROGSUFFIX', None,
     options='-i soot.inp -id soot -t therm_soot.dat',
     comparisons=[('soot_blessed.cti', 'soot.cti')],
     ignoreLines=['#'],
     artifacts=['ck2cti.log'])

CompileAndTest('CpJump', 'CpJump', 'CpJump', 'output_blessed.txt')

CompileAndTest('cxx_ex', 'cxx_ex', 'cxx_examples', 'output_blessed.txt',
               comparisons=[('eq1_blessed.csv', 'eq1.csv'),
                            ('kin1_blessed.csv', 'kin1.csv'),
                            ('kin2_blessed.csv', 'kin2.csv'),
                            ('tr1_blessed.csv', 'tr1.csv'),
                            ('tr2_blessed.csv', 'tr2.csv')],
               tolerance=2e-3,
               threshold=1e-7,
               artifacts=['eq1.dat', 'kin1.dat', 'kin2.dat', 'kin3.csv',
                          'kin3.dat', 'tr1.dat', 'tr2.dat'])

CompileAndTest('diamondSurf', 'diamondSurf', 'runDiamond', 'runDiamond_blessed.out')

CompileAndTest('fracCoeff', 'fracCoeff', 'fracCoeff', 'frac_blessed.out')

CompileAndTest('minDiamond', pjoin('min_python', 'minDiamond'),
               'runDiamond', 'runDiamond_blessed.out',
               comparisons=[('diamond_blessed.xml', 'diamond.xml')])

CompileAndTest('minNegA', pjoin('min_python', 'negATest'),
               'negATest', 'negATest_blessed.out',
               comparisons=[('noxNeg_blessed.xml', 'noxNeg.xml')])

CompileAndTest('mixGasTransport',
               'mixGasTransport', 'mixGasTransport', 'output_blessed.txt')

CompileAndTest('multiGasTransport',
               'multiGasTransport', 'multiGasTransport', 'output_blessed.txt')

CompileAndTest('NASA9poly', 'NASA9poly_test', 'NASA9poly_test', 'output_blessed.txt')

Test('nasa9_reader', 'nasa9_reader', '#build/bin/ck2cti$PROGSUFFIX', None,
     options='-i sample.inp -id sample -t sampleData.inp',
     comparisons=[('sample_blessed.cti', 'sample.cti')],
     ignoreLines=['#'],
     artifacts=['ck2cti.log'])

CompileAndTest('negA', 'negATest', 'negATest', 'negATest_blessed.out')

CompileAndTest('printUtil', 'printUtilUnitTest', 'pUtest', 'output_blessed.txt')

CompileAndTest('pureFluid', 'pureFluidTest', 'testPureWater', 'output_blessed.txt')

CompileAndTest('rankine_democxx', 'rankine_democxx', 'rankine', 'output_blessed.txt',
               artifacts=['liquidvapor.xml'])

CompileAndTest('silane_equil', 'silane_equil', 'silane_equi', 'output_blessed.txt')

# spectroscopy is incomplete

CompileAndTest('surfkin', 'surfkin', 'surfdemo', 'output_blessed.txt')

CompileAndTest('surfSolver', 'surfSolverTest', 'surfaceSolver', 'surfaceSolver_blessed.out',
               arguments='haca2.xml',
               artifacts=['results.txt', 'diamond.xml'],
               extensions=['^surfaceSolver.cpp'])  # needs .csv, extra tests

CompileAndTest('VCSnonideal', pjoin('VCSnonideal', 'NaCl_equil'),
               'nacl_equil', 'good_out.txt',
               options='-d 3',
               # not comparing vcs_equilibrate_res.csv because it's not really CSV
               artifacts=['vcs_equilibrate_res.csv'])

CompileAndTest('VPsilane_test', 'VPsilane_test', 'VPsilane_test', 'output_blessed.txt')
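
# The Python tests reuse the same Test machinery: the "program" is the
# Python interpreter ($python_cmd) and the script is passed as a file
# argument or option, with output compared against blessed files as above.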

# Python Tests
if localenv['python_package'] == 'full':
    Test('python-diamond', 'python', '$python_cmd', None,
         options='../../Cantera/python/examples/surface_chemistry/diamond_cvd/diamond.py',
         comparisons=[('diamond_blessed.csv', 'diamond.csv')],
         artifacts=['diamond.xml'])

    Test('python-frac', 'python', '$python_cmd', 'frac_blessed.out',
         arguments='frac.py', artifacts=['frac.xml'])

    Test('python-tut1', pjoin('python', 'tut1'), '$python_cmd',
         'output_blessed.txt', arguments='tut1.py', artifacts=['gri30.xml'])

    Test('python-tut2', pjoin('python', 'tut2'), '$python_cmd',
         'output_blessed.txt', arguments='tut2.py',
         artifacts=['gri30.xml', 'diamond.xml'])

    # Skipping Python Tutorial 3 (documentation only)
    Test('python-tut4', pjoin('python', 'tut4'), '$python_cmd',
         'output_blessed.txt', arguments='tut4.py', artifacts=['gri30.xml'])
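
# finish_tests depends on everything aliased to 'test-run', so the summary
# report prints only after all requested tests have executed; 'scons test'
# runs the whole suite and then the report.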
finish_tests = localenv.Command('finish_tests', [], testResults.printReport)
localenv.Depends(finish_tests, 'test-run')
Alias('test', finish_tests)
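
# 'scons test-help' lists the available per-test targets and exits without
# building anything.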
if 'test-help' in COMMAND_LINE_TARGETS:
    print('\n*** Available regression tests ***\n')
    for name in testNames:
        print(name)
    sys.exit(0)