[Python] Fix Numpy 2.0 compatibility issues

Ray Speth 2024-06-08 18:38:38 -04:00 committed by Bryan Weber
parent 9f48b07c3d
commit 4d509b5423
8 changed files with 20 additions and 14 deletions

@@ -450,8 +450,10 @@ jobs:
           find samples/python -type f -iname "*.py" \
             -exec sh -c 'for n; do echo "$n" | tee -a results.txt && python3 "$n" >> results.txt || exit 1; done' sh {} +
         env:
-          # The ignore setting here is due to a new warning introduced in Matplotlib==3.6.0
-          PYTHONWARNINGS: "error,ignore:warn_name_set_on_empty_Forward::pyparsing,ignore:datetime.datetime.utcfromtimestamp:DeprecationWarning:"
+          # The pyparsing ignore setting is due to a new warning introduced in Matplotlib==3.6.0
+          # @todo: Remove the trapz-related ignore when dropping support for NumPy 1.x
+          #        and replacing np.trapz with np.trapezoid
+          PYTHONWARNINGS: "error,ignore:warn_name_set_on_empty_Forward::pyparsing,ignore:datetime.datetime.utcfromtimestamp:DeprecationWarning:,ignore:`trapz`:DeprecationWarning"
           MPLBACKEND: Agg
       - name: Save the results file for inspection
         uses: actions/upload-artifact@v4
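
Note (not part of the commit): the @todo above refers to NumPy 2.0 deprecating np.trapz in favor of np.trapezoid. A minimal sketch of a compatibility shim that works on both NumPy 1.x and 2.x, with an illustrative integral, would be:

import numpy as np

# Prefer np.trapezoid where it exists (NumPy >= 2.0); fall back to np.trapz on 1.x.
if hasattr(np, "trapezoid"):
    trapezoid = np.trapezoid
else:
    trapezoid = np.trapz

x = np.linspace(0.0, 1.0, 101)
print(trapezoid(x**2, x))  # ~0.3333 on either NumPy version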

@@ -305,10 +305,10 @@ def analyze_errors(grids, speeds, true_speed):
     and compare these with our estimated errors.
     This will show if our estimates are reasonable, or conservative, or too optimistic.
     """
-    true_speed_estimates = np.full_like(speeds, np.NaN)
-    total_percent_error_estimates = np.full_like(speeds, np.NaN)
-    actual_extrapolated_percent_errors = np.full_like(speeds, np.NaN)
-    actual_raw_percent_errors = np.full_like(speeds, np.NaN)
+    true_speed_estimates = np.full_like(speeds, np.nan)
+    total_percent_error_estimates = np.full_like(speeds, np.nan)
+    actual_extrapolated_percent_errors = np.full_like(speeds, np.nan)
+    actual_raw_percent_errors = np.full_like(speeds, np.nan)
     for i in range(3, len(grids)):
         print(grids[: i + 1])
         true_speed_estimate, total_percent_error_estimate = extrapolate_uncertainty(
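
Aside (illustration only; the speeds array below is made up): NumPy 2.0 removed the np.NaN and np.NAN aliases, so np.nan is the only spelling that works on both 1.x and 2.x:

import numpy as np

speeds = np.array([0.40, 0.41, 0.405])
# np.nan exists in NumPy 1.x and 2.x; the NaN/NAN aliases were removed in 2.0.
estimates = np.full_like(speeds, np.nan)
print(estimates)  # [nan nan nan]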

@@ -972,9 +972,9 @@ def compare_profiles(
         "pos. err"
     ))
     header.append(f"{10*'-'} ----- {14*'-'} {14*'-'} {9*'-'} {9*'-'} {9*'-'}")
-    ref_ptp = reference.ptp(axis=1)
+    ref_ptp = np.ptp(reference, axis=1)
     ref_max = np.abs(reference).max(axis=1)
-    sample_ptp = sample.ptp(axis=1)
+    sample_ptp = np.ptp(sample, axis=1)
     sample_max = np.abs(sample).max(axis=1)
     scale = np.maximum(
         np.maximum(ref_ptp[1:], ref_max[1:]),
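
Aside (illustration only; the profile array below is made up): NumPy 2.0 removed the ndarray.ptp() method, while the np.ptp() function remains and takes the same axis argument on both 1.x and 2.x:

import numpy as np

profile = np.array([[0.0, 1.0, 3.0],
                    [2.0, 5.0, 4.0]])
# Peak-to-peak range per row; works on NumPy 1.x and 2.x, unlike profile.ptp(axis=1).
print(np.ptp(profile, axis=1))  # [3. 3.]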

@@ -487,11 +487,11 @@ class TestFreeFlame(utilities.CanteraTest):
         self.sim.transport_model = 'unity-Lewis-number'
         self.sim.set_refine_criteria(ratio=3.0, slope=0.08, curve=0.12)
         self.sim.solve(loglevel=0, auto=True)
-        dh_unity_lewis = self.sim.enthalpy_mass.ptp()
+        dh_unity_lewis = np.ptp(self.sim.enthalpy_mass)
         self.sim.transport_model = 'mixture-averaged'
         self.sim.solve(loglevel=0)
-        dh_mix = self.sim.enthalpy_mass.ptp()
+        dh_mix = np.ptp(self.sim.enthalpy_mass)
         # deviation of enthalpy should be much lower for unity Le model (tends
         # towards zero as grid is refined)

@@ -1851,7 +1851,7 @@ class TestExtensible3(utilities.CanteraTest):
 class InterfaceReactionTests(ReactionTests):
     # test suite for surface reaction expressions
 
-    _value = np.NAN  # reference value
+    _value = np.nan  # reference value
     _coverage_deps = None
 
     @classmethod

@@ -2383,7 +2383,9 @@ class TestReactorSensitivities(utilities.CanteraTest):
         return dtdp
 
     # See https://github.com/Cantera/enhancements/issues/55
+    # @todo: replace np.trapz with np.trapezoid when dropping support for NumPy 1.x
     @unittest.skip("Integration of sensitivity ODEs is unreliable")
+    @pytest.mark.filterwarnings("ignore:`trapz` is deprecated")
     def test_ignition_delay_sensitivity(self):
         species = ('H2', 'H', 'O2', 'H2O2', 'H2O', 'OH', 'HO2')
         dtigdh_cvodes = self.calc_dtdh(species)
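
Aside (illustration only; this test is hypothetical and not part of the commit): the filterwarnings marker added above keeps a test that still calls np.trapz from turning the NumPy 2.0 DeprecationWarning into a failure when warnings are treated as errors. The same pattern in isolation:

import numpy as np
import pytest

@pytest.mark.filterwarnings("ignore:`trapz` is deprecated")
def test_sine_integral():
    x = np.linspace(0.0, np.pi, 201)
    # np.trapz still exists in NumPy 2.0 but warns; the marker silences it
    # until the code can switch to np.trapezoid unconditionally.
    assert abs(np.trapz(np.sin(x), x) - 2.0) < 1e-3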

@@ -1208,6 +1208,8 @@ class TestPlasmaPhase(utilities.CanteraTest):
                          (ct.avogadro * ct.electron_charge))
         self.assertNear(mean_electron_energy, self.phase.mean_electron_energy)
 
+    # @todo: replace np.trapz with np.trapezoid when dropping support for NumPy 1.x
+    @pytest.mark.filterwarnings("ignore:`trapz` is deprecated")
     def test_discretized_electron_energy_distribution(self):
         levels = np.array([0.0, 1.0, 10.0])
         dist = np.array([0.0, 0.9, 0.01])

@@ -181,10 +181,10 @@ def compareProfiles(reference, sample, rtol=1e-5, atol=1e-12, xtol=1e-5):
     bad = []
     template = '{0:9.4e} {1: 3d} {2:14.7e} {3:14.7e} {4:9.3e} {5:9.3e} {6:9.3e}'
     for i in range(1, nVars):
-        scale = max(max(abs(reference[i])), reference[i].ptp(),
-                    max(abs(sample[i])), sample[i].ptp())
+        scale = max(max(abs(reference[i])), np.ptp(reference[i]),
+                    max(abs(sample[i])), np.ptp(sample[i]))
         slope = np.zeros(nTimes)
-        slope[1:] = np.diff(reference[i]) / np.diff(reference[0]) * reference[0].ptp()
+        slope[1:] = np.diff(reference[i]) / np.diff(reference[0]) * np.ptp(reference[0])
         comp = np.interp(reference[0], sample[0], sample[i])
         for j in range(nTimes):