Improves error handling and reporting in parallel runs.

As dumping the ErrorGuard also prints the collected warnings, we now
always dump it (previously this happened only on error) so that these
messages show up in the console.

If errors are encountered, we log a meaningful error message
(previously the real cause was missing) and exit gracefully
after MPI_Finalize.
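The shape of that shutdown path is sketched below as a stand-alone
helper. The function name exitIfAnyRankFailed and its wrapping as a
separate function are illustrative only; the calls themselves
(comm.min() on the success flag, OpmLog::error() on rank 0,
MPI_Finalize(), exit) are the ones used in the diff:

    #include <cstdlib>
    #include <string>

    #include <dune/common/parallel/mpihelper.hh>
    #include <opm/common/OpmLog/OpmLog.hpp>

    // Illustrative helper, not part of this commit. Assumes Dune::MPIHelper
    // was already initialised by the caller (as it is before readDeck runs).
    // Every rank passes its local success flag (1 = ok, 0 = failure) and the
    // failure message it stored (empty on healthy ranks).
    void exitIfAnyRankFailed(int parseSuccess, int rank, const std::string& failureMessage)
    {
        auto comm = Dune::MPIHelper::getCollectiveCommunication();
        parseSuccess = comm.min(parseSuccess); // becomes 0 everywhere if any rank failed

        if (!parseSuccess) {
            if (rank == 0)
                Opm::OpmLog::error("Unrecoverable errors were encountered while loading input: "
                                   + failureMessage);
    #if HAVE_MPI
            MPI_Finalize();    // graceful shutdown instead of MPI_Abort
    #endif
            std::exit(1);
        }
    }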
Markus Blatt 2020-09-14 12:53:02 +02:00
parent c219b61c65
commit 519e4ac002

@@ -170,9 +170,7 @@ void readDeck(int rank, std::string& deckFilename, std::unique_ptr<Opm::Deck>& d
         errorGuard = std::make_unique<ErrorGuard>();
     }
 
-#if HAVE_MPI
-    int parseSuccess = 0;
-#endif
+    int parseSuccess = 1; // > 0 is success
     std::string failureMessage;
 
     if (rank==0) {
@@ -223,13 +221,12 @@ void readDeck(int rank, std::string& deckFilename, std::unique_ptr<Opm::Deck>& d
             }
             if (!summaryConfig)
                 summaryConfig = std::make_unique<Opm::SummaryConfig>(*deck, *schedule, eclipseState->getTableManager(), *parseContext, *errorGuard);
-#if HAVE_MPI
-            parseSuccess = 1;
-#endif
         }
         catch(const std::exception& e)
         {
             failureMessage = e.what();
+            parseSuccess = 0;
         }
     }
 #if HAVE_MPI
@@ -241,28 +238,66 @@ void readDeck(int rank, std::string& deckFilename, std::unique_ptr<Opm::Deck>& d
         if (!eclipseState)
             eclipseState = std::make_unique<Opm::ParallelEclipseState>();
     }
-    auto comm = Dune::MPIHelper::getCollectiveCommunication();
-    parseSuccess = comm.max(parseSuccess);
-    if (!parseSuccess)
-    {
-        if (*errorGuard) {
-            errorGuard->dump();
-            errorGuard->clear();
-        }
-        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
-    }
-    Opm::eclStateBroadcast(*eclipseState, *schedule, *summaryConfig);
 #endif
-    Opm::checkConsistentArrayDimensions(*eclipseState, *schedule, *parseContext, *errorGuard);
-    if (*errorGuard) {
+    if (*errorGuard) { // errors encountered
+        parseSuccess = 0;
+    }
+    // print errors and warnings!
     errorGuard->dump();
     errorGuard->clear();
-        throw std::runtime_error("Unrecoverable errors were encountered while loading input.");
+    auto comm = Dune::MPIHelper::getCollectiveCommunication();
+    parseSuccess = comm.min(parseSuccess);
+    if (!parseSuccess)
+    {
+        if (rank == 0)
+        {
+            OpmLog::error(std::string("Unrecoverable errors were encountered while loading input: ")+failureMessage);
+        }
+#if HAVE_MPI
+        MPI_Finalize();
+#endif
+        exit(1);
+    }
+    parseSuccess = 1;
+    try
+    {
+#if HAVE_MPI
+        Opm::eclStateBroadcast(*eclipseState, *schedule, *summaryConfig);
+#endif
+        Opm::checkConsistentArrayDimensions(*eclipseState, *schedule, *parseContext, *errorGuard);
+    }
+    catch(const std::exception& e)
+    {
+        failureMessage = e.what();
+        parseSuccess = 0;
+    }
+    if (*errorGuard) { // errors encountered
+        parseSuccess = 0;
+    }
+    // Print warnings and errors on every rank! Maybe too much?
+    errorGuard->dump();
+    errorGuard->clear();
+    parseSuccess = comm.min(parseSuccess);
+    if (!parseSuccess)
+    {
+        if (rank == 0)
+        {
+            OpmLog::error(std::string("Unrecoverable errors were encountered while loading input: ")+failureMessage);
+        }
+#if HAVE_MPI
+        MPI_Finalize();
+#endif
+        exit(1);
     }
 }
 } // end namespace Opm