Merge pull request #1623 from atgeirr/use-convergencereport-gather

Use gatherConvergenceReport() in BlackoilWellModel.
Kai Bao 2018-11-13 15:16:34 +01:00 committed by GitHub
commit 2ed2c20f12
6 changed files with 238 additions and 222 deletions
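
In summary: BlackoilWellModel<TypeTag>::getWellConvergence() used to return a bool and to synchronize three separate flags across processes with grid.comm() reductions. It now returns a ConvergenceReport that is combined across all MPI ranks by the new gatherConvergenceReport() function, so every rank holds the same global report, including failures from wells owned by other ranks. The resulting call pattern, as a sketch assembled from the hunks below (not standalone code):

// Sum the reports of the locally owned wells, then gather once;
// 'report' is identical on all ranks afterwards.
ConvergenceReport local_report;
for (const auto& well : well_container_) {
    local_report += well->getWellConvergence(B_avg);
}
ConvergenceReport report = gatherConvergenceReport(local_report);
const bool converged = report.converged();  // now a global verdict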

CMakeLists.txt

@@ -116,6 +116,7 @@ list (APPEND MAIN_SOURCE_FILES
   opm/simulators/timestepping/TimeStepControl.cpp
   opm/simulators/timestepping/AdaptiveSimulatorTimer.cpp
   opm/simulators/timestepping/SimulatorTimer.cpp
+  opm/simulators/timestepping/gatherConvergenceReport.cpp
   )
 
 if(PETSc_FOUND)

@@ -448,4 +449,5 @@ list (APPEND PUBLIC_HEADER_FILES
   opm/simulators/timestepping/TimeStepControlInterface.hpp
   opm/simulators/timestepping/SimulatorTimer.hpp
   opm/simulators/timestepping/SimulatorTimerInterface.hpp
+  opm/simulators/timestepping/gatherConvergenceReport.hpp
   )

opm/autodiff/BlackoilModelEbos.hpp

@@ -824,9 +824,10 @@ namespace Opm {
                 residual_norms.push_back(CNV[compIdx]);
             }
-            const bool converged_Well = wellModel().getWellConvergence(B_avg);
-            bool converged = converged_MB && converged_Well;
+            const auto report_well = wellModel().getWellConvergence(B_avg);
+            const bool converged_well = report_well.converged();
+            bool converged = converged_MB && converged_well;
             converged = converged && converged_CNV;
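
Because getWellConvergence() now returns the full report rather than a bool, callers can do more than test report_well.converged(): the per-well failures are available for diagnostics. A hedged, self-contained sketch using only the accessors visible in this diff (converged(), wellFailures(), phase(), wellName()); the helper itself is illustrative and not part of the PR:

#include <opm/simulators/timestepping/ConvergenceReport.hpp>

#include <iostream>

// Illustrative helper: print which wells failed and for which phase.
void printWellFailures(const Opm::ConvergenceReport& report)
{
    if (report.converged()) {
        return;
    }
    for (const auto& f : report.wellFailures()) {
        std::cout << "well " << f.wellName()
                  << " failed for phase " << f.phase() << "\n";
    }
}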

opm/autodiff/BlackoilWellModel.hpp

@@ -52,6 +52,7 @@
 #include <opm/autodiff/StandardWell.hpp>
 #include <opm/autodiff/MultisegmentWell.hpp>
 #include <opm/autodiff/Compat.hpp>
+#include <opm/simulators/timestepping/gatherConvergenceReport.hpp>
 #include<opm/autodiff/SimFIBODetails.hpp>
 #include<dune/common/fmatrix.hh>
 #include<dune/istl/bcrsmatrix.hh>

@@ -212,7 +213,7 @@ namespace Opm {
         void applyScaleAdd(const Scalar alpha, const BVector& x, BVector& Ax) const;
 
         // Check if well equations is converged.
-        bool getWellConvergence(const std::vector<Scalar>& B_avg) const;
+        ConvergenceReport getWellConvergence(const std::vector<Scalar>& B_avg) const;
 
         // return all the wells.
         const WellCollection& wellCollection() const;

opm/autodiff/BlackoilWellModel_impl.hpp

@@ -726,7 +726,8 @@ namespace Opm {
             do {
                 assembleWellEq(dt);
-                converged = getWellConvergence(B_avg);
+                const auto report = getWellConvergence(B_avg);
+                converged = report.converged();
 
                 // checking whether the group targets are converged
                 if (wellCollection().groupControlActive()) {
@@ -783,65 +784,35 @@ namespace Opm {
     template<typename TypeTag>
-    bool
+    ConvergenceReport
     BlackoilWellModel<TypeTag>::
     getWellConvergence(const std::vector<Scalar>& B_avg) const
     {
-        ConvergenceReport report;
+        // Get global (from all processes) convergence report.
+        ConvergenceReport local_report;
         for (const auto& well : well_container_) {
-            report += well->getWellConvergence(B_avg);
+            local_report += well->getWellConvergence(B_avg);
         }
+        ConvergenceReport report = gatherConvergenceReport(local_report);
+
+        // Log debug messages for NaN or too large residuals.
+        for (const auto& f : report.wellFailures()) {
+            if (f.severity() == ConvergenceReport::Severity::NotANumber) {
+                OpmLog::debug("NaN residual found with phase " + std::to_string(f.phase()) + " for well " + f.wellName());
+            } else if (f.severity() == ConvergenceReport::Severity::TooLarge) {
+                OpmLog::debug("Too large residual found with phase " + std::to_string(f.phase()) + " for well " + f.wellName());
+            }
+        }
+
+        // Throw if any NaN or too large residual found.
         ConvergenceReport::Severity severity = report.severityOfWorstFailure();
-
-        // checking NaN residuals
-        {
-            // Debug reporting.
-            for (const auto& f : report.wellFailures()) {
-                if (f.severity() == ConvergenceReport::Severity::NotANumber) {
-                    OpmLog::debug("NaN residual found with phase " + std::to_string(f.phase()) + " for well " + f.wellName());
-                }
-            }
-            // Throw if any nan residual found.
-            bool nan_residual_found = (severity == ConvergenceReport::Severity::NotANumber);
-            const auto& grid = ebosSimulator_.vanguard().grid();
-            int value = nan_residual_found ? 1 : 0;
-            nan_residual_found = grid.comm().max(value);
-            if (nan_residual_found) {
-                OPM_THROW(Opm::NumericalIssue, "NaN residual found!");
-            }
-        }
-        // checking too large residuals
-        {
-            // Debug reporting.
-            for (const auto& f : report.wellFailures()) {
-                if (f.severity() == ConvergenceReport::Severity::TooLarge) {
-                    OpmLog::debug("Too large residual found with phase " + std::to_string(f.phase()) + " for well " + f.wellName());
-                }
-            }
-            // Throw if any too large residual found.
-            bool too_large_residual_found = (severity == ConvergenceReport::Severity::TooLarge);
-            const auto& grid = ebosSimulator_.vanguard().grid();
-            int value = too_large_residual_found ? 1 : 0;
-            too_large_residual_found = grid.comm().max(value);
-            if (too_large_residual_found) {
-                OPM_THROW(Opm::NumericalIssue, "Too large residual found!");
-            }
-        }
-        // checking convergence
-        bool converged_well = report.converged();
-        {
-            const auto& grid = ebosSimulator_.vanguard().grid();
-            int value = converged_well ? 1 : 0;
-            converged_well = grid.comm().min(value);
-        }
-        return converged_well;
+        if (severity == ConvergenceReport::Severity::NotANumber) {
+            OPM_THROW(Opm::NumericalIssue, "NaN residual found!");
+        } else if (severity == ConvergenceReport::Severity::TooLarge) {
+            OPM_THROW(Opm::NumericalIssue, "Too large residual found!");
+        }
+
+        return report;
     }
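
The old implementation issued up to three separate collectives per convergence check, one per question asked of the report, using the idiom sketched below. The new code pays for a single MPI_Allgatherv inside gatherConvergenceReport() and then answers all three questions (NaN, too-large, converged) locally from the global report. A sketch of the retired idiom, with Comm standing in for the Dune collective-communication type returned by grid.comm():

// Retired pattern: one collective reduction per boolean property.
template <class Comm>
bool foundOnAnyRank(bool local_flag, const Comm& comm)
{
    // comm.max() is itself a collective call, so checking the NaN,
    // too-large and converged properties separately cost three
    // reductions per convergence check.
    return comm.max(local_flag ? 1 : 0) == 1;
}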

opm/simulators/timestepping/gatherConvergenceReport.cpp (new file)

@@ -0,0 +1,207 @@
/*
  Copyright 2018 SINTEF Digital, Mathematics and Cybernetics.
  Copyright 2018 Equinor.

  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#include "config.h"
#include <opm/simulators/timestepping/gatherConvergenceReport.hpp>
#if HAVE_MPI
#include <mpi.h>

namespace
{

    using Opm::ConvergenceReport;

    void packReservoirFailure(const ConvergenceReport::ReservoirFailure& f,
                              std::vector<char>& buf,
                              int& offset)
    {
        int type = static_cast<int>(f.type());
        int severity = static_cast<int>(f.severity());
        int phase = f.phase();
        MPI_Pack(&type, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD);
        MPI_Pack(&severity, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD);
        MPI_Pack(&phase, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD);
    }

    void packWellFailure(const ConvergenceReport::WellFailure& f,
                         std::vector<char>& buf,
                         int& offset)
    {
        int type = static_cast<int>(f.type());
        int severity = static_cast<int>(f.severity());
        int phase = f.phase();
        MPI_Pack(&type, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD);
        MPI_Pack(&severity, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD);
        MPI_Pack(&phase, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD);
        int name_length = f.wellName().size() + 1; // Adding 1 for the null terminator.
        MPI_Pack(&name_length, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD);
        MPI_Pack(f.wellName().c_str(), name_length, MPI_CHAR, buf.data(), buf.size(), &offset, MPI_COMM_WORLD);
    }

    void packConvergenceReport(const ConvergenceReport& local_report,
                               std::vector<char>& buf,
                               int& offset)
    {
        // Pack the data.
        // Status will not be packed, it is possible to deduce from the other data.
        // Reservoir failures.
        const auto rf = local_report.reservoirFailures();
        int num_rf = rf.size();
        MPI_Pack(&num_rf, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD);
        for (const auto f : rf) {
            packReservoirFailure(f, buf, offset);
        }
        // Well failures.
        const auto wf = local_report.wellFailures();
        int num_wf = wf.size();
        MPI_Pack(&num_wf, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD);
        for (const auto f : wf) {
            packWellFailure(f, buf, offset);
        }
    }

    int messageSize(const ConvergenceReport& local_report)
    {
        int int_pack_size = 0;
        MPI_Pack_size(1, MPI_INT, MPI_COMM_WORLD, &int_pack_size);
        const int num_rf = local_report.reservoirFailures().size();
        const int num_wf = local_report.wellFailures().size();
        int wellnames_length = 0;
        for (const auto f : local_report.wellFailures()) {
            wellnames_length += (f.wellName().size() + 1);
        }
        return (2 + 3*num_rf + 4*num_wf) * int_pack_size + wellnames_length;
    }

    ConvergenceReport::ReservoirFailure unpackReservoirFailure(const std::vector<char>& recv_buffer, int& offset)
    {
        int type = -1;
        int severity = -1;
        int phase = -1;
        MPI_Unpack(recv_buffer.data(), recv_buffer.size(), &offset, &type, 1, MPI_INT, MPI_COMM_WORLD);
        MPI_Unpack(recv_buffer.data(), recv_buffer.size(), &offset, &severity, 1, MPI_INT, MPI_COMM_WORLD);
        MPI_Unpack(recv_buffer.data(), recv_buffer.size(), &offset, &phase, 1, MPI_INT, MPI_COMM_WORLD);
        return ConvergenceReport::ReservoirFailure(static_cast<ConvergenceReport::ReservoirFailure::Type>(type),
                                                   static_cast<ConvergenceReport::Severity>(severity),
                                                   phase);
    }

    ConvergenceReport::WellFailure unpackWellFailure(const std::vector<char>& recv_buffer, int& offset)
    {
        int type = -1;
        int severity = -1;
        int phase = -1;
        MPI_Unpack(recv_buffer.data(), recv_buffer.size(), &offset, &type, 1, MPI_INT, MPI_COMM_WORLD);
        MPI_Unpack(recv_buffer.data(), recv_buffer.size(), &offset, &severity, 1, MPI_INT, MPI_COMM_WORLD);
        MPI_Unpack(recv_buffer.data(), recv_buffer.size(), &offset, &phase, 1, MPI_INT, MPI_COMM_WORLD);
        int name_length = -1;
        MPI_Unpack(recv_buffer.data(), recv_buffer.size(), &offset, &name_length, 1, MPI_INT, MPI_COMM_WORLD);
        std::vector<char> namechars(name_length);
        MPI_Unpack(recv_buffer.data(), recv_buffer.size(), &offset, namechars.data(), name_length, MPI_CHAR, MPI_COMM_WORLD);
        std::string name(namechars.data());
        return ConvergenceReport::WellFailure(static_cast<ConvergenceReport::WellFailure::Type>(type),
                                              static_cast<ConvergenceReport::Severity>(severity),
                                              phase,
                                              name);
    }

    ConvergenceReport unpackSingleConvergenceReport(const std::vector<char>& recv_buffer, int& offset)
    {
        ConvergenceReport cr;
        int num_rf = -1;
        MPI_Unpack(recv_buffer.data(), recv_buffer.size(), &offset, &num_rf, 1, MPI_INT, MPI_COMM_WORLD);
        for (int rf = 0; rf < num_rf; ++rf) {
            ConvergenceReport::ReservoirFailure f = unpackReservoirFailure(recv_buffer, offset);
            cr.setReservoirFailed(f);
        }
        int num_wf = -1;
        MPI_Unpack(recv_buffer.data(), recv_buffer.size(), &offset, &num_wf, 1, MPI_INT, MPI_COMM_WORLD);
        for (int wf = 0; wf < num_wf; ++wf) {
            ConvergenceReport::WellFailure f = unpackWellFailure(recv_buffer, offset);
            cr.setWellFailed(f);
        }
        return cr;
    }

    ConvergenceReport unpackConvergenceReports(const std::vector<char>& recv_buffer,
                                               const std::vector<int>& displ)
    {
        ConvergenceReport cr;
        const int num_processes = displ.size() - 1;
        for (int process = 0; process < num_processes; ++process) {
            int offset = displ[process];
            cr += unpackSingleConvergenceReport(recv_buffer, offset);
            assert(offset == displ[process + 1]);
        }
        return cr;
    }

} // anonymous namespace


namespace Opm
{

    /// Create a global convergence report combining local
    /// (per-process) reports.
    ConvergenceReport gatherConvergenceReport(const ConvergenceReport& local_report)
    {
        // Pack local report.
        const int message_size = messageSize(local_report);
        std::vector<char> buffer(message_size);
        int offset = 0;
        packConvergenceReport(local_report, buffer, offset);
        assert(offset == message_size);

        // Get message sizes and create offset/displacement array for gathering.
        int num_processes = -1;
        MPI_Comm_size(MPI_COMM_WORLD, &num_processes);
        std::vector<int> message_sizes(num_processes);
        MPI_Allgather(&message_size, 1, MPI_INT, message_sizes.data(), 1, MPI_INT, MPI_COMM_WORLD);
        std::vector<int> displ(num_processes + 1, 0);
        std::partial_sum(message_sizes.begin(), message_sizes.end(), displ.begin() + 1);

        // Gather.
        std::vector<char> recv_buffer(displ.back());
        MPI_Allgatherv(buffer.data(), buffer.size(), MPI_PACKED,
                       recv_buffer.data(), message_sizes.data(), displ.data(), MPI_PACKED,
                       MPI_COMM_WORLD);

        // Unpack.
        ConvergenceReport global_report = unpackConvergenceReports(recv_buffer, displ);
        return global_report;
    }

} // namespace Opm

#else // HAVE_MPI

namespace Opm
{
    ConvergenceReport gatherConvergenceReport(const ConvergenceReport& local_report)
    {
        return local_report;
    }
} // namespace Opm

#endif // HAVE_MPI
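
The wire format above is, per rank: an int count of reservoir failures followed by (type, severity, phase) triples, then an int count of well failures followed by (type, severity, phase, name length, name characters) records. That is why messageSize() returns (2 + 3*num_rf + 4*num_wf) ints plus the well-name characters. The same size/pack/Allgatherv/unpack pattern works for any variable-length payload; below is a self-contained sketch, with hypothetical names and one string per rank, that compiles against plain MPI:

#include <mpi.h>

#include <iostream>
#include <numeric>
#include <string>
#include <vector>

// Gather one variable-length string per rank; every rank receives the
// concatenation in rank order.
std::string gatherStrings(const std::string& local)
{
    // Pack: the length as an int, then the characters (with terminator),
    // mirroring packWellFailure() above.
    int int_size = 0;
    MPI_Pack_size(1, MPI_INT, MPI_COMM_WORLD, &int_size);
    const int len = static_cast<int>(local.size()) + 1;
    const int message_size = int_size + len;
    std::vector<char> buf(message_size);
    int offset = 0;
    MPI_Pack(&len, 1, MPI_INT, buf.data(), message_size, &offset, MPI_COMM_WORLD);
    MPI_Pack(local.c_str(), len, MPI_CHAR, buf.data(), message_size, &offset, MPI_COMM_WORLD);

    // Exchange per-rank message sizes; build displacements by prefix sum.
    int num_procs = 0;
    MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
    std::vector<int> sizes(num_procs);
    MPI_Allgather(&message_size, 1, MPI_INT, sizes.data(), 1, MPI_INT, MPI_COMM_WORLD);
    std::vector<int> displ(num_procs + 1, 0);
    std::partial_sum(sizes.begin(), sizes.end(), displ.begin() + 1);

    // Gather all packed messages on all ranks.
    std::vector<char> recv(displ.back());
    MPI_Allgatherv(buf.data(), message_size, MPI_PACKED,
                   recv.data(), sizes.data(), displ.data(), MPI_PACKED,
                   MPI_COMM_WORLD);

    // Unpack each rank's message, starting at its displacement.
    std::string result;
    for (int p = 0; p < num_procs; ++p) {
        int pos = displ[p];
        int n = 0;
        MPI_Unpack(recv.data(), displ.back(), &pos, &n, 1, MPI_INT, MPI_COMM_WORLD);
        std::vector<char> chars(n);
        MPI_Unpack(recv.data(), displ.back(), &pos, chars.data(), n, MPI_CHAR, MPI_COMM_WORLD);
        result += chars.data();
    }
    return result;
}

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    const std::string all = gatherStrings("rank" + std::to_string(rank) + " ");
    if (rank == 0) {
        std::cout << all << std::endl;  // e.g. "rank0 rank1 rank2 "
    }
    MPI_Finalize();
    return 0;
}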

opm/simulators/timestepping/gatherConvergenceReport.hpp

@@ -23,180 +23,14 @@
 #include <opm/simulators/timestepping/ConvergenceReport.hpp>
 
-#if HAVE_MPI
-#include <mpi.h>
-
 namespace Opm
 {
 
-    void packReservoirFailure(const ConvergenceReport::ReservoirFailure& f,
-                              std::vector<char>& buf,
-                              int& offset)
-    {
-        int type = static_cast<int>(f.type());
-        int severity = static_cast<int>(f.severity());
-        int phase = f.phase();
-        MPI_Pack(&type, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD);
-        MPI_Pack(&severity, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD);
-        MPI_Pack(&phase, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD);
-    }
-
-    void packWellFailure(const ConvergenceReport::WellFailure& f,
-                         std::vector<char>& buf,
-                         int& offset)
-    {
-        int type = static_cast<int>(f.type());
-        int severity = static_cast<int>(f.severity());
-        int phase = f.phase();
-        MPI_Pack(&type, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD);
-        MPI_Pack(&severity, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD);
-        MPI_Pack(&phase, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD);
-        int name_length = f.wellName().size() + 1; // Adding 1 for the null terminator.
-        MPI_Pack(&name_length, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD);
-        MPI_Pack(f.wellName().c_str(), name_length, MPI_CHAR, buf.data(), buf.size(), &offset, MPI_COMM_WORLD);
-    }
-
-    void packConvergenceReport(const ConvergenceReport& local_report,
-                               std::vector<char>& buf,
-                               int& offset)
-    {
-        // Pack the data.
-        // Status will not be packed, it is possible to deduce from the other data.
-        // Reservoir failures.
-        const auto rf = local_report.reservoirFailures();
-        int num_rf = rf.size();
-        MPI_Pack(&num_rf, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD);
-        for (const auto f : rf) {
-            packReservoirFailure(f, buf, offset);
-        }
-        // Well failures.
-        const auto wf = local_report.wellFailures();
-        int num_wf = wf.size();
-        MPI_Pack(&num_wf, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD);
-        for (const auto f : wf) {
-            packWellFailure(f, buf, offset);
-        }
-    }
-
-    int messageSize(const ConvergenceReport& local_report)
-    {
-        int int_pack_size = 0;
-        MPI_Pack_size(1, MPI_INT, MPI_COMM_WORLD, &int_pack_size);
-        const int num_rf = local_report.reservoirFailures().size();
-        const int num_wf = local_report.wellFailures().size();
-        int wellnames_length = 0;
-        for (const auto f : local_report.wellFailures()) {
-            wellnames_length += (f.wellName().size() + 1);
-        }
-        return (2 + 3*num_rf + 4*num_wf) * int_pack_size + wellnames_length;
-    }
-
-    ConvergenceReport::ReservoirFailure unpackReservoirFailure(const std::vector<char>& recv_buffer, int& offset)
-    {
-        int type = -1;
-        int severity = -1;
-        int phase = -1;
-        MPI_Unpack(recv_buffer.data(), recv_buffer.size(), &offset, &type, 1, MPI_INT, MPI_COMM_WORLD);
-        MPI_Unpack(recv_buffer.data(), recv_buffer.size(), &offset, &severity, 1, MPI_INT, MPI_COMM_WORLD);
-        MPI_Unpack(recv_buffer.data(), recv_buffer.size(), &offset, &phase, 1, MPI_INT, MPI_COMM_WORLD);
-        return ConvergenceReport::ReservoirFailure(static_cast<ConvergenceReport::ReservoirFailure::Type>(type),
-                                                   static_cast<ConvergenceReport::Severity>(severity),
-                                                   phase);
-    }
-
-    ConvergenceReport::WellFailure unpackWellFailure(const std::vector<char>& recv_buffer, int& offset)
-    {
-        int type = -1;
-        int severity = -1;
-        int phase = -1;
-        MPI_Unpack(recv_buffer.data(), recv_buffer.size(), &offset, &type, 1, MPI_INT, MPI_COMM_WORLD);
-        MPI_Unpack(recv_buffer.data(), recv_buffer.size(), &offset, &severity, 1, MPI_INT, MPI_COMM_WORLD);
-        MPI_Unpack(recv_buffer.data(), recv_buffer.size(), &offset, &phase, 1, MPI_INT, MPI_COMM_WORLD);
-        int name_length = -1;
-        MPI_Unpack(recv_buffer.data(), recv_buffer.size(), &offset, &name_length, 1, MPI_INT, MPI_COMM_WORLD);
-        std::vector<char> namechars(name_length);
-        MPI_Unpack(recv_buffer.data(), recv_buffer.size(), &offset, namechars.data(), name_length, MPI_CHAR, MPI_COMM_WORLD);
-        std::string name(namechars.data());
-        return ConvergenceReport::WellFailure(static_cast<ConvergenceReport::WellFailure::Type>(type),
-                                              static_cast<ConvergenceReport::Severity>(severity),
-                                              phase,
-                                              name);
-    }
-
-    ConvergenceReport unpackSingleConvergenceReport(const std::vector<char>& recv_buffer, int& offset)
-    {
-        ConvergenceReport cr;
-        int num_rf = -1;
-        MPI_Unpack(recv_buffer.data(), recv_buffer.size(), &offset, &num_rf, 1, MPI_INT, MPI_COMM_WORLD);
-        for (int rf = 0; rf < num_rf; ++rf) {
-            ConvergenceReport::ReservoirFailure f = unpackReservoirFailure(recv_buffer, offset);
-            cr.setReservoirFailed(f);
-        }
-        int num_wf = -1;
-        MPI_Unpack(recv_buffer.data(), recv_buffer.size(), &offset, &num_wf, 1, MPI_INT, MPI_COMM_WORLD);
-        for (int wf = 0; wf < num_wf; ++wf) {
-            ConvergenceReport::WellFailure f = unpackWellFailure(recv_buffer, offset);
-            cr.setWellFailed(f);
-        }
-        return cr;
-    }
-
-    ConvergenceReport unpackConvergenceReports(const std::vector<char>& recv_buffer,
-                                               const std::vector<int>& displ)
-    {
-        ConvergenceReport cr;
-        const int num_processes = displ.size() - 1;
-        for (int process = 0; process < num_processes; ++process) {
-            int offset = displ[process];
-            cr += unpackSingleConvergenceReport(recv_buffer, offset);
-            assert(offset == displ[process + 1]);
-        }
-        return cr;
-    }
-
     /// Create a global convergence report combining local
     /// (per-process) reports.
-    ConvergenceReport gatherConvergenceReport(const ConvergenceReport& local_report)
-    {
-        // Pack local report.
-        const int message_size = messageSize(local_report);
-        std::vector<char> buffer(message_size);
-        int offset = 0;
-        packConvergenceReport(local_report, buffer, offset);
-        assert(offset == message_size);
-
-        // Get message sizes and create offset/displacement array for gathering.
-        int num_processes = -1;
-        MPI_Comm_size(MPI_COMM_WORLD, &num_processes);
-        std::vector<int> message_sizes(num_processes);
-        MPI_Allgather(&message_size, 1, MPI_INT, message_sizes.data(), 1, MPI_INT, MPI_COMM_WORLD);
-        std::vector<int> displ(num_processes + 1, 0);
-        std::partial_sum(message_sizes.begin(), message_sizes.end(), displ.begin() + 1);
-
-        // Gather.
-        std::vector<char> recv_buffer(displ.back());
-        MPI_Allgatherv(buffer.data(), buffer.size(), MPI_PACKED,
-                       recv_buffer.data(), message_sizes.data(), displ.data(), MPI_PACKED,
-                       MPI_COMM_WORLD);
-
-        // Unpack.
-        ConvergenceReport global_report = unpackConvergenceReports(recv_buffer, displ);
-        return global_report;
-    }
+    ConvergenceReport gatherConvergenceReport(const ConvergenceReport& local_report);
 
 } // namespace Opm
 
-#else // HAVE_MPI
-
-namespace Opm
-{
-    ConvergenceReport gatherConvergenceReport(const ConvergenceReport& local_report)
-    {
-        return local_report;
-    }
-} // namespace Opm
-
-#endif // HAVE_MPI
-
 #endif // OPM_GATHERCONVERGENCEREPORT_HEADER_INCLUDED
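
With the implementation moved out of the header, MPI becomes an implementation detail of the .cpp: code that includes this header needs no HAVE_MPI guard, because the serial build's gatherConvergenceReport() simply returns its argument. A trivial sketch of the calling convention (the wrapper function is illustrative, not part of the PR):

#include <opm/simulators/timestepping/gatherConvergenceReport.hpp>

// Works identically in MPI and serial builds: with MPI the local reports
// are combined across ranks, without MPI the local report is returned as-is.
Opm::ConvergenceReport globalReport(const Opm::ConvergenceReport& local)
{
    return Opm::gatherConvergenceReport(local);
}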