Output CNV Histogram to INFOITER File

This commit tracks the number of cells, and their associated
fraction of the model's "eligible" pore volume (the model's total
pore volume minus the pore volume of its numerical aquifers), in
three distinct categories as a function of the non-linear
iteration number:

  - 0: MAX_p { CNV_p } <= strict CNV tolerance
  - 1: strict CNV tolerance < MAX_p { CNV_p } <= relaxed CNV tolerance
  - 2: MAX_p { CNV_p } > relaxed CNV tolerance

We then output these cell counts and pore volume fractions as new
items in the INFOITER file to enable more targeted analysis of the
non-linear convergence behaviour.
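
For concreteness, a minimal standalone sketch of this categorisation
(the tolerances and per-cell data below are made up for illustration
and are not the simulator's defaults):

    // Classify cells by their maximum CNV value and accumulate
    // pore volume and cell counts per category.
    #include <array>
    #include <iostream>
    #include <utility>
    #include <vector>

    int main()
    {
        const double strict_tol  = 1.0e-2;  // hypothetical strict CNV tolerance
        const double relaxed_tol = 1.0;     // hypothetical relaxed CNV tolerance

        // Pairs of (MAX_p { CNV_p }, pore volume) for made-up cells.
        const std::vector<std::pair<double, double>> cells {
            {5.0e-3, 100.0}, {2.0e-1, 50.0}, {3.0, 25.0},
        };

        std::array<double, 3> pvSplit{};
        std::array<int, 3>    cellCnt{};
        for (const auto& [maxCnv, pv] : cells) {
            // 0: converged, 1: relaxed converged, 2: unconverged.
            const auto ix = (maxCnv > strict_tol) + (maxCnv > relaxed_tol);
            pvSplit[ix] += pv;
            ++cellCnt[ix];
        }

        for (int i = 0; i < 3; ++i) {
            std::cout << i << ": " << cellCnt[i] << " cell(s), PV "
                      << pvSplit[i] << '\n';
        }
    }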

To this end, introduce a type alias CnvPvSplit in the
ConvergenceReport class and aggregate the counts and pore volumes
across the MPI ranks before we collect them in the
ConvergenceReport objects.

While here, also reduce the amount of repeated logic in
gatherConvergenceReport.cpp through a few local lambdas.
Bård Skaflestad 2024-05-07 15:49:12 +02:00
parent 0caf92c6a6
commit a801df31fa
4 changed files with 458 additions and 228 deletions

View File

@@ -62,10 +62,12 @@
#include <cmath>
#include <filesystem>
#include <fstream>
#include <functional>
#include <iomanip>
#include <ios>
#include <limits>
#include <memory>
#include <numeric>
#include <sstream>
#include <tuple>
#include <utility>
@@ -833,57 +835,88 @@ namespace Opm {
}
/// \brief Compute the total pore volume of cells violating CNV that are not part
/// of a numerical aquifer.
Scalar computeCnvErrorPv(const std::vector<Scalar>& B_avg, double dt)
/// \brief Compute pore-volume/cell count split among "converged",
/// "relaxed converged", "unconverged" cells based on CNV point
/// measures.
std::pair<std::vector<double>, std::vector<int>>
characteriseCnvPvSplit(const std::vector<Scalar>& B_avg, const double dt)
{
OPM_TIMEBLOCK(computeCnvErrorPv);
Scalar errorPV{};
const auto& model = simulator_.model();
const auto& problem = simulator_.problem();
const auto& residual = simulator_.model().linearizer().residual();
const auto& gridView = simulator().gridView();
ElementContext elemCtx(simulator_);
IsNumericalAquiferCell isNumericalAquiferCell(gridView.grid());
// 0: cnv <= tolerance_cnv
// 1: tolerance_cnv < cnv <= tolerance_cnv_relaxed
// 2: tolerance_cnv_relaxed < cnv
constexpr auto numPvGroups = std::vector<double>::size_type{3};
auto cnvPvSplit = std::pair<std::vector<double>, std::vector<int>> {
std::piecewise_construct,
std::forward_as_tuple(numPvGroups),
std::forward_as_tuple(numPvGroups)
};
auto maxCNV = [&B_avg, dt](const auto& residual, const double pvol)
{
return (dt / pvol) *
std::inner_product(residual.begin(), residual.end(),
B_avg.begin(), Scalar{0},
[](const Scalar m, const auto& x)
{
using std::abs;
return std::max(m, abs(x));
}, std::multiplies<>{});
};
auto& [splitPV, cellCntPV] = cnvPvSplit;
const auto& model = this->simulator().model();
const auto& problem = this->simulator().problem();
const auto& residual = model.linearizer().residual();
const auto& gridView = this->simulator().gridView();
const IsNumericalAquiferCell isNumericalAquiferCell(gridView.grid());
ElementContext elemCtx(this->simulator());
OPM_BEGIN_PARALLEL_TRY_CATCH();
for (const auto& elem : elements(gridView, Dune::Partitions::interior))
{
for (const auto& elem : elements(gridView, Dune::Partitions::interior)) {
// Skip cells of numerical Aquifer
if (isNumericalAquiferCell(elem))
{
if (isNumericalAquiferCell(elem)) {
continue;
}
elemCtx.updatePrimaryStencil(elem);
// elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0);
const unsigned cell_idx = elemCtx.globalSpaceIndex(/*spaceIdx=*/0, /*timeIdx=*/0);
const Scalar pvValue = problem.referencePorosity(cell_idx, /*timeIdx=*/0) * model.dofTotalVolume(cell_idx);
const auto& cellResidual = residual[cell_idx];
bool cnvViolated = false;
const auto pvValue = problem.referencePorosity(cell_idx, /*timeIdx=*/0)
* model.dofTotalVolume(cell_idx);
for (unsigned eqIdx = 0; eqIdx < cellResidual.size(); ++eqIdx)
{
using std::abs;
Scalar CNV = cellResidual[eqIdx] * dt * B_avg[eqIdx] / pvValue;
cnvViolated = cnvViolated || (abs(CNV) > param_.tolerance_cnv_);
}
const auto maxCnv = maxCNV(residual[cell_idx], pvValue);
if (cnvViolated)
{
errorPV += pvValue;
}
const auto ix = (maxCnv > this->param_.tolerance_cnv_)
+ (maxCnv > this->param_.tolerance_cnv_relaxed_);
splitPV[ix] += static_cast<double>(pvValue);
++cellCntPV[ix];
}
OPM_END_PARALLEL_TRY_CATCH("BlackoilModel::ComputeCnvError() failed: ", grid_.comm());
OPM_END_PARALLEL_TRY_CATCH("BlackoilModel::characteriseCnvPvSplit() failed: ",
this->grid_.comm());
return grid_.comm().sum(errorPV);
this->grid_.comm().sum(splitPV .data(), splitPV .size());
this->grid_.comm().sum(cellCntPV.data(), cellCntPV.size());
return cnvPvSplit;
}
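
Aside: the maxCNV lambda above folds std::inner_product into a
max-reduction, with std::max playing the role of the "sum" and
element-wise multiplication that of the "product". A standalone
illustration of the trick, with made-up numbers in plain double
precision:

    #include <algorithm>
    #include <cmath>
    #include <functional>
    #include <iostream>
    #include <numeric>
    #include <vector>

    int main()
    {
        const std::vector<double> residual { -0.5, 2.0, -3.0 };  // made up
        const std::vector<double> B_avg    {  1.0, 0.1,  0.5 };  // made up
        const double dt = 10.0, pvol = 100.0;                    // made up

        // acc = max(acc, |residual[e] * B_avg[e]|) over all e.
        const double maxCnv = (dt / pvol) *
            std::inner_product(residual.begin(), residual.end(),
                               B_avg.begin(), 0.0,
                               [](const double m, const double x)
                               { return std::max(m, std::abs(x)); },
                               std::multiplies<>{});

        std::cout << maxCnv << '\n';  // 0.1 * max(0.5, 0.2, 1.5) = 0.15
    }
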
void updateTUNING(const Tuning& tuning) {
param_.tolerance_mb_ = tuning.XXXMBE;
if ( terminal_output_ ) {
OpmLog::debug(fmt::format("Setting BlackoilModel mass balance limit (XXXMBE) to {:.2e}", tuning.XXXMBE));
void updateTUNING(const Tuning& tuning)
{
this->param_.tolerance_mb_ = tuning.XXXMBE;
if (terminal_output_) {
OpmLog::debug(fmt::format("Setting BlackoilModel mass "
"balance limit (XXXMBE) to {:.2e}",
tuning.XXXMBE));
}
}
@@ -898,54 +931,93 @@ namespace Opm {
OPM_TIMEBLOCK(getReservoirConvergence);
using Vector = std::vector<Scalar>;
ConvergenceReport report{reportTime};
const int numComp = numEq;
Vector R_sum(numComp, 0.0 );
Vector maxCoeff(numComp, std::numeric_limits< Scalar >::lowest() );
Vector R_sum(numComp, Scalar{0});
Vector maxCoeff(numComp, std::numeric_limits<Scalar>::lowest());
std::vector<int> maxCoeffCell(numComp, -1);
const auto [ pvSumLocal, numAquiferPvSumLocal] =
const auto [pvSumLocal, numAquiferPvSumLocal] =
this->localConvergenceData(R_sum, maxCoeff, B_avg, maxCoeffCell);
// compute global sum and max of quantities
const auto [ pvSum, numAquiferPvSum ] =
convergenceReduction(grid_.comm(), pvSumLocal,
numAquiferPvSumLocal,
R_sum, maxCoeff, B_avg);
const auto& [pvSum, numAquiferPvSum] =
this->convergenceReduction(this->grid_.comm(),
pvSumLocal,
numAquiferPvSumLocal,
R_sum, maxCoeff, B_avg);
const auto cnvErrorPvFraction =
computeCnvErrorPv(B_avg, dt)
/ (pvSum - numAquiferPvSum);
report.setCnvPoreVolSplit(this->characteriseCnvPvSplit(B_avg, dt),
pvSum - numAquiferPvSum);
// For each iteration, we need to determine whether to use the relaxed tolerances.
// To disable the usage of relaxed tolerances, you can set the relaxed tolerances as the strict tolerances.
// For each iteration, we need to determine whether to use the
// relaxed tolerances. To disable the usage of relaxed
// tolerances, you can set the relaxed tolerances as the strict
// tolerances. If min_strict_mb_iter = -1 (default) we use a
// relaxed tolerance for the mass balance for the last
// iterations. For positive values we use the relaxed tolerance
// after the given number of iterations
const bool relax_final_iteration_mb =
(this->param_.min_strict_mb_iter_ < 0)
&& (iteration == maxIter);
// If min_strict_mb_iter = -1 (default) we use a relaxed tolerance for the mass balance for the last iterations
// For positive values we use the relaxed tolerance after the given number of iterations
const bool relax_final_iteration_mb = (param_.min_strict_mb_iter_ < 0) && (iteration == maxIter);
const bool use_relaxed_mb = relax_final_iteration_mb || (param_.min_strict_mb_iter_ >= 0 && iteration >= param_.min_strict_mb_iter_);
const bool use_relaxed_mb = relax_final_iteration_mb ||
((this->param_.min_strict_mb_iter_ >= 0) &&
(iteration >= this->param_.min_strict_mb_iter_));
// If min_strict_cnv_iter = -1 we use a relaxed tolerance for
// the cnv for the last iterations. For positive values we use
// the relaxed tolerance after the given number of iterations.
// We also use relaxed tolerances for cells with total
// pore-volume less than relaxed_max_pv_fraction_. Default
// value of relaxed_max_pv_fraction_ is 0.03
const bool relax_final_iteration_cnv =
(this->param_.min_strict_cnv_iter_ < 0)
&& (iteration == maxIter);
// If min_strict_cnv_iter = -1 we use a relaxed tolerance for the cnv for the last iterations
// For positive values we use the relaxed tolerance after the given number of iterations
// We also use relaxed tolerances for cells with total poro volume less than relaxed_max_pv_fraction_
// Default value of relaxed_max_pv_fraction_ is 0.03
const bool relax_final_iteration_cnv = (param_.min_strict_cnv_iter_ < 0) && (iteration == maxIter);
const bool relax_iter_cnv = param_.min_strict_mb_iter_ >= 0 && iteration >= param_.min_strict_mb_iter_;
const bool relax_pv_fraction_cnv = cnvErrorPvFraction < param_.relaxed_max_pv_fraction_;
const bool use_relaxed_cnv = relax_final_iteration_cnv || relax_pv_fraction_cnv || relax_iter_cnv;
const bool relax_iter_cnv = (this->param_.min_strict_mb_iter_ >= 0)
&& (iteration >= this->param_.min_strict_mb_iter_);
if (relax_final_iteration_mb || relax_final_iteration_cnv) {
if ( terminal_output_ ) {
std::string message = "Number of newton iterations reached its maximum try to continue with relaxed tolerances:";
if (relax_final_iteration_mb)
message += fmt::format(" MB: {:.1e}", param_.tolerance_mb_relaxed_);
if (relax_final_iteration_cnv)
message += fmt::format(" CNV: {:.1e}", param_.tolerance_cnv_relaxed_);
OpmLog::debug(message);
}
}
// Note trailing parentheses here, just before the final
// semicolon. This is an immediately invoked function
// expression which calculates a single boolean value.
const auto relax_pv_fraction_cnv =
[&report, pvSum, numAquiferPvSum, this]()
{
const auto& cnvPvSplit = report.cnvPvSplit().first;
// [1]: tol < cnv <= relaxed
// [2]: relaxed < cnv
return static_cast<Scalar>(cnvPvSplit[1] + cnvPvSplit[2]) <
this->param_.relaxed_max_pv_fraction_ * (pvSum - numAquiferPvSum);
}();
const bool use_relaxed_cnv = relax_final_iteration_cnv
|| relax_pv_fraction_cnv
|| relax_iter_cnv;
if ((relax_final_iteration_mb || relax_final_iteration_cnv) &&
this->terminal_output_)
{
std::string message =
"Number of newton iterations reached its maximum "
"try to continue with relaxed tolerances:";
if (relax_final_iteration_mb) {
message += fmt::format(" MB: {:.1e}", param_.tolerance_mb_relaxed_);
}
if (relax_final_iteration_cnv) {
message += fmt::format(" CNV: {:.1e}", param_.tolerance_cnv_relaxed_);
}
OpmLog::debug(message);
}
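
Aside: for readers unfamiliar with the immediately invoked pattern
noted in the comment above, a minimal standalone example (the
condition and operand are arbitrary):

    #include <iostream>

    int main()
    {
        // The trailing () invokes the lambda at once, so 'ok' is a
        // single const value rather than a callable object.
        const bool ok = [](const double x) { return x < 0.5; }(0.25);
        std::cout << std::boolalpha << ok << '\n';  // true
    }
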
const Scalar tol_cnv = use_relaxed_cnv ? param_.tolerance_cnv_relaxed_ : param_.tolerance_cnv_;
const Scalar tol_mb = use_relaxed_mb ? param_.tolerance_mb_relaxed_ : param_.tolerance_mb_;
const auto tol_cnv = use_relaxed_cnv ? param_.tolerance_cnv_relaxed_ : param_.tolerance_cnv_;
const auto tol_mb = use_relaxed_mb ? param_.tolerance_mb_relaxed_ : param_.tolerance_mb_;
// Finish computation
std::vector<Scalar> CNV(numComp);
@@ -957,43 +1029,48 @@ namespace Opm {
residual_norms.push_back(CNV[compIdx]);
}
// Create convergence report.
ConvergenceReport report{reportTime};
report.setPoreVolCnvViolationFraction(cnvErrorPvFraction, pvSum - numAquiferPvSum);
using CR = ConvergenceReport;
for (int compIdx = 0; compIdx < numComp; ++compIdx) {
const Scalar res[2] = { mass_balance_residual[compIdx], CNV[compIdx] };
const CR::ReservoirFailure::Type types[2] = { CR::ReservoirFailure::Type::MassBalance,
CR::ReservoirFailure::Type::Cnv };
const Scalar tol[2] = { tol_mb, tol_cnv };
const Scalar res[2] = {
mass_balance_residual[compIdx], CNV[compIdx],
};
const CR::ReservoirFailure::Type types[2] = {
CR::ReservoirFailure::Type::MassBalance,
CR::ReservoirFailure::Type::Cnv,
};
const Scalar tol[2] = { tol_mb, tol_cnv, };
for (int ii : {0, 1}) {
if (std::isnan(res[ii])) {
report.setReservoirFailed({types[ii], CR::Severity::NotANumber, compIdx});
if ( terminal_output_ ) {
if (this->terminal_output_) {
OpmLog::debug("NaN residual for " + this->compNames_.name(compIdx) + " equation.");
}
} else if (res[ii] > maxResidualAllowed()) {
}
else if (res[ii] > maxResidualAllowed()) {
report.setReservoirFailed({types[ii], CR::Severity::TooLarge, compIdx});
if ( terminal_output_ ) {
if (this->terminal_output_) {
OpmLog::debug("Too large residual for " + this->compNames_.name(compIdx) + " equation.");
}
} else if (res[ii] < 0.0) {
}
else if (res[ii] < 0.0) {
report.setReservoirFailed({types[ii], CR::Severity::Normal, compIdx});
if ( terminal_output_ ) {
if (this->terminal_output_) {
OpmLog::debug("Negative residual for " + this->compNames_.name(compIdx) + " equation.");
}
} else if (res[ii] > tol[ii]) {
}
else if (res[ii] > tol[ii]) {
report.setReservoirFailed({types[ii], CR::Severity::Normal, compIdx});
}
report.setReservoirConvergenceMetric(types[ii], compIdx, res[ii]);
}
}
// Output of residuals.
if ( terminal_output_ )
{
if (this->terminal_output_) {
// Only rank 0 does print to std::cout
if (iteration == 0) {
std::string msg = "Iter";
@@ -1002,25 +1079,32 @@ namespace Opm {
msg += this->compNames_.name(compIdx)[0];
msg += ") ";
}
for (int compIdx = 0; compIdx < numComp; ++compIdx) {
msg += " CNV(";
msg += this->compNames_.name(compIdx)[0];
msg += ") ";
}
OpmLog::debug(msg);
}
std::ostringstream ss;
const std::streamsize oprec = ss.precision(3);
const std::ios::fmtflags oflags = ss.setf(std::ios::scientific);
ss << std::setw(4) << iteration;
for (int compIdx = 0; compIdx < numComp; ++compIdx) {
ss << std::setw(11) << mass_balance_residual[compIdx];
}
for (int compIdx = 0; compIdx < numComp; ++compIdx) {
ss << std::setw(11) << CNV[compIdx];
}
ss.precision(oprec);
ss.flags(oflags);
OpmLog::debug(ss.str());
}

View File

@@ -57,8 +57,13 @@ namespace {
"ReportStep"s,
"TimeStep"s,
"Time"s,
"CnvErrPvFrac"s,
"Iteration"s,
"CnvPvFracConv"s,
"CnvPvFracRelax"s,
"CnvPvFracUnconv"s,
"CnvCellCntConv"s,
"CnvCellCntRelax"s,
"CnvCellCntUnconv"s,
};
}
@@ -149,10 +154,17 @@ namespace {
<< std::setw(firstColSize) << request.currentStep << ' '
<< std::setprecision(4) << std::setw(firstColSize)
<< convertTime(report.reportTime()) << ' '
<< std::setprecision(4) << std::setw(firstColSize)
<< report.cnvViolatedPvFraction() << ' '
<< std::setw(firstColSize) << iter;
for (const auto& splitPv : report.cnvPvSplit().first) {
os << ' ' << std::setprecision(4) << std::setw(firstColSize)
<< splitPv / report.eligiblePoreVolume();
}
for (const auto& splitPv : report.cnvPvSplit().second) {
os << ' ' << std::setw(firstColSize) << splitPv;
}
for (const auto& metric : report.reservoirConvergence()) {
os << std::setprecision(4) << std::setw(colSize) << metric.value();
}
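
A standalone sketch of how the loops above derive the six new
INFOITER items (pore-volume fractions relative to the eligible pore
volume, then raw cell counts); the split values and column width are
made up:

    #include <iomanip>
    #include <iostream>
    #include <utility>
    #include <vector>

    int main()
    {
        const std::pair<std::vector<double>, std::vector<int>> cnvPvSplit {
            { 9.0e5, 7.0e4, 3.0e4 },  // converged, relaxed, unconverged PV
            { 900, 70, 30 },          // matching cell counts
        };
        const double eligiblePoreVolume = 1.0e6;

        for (const auto& splitPv : cnvPvSplit.first) {
            std::cout << ' ' << std::setprecision(4) << std::setw(12)
                      << splitPv / eligiblePoreVolume;
        }
        for (const auto& cellCnt : cnvPvSplit.second) {
            std::cout << ' ' << std::setw(12) << cellCnt;
        }
        std::cout << '\n';
    }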

View File

@@ -53,6 +53,10 @@ namespace Opm
NotANumber = 3,
};
using CnvPvSplit = std::pair<
std::vector<double>,
std::vector<int>>;
class ReservoirFailure
{
public:
@@ -164,11 +168,11 @@ namespace Opm
wellGroupTargetsViolated_ = wellGroupTargetsViolated;
}
void setPoreVolCnvViolationFraction(const double cnvErrorPvFraction,
const double cnvErrorPvFractionDenom)
void setCnvPoreVolSplit(const CnvPvSplit& cnvPvSplit,
const double eligiblePoreVolume)
{
this->pvFracCnvViol_ = cnvErrorPvFraction;
this->pvFracCnvViolDenom_ = cnvErrorPvFractionDenom;
this->cnvPvSplit_ = cnvPvSplit;
this->eligiblePoreVolume_ = eligiblePoreVolume;
}
ConvergenceReport& operator+=(const ConvergenceReport& other)
@@ -182,18 +186,16 @@ namespace Opm
assert(wellFailed() != well_failures_.empty());
wellGroupTargetsViolated_ = (wellGroupTargetsViolated_ || other.wellGroupTargetsViolated_);
if ((this->pvFracCnvViolDenom_ > 0.0) ||
(other.pvFracCnvViolDenom_ > 0.0))
{
this->pvFracCnvViol_ = (this->pvFracCnvViol_ * this->pvFracCnvViolDenom_ +
other.pvFracCnvViol_ * other.pvFracCnvViolDenom_)
/ (this->pvFracCnvViolDenom_ + other.pvFracCnvViolDenom_);
this->pvFracCnvViolDenom_ += other.pvFracCnvViolDenom_;
}
else {
this->pvFracCnvViol_ = 0.0;
this->pvFracCnvViolDenom_ = 0.0;
// Note regarding the CNV pore-volume split: We depend on the
// fact that the quantities have already been aggregated across
// all MPI ranks--see the implementation of member function
// BlackoilModel::getReservoirConvergence() for details--and are
// therefore equal on all ranks. Consequently, we simply assign
// 'other's values here, if it is non-empty. Empty splits
// typically come from well contributions.
if (! other.cnvPvSplit_.first.empty()) {
this->cnvPvSplit_ = other.cnvPvSplit_;
this->eligiblePoreVolume_ = other.eligiblePoreVolume_;
}
return *this;
@@ -206,14 +208,14 @@ namespace Opm
return reportTime_;
}
double cnvViolatedPvFraction() const
double eligiblePoreVolume() const
{
return this->pvFracCnvViol_;
return this->eligiblePoreVolume_;
}
std::pair<double, double> cnvViolatedPvFractionPack() const
const CnvPvSplit& cnvPvSplit() const
{
return { this->pvFracCnvViol_, this->pvFracCnvViolDenom_ };
return this->cnvPvSplit_;
}
bool converged() const
@@ -270,8 +272,8 @@ namespace Opm
std::vector<WellFailure> well_failures_;
std::vector<ReservoirConvergenceMetric> res_convergence_;
bool wellGroupTargetsViolated_;
double pvFracCnvViol_{};
double pvFracCnvViolDenom_{};
CnvPvSplit cnvPvSplit_{};
double eligiblePoreVolume_{};
};
struct StepReport
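
A sketch of the resulting API and of the merge rule from operator+=
above; the header path matches the include style seen in this diff,
and all values are made up:

    #include <opm/simulators/timestepping/ConvergenceReport.hpp>

    int main()
    {
        // Reservoir-side report with a rank-aggregated CNV split:
        // {converged, relaxed converged, unconverged} pore volumes
        // and cell counts.
        Opm::ConvergenceReport reservoir { 0.0 };
        reservoir.setCnvPoreVolSplit({ { 9.0e5, 7.0e4, 3.0e4 },
                                       { 900, 70, 30 } }, 1.0e6);

        // Well-side reports carry an empty split, so merging one in
        // must not clobber the reservoir data.
        Opm::ConvergenceReport wells { 0.0 };
        reservoir += wells;  // split and eligible PV retained
    }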

View File

@@ -22,7 +22,10 @@
#include <opm/simulators/timestepping/gatherConvergenceReport.hpp>
#include <algorithm>
#include <tuple>
#include <utility>
#include <vector>
#if HAVE_MPI
@@ -34,82 +37,135 @@ namespace
using Opm::ConvergenceReport;
void packReservoirFailure(const ConvergenceReport::ReservoirFailure& f,
std::vector<char>& buf,
int& offset, MPI_Comm mpi_communicator)
std::vector<char>& buf, int& offset,
MPI_Comm mpi_communicator)
{
int type = static_cast<int>(f.type());
int severity = static_cast<int>(f.severity());
int phase = f.phase();
MPI_Pack(&type, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
MPI_Pack(&severity, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
MPI_Pack(&phase, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
auto pack = [&buf, &offset, mpi_communicator]
(const auto* ptr, const int size, const auto type)
{
MPI_Pack(ptr, size, type,
buf.data(), buf.size(), &offset,
mpi_communicator);
};
const int type = static_cast<int>(f.type());
const int severity = static_cast<int>(f.severity());
const int phase = f.phase();
pack(&type, 1, MPI_INT);
pack(&severity, 1, MPI_INT);
pack(&phase, 1, MPI_INT);
}
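
This local pack lambda is one of the repeated-logic reductions
mentioned in the commit message. A standalone sketch of the pattern
(hypothetical payload, MPI_COMM_WORLD for brevity):

    #include <mpi.h>
    #include <vector>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);

        std::vector<char> buf(64);
        int offset = 0;

        // Capture the buffer and running offset once, reuse per field.
        auto pack = [&buf, &offset](const auto* ptr, const int n,
                                    const MPI_Datatype type)
        {
            MPI_Pack(ptr, n, type, buf.data(),
                     static_cast<int>(buf.size()), &offset,
                     MPI_COMM_WORLD);
        };

        const int    phase = 2;     // made-up field
        const double value = 0.25;  // made-up field
        pack(&phase, 1, MPI_INT);
        pack(&value, 1, MPI_DOUBLE);

        MPI_Finalize();
    }
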
void packReservoirConvergenceMetric(const ConvergenceReport::ReservoirConvergenceMetric& m,
std::vector<char>& buf,
int& offset, MPI_Comm mpi_communicator)
std::vector<char>& buf, int& offset,
MPI_Comm mpi_communicator)
{
int type = static_cast<int>(m.type());
int phase = m.phase();
double value = m.value();
MPI_Pack(&type, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
MPI_Pack(&phase, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
MPI_Pack(&value, 1, MPI_DOUBLE, buf.data(), buf.size(), &offset, mpi_communicator);
auto pack = [&buf, &offset, mpi_communicator]
(const auto* ptr, const int size, const auto type)
{
MPI_Pack(ptr, size, type,
buf.data(), buf.size(), &offset,
mpi_communicator);
};
const int type = static_cast<int>(m.type());
const int phase = m.phase();
const double value = m.value();
pack(&type, 1, MPI_INT);
pack(&phase, 1, MPI_INT);
pack(&value, 1, MPI_DOUBLE);
}
void packWellFailure(const ConvergenceReport::WellFailure& f,
std::vector<char>& buf,
int& offset, MPI_Comm mpi_communicator)
std::vector<char>& buf, int& offset,
MPI_Comm mpi_communicator)
{
int type = static_cast<int>(f.type());
int severity = static_cast<int>(f.severity());
int phase = f.phase();
MPI_Pack(&type, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
MPI_Pack(&severity, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
MPI_Pack(&phase, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
int name_length = f.wellName().size() + 1; // Adding 1 for the null terminator.
MPI_Pack(&name_length, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
MPI_Pack(const_cast<char*>(f.wellName().c_str()), name_length, MPI_CHAR, buf.data(), buf.size(), &offset, mpi_communicator);
auto pack = [&buf, &offset, mpi_communicator]
(const auto* ptr, const int size, const auto type)
{
MPI_Pack(ptr, size, type,
buf.data(), buf.size(), &offset,
mpi_communicator);
};
const int type = static_cast<int>(f.type());
const int severity = static_cast<int>(f.severity());
const int phase = f.phase();
pack(&type, 1, MPI_INT);
pack(&severity, 1, MPI_INT);
pack(&phase, 1, MPI_INT);
// Add one for the null terminator.
const int name_length = f.wellName().size() + 1;
pack(&name_length, 1, MPI_INT);
pack(f.wellName().c_str(), name_length, MPI_CHAR);
}
void packConvergenceReport(const ConvergenceReport& local_report,
std::vector<char>& buf,
int& offset, MPI_Comm mpi_communicator)
std::vector<char>& buf, int& offset,
MPI_Comm mpi_communicator)
{
auto pack = [&buf, &offset, mpi_communicator]
(const auto* ptr, const int size, const auto type)
{
MPI_Pack(ptr, size, type,
buf.data(), buf.size(), &offset,
mpi_communicator);
};
// Pack the data.
// Status will not be packed, it is possible to deduce from the other data.
double reportTime = local_report.reportTime();
MPI_Pack(&reportTime, 1, MPI_DOUBLE, buf.data(), buf.size(), &offset, mpi_communicator);
const auto cnvPvFrac = local_report.cnvViolatedPvFractionPack();
MPI_Pack(&cnvPvFrac.first , 1, MPI_DOUBLE, buf.data(), buf.size(), &offset, mpi_communicator);
MPI_Pack(&cnvPvFrac.second, 1, MPI_DOUBLE, buf.data(), buf.size(), &offset, mpi_communicator);
const auto reportTime = local_report.reportTime();
pack(&reportTime, 1, MPI_DOUBLE);
// CNV pore-volume split
{
const auto& cnvPvSplit = local_report.cnvPvSplit();
const auto eligiblePoreVolume = local_report.eligiblePoreVolume();
const auto num_cnv_pv = static_cast<int>(cnvPvSplit.first.size());
pack(&eligiblePoreVolume , 1 , MPI_DOUBLE);
pack(&num_cnv_pv , 1 , MPI_INT);
pack(cnvPvSplit.first .data(), num_cnv_pv, MPI_DOUBLE);
pack(cnvPvSplit.second.data(), num_cnv_pv, MPI_INT);
}
// Reservoir failures.
const auto rf = local_report.reservoirFailures();
int num_rf = rf.size();
MPI_Pack(&num_rf, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
for (const auto& f : rf) {
packReservoirFailure(f, buf, offset, mpi_communicator);
}
{
const auto rf = local_report.reservoirFailures();
const int num_rf = rf.size();
pack(&num_rf, 1, MPI_INT);
for (const auto& f : rf) {
packReservoirFailure(f, buf, offset, mpi_communicator);
}
}
// Reservoir convergence metrics.
const auto rm = local_report.reservoirConvergence();
int num_rm = rm.size();
MPI_Pack(&num_rm, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
for (const auto& m : rm) {
packReservoirConvergenceMetric(m, buf, offset, mpi_communicator);
}
{
const auto rm = local_report.reservoirConvergence();
const int num_rm = rm.size();
pack(&num_rm, 1, MPI_INT);
for (const auto& m : rm) {
packReservoirConvergenceMetric(m, buf, offset, mpi_communicator);
}
}
// Well failures.
const auto wf = local_report.wellFailures();
int num_wf = wf.size();
MPI_Pack(&num_wf, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
for (const auto& f : wf) {
packWellFailure(f, buf, offset, mpi_communicator);
}
{
const auto wf = local_report.wellFailures();
const int num_wf = wf.size();
pack(&num_wf, 1, MPI_INT);
for (const auto& f : wf) {
packWellFailure(f, buf, offset, mpi_communicator);
}
}
}
@@ -117,109 +173,181 @@ namespace
{
int int_pack_size = 0;
MPI_Pack_size(1, MPI_INT, mpi_communicator, &int_pack_size);
int double_pack_size = 0;
MPI_Pack_size(1, MPI_DOUBLE, mpi_communicator, &double_pack_size);
int char_pack_size = 0;
MPI_Pack_size(1, MPI_CHAR, mpi_communicator, &char_pack_size);
const int num_cnv_pv = local_report.cnvPvSplit().first.size();
const int num_rf = local_report.reservoirFailures().size();
const int num_rm = local_report.reservoirConvergence().size();
const int num_wf = local_report.wellFailures().size();
int wellnames_length = 0;
for (const auto& f : local_report.wellFailures()) {
wellnames_length += (f.wellName().size() + 1);
// Add one for the null terminator.
wellnames_length += f.wellName().size() + 1;
}
return (3 + 3*num_rf + 2*num_rm + 4*num_wf)*int_pack_size + (3 + 1*num_rm)*double_pack_size + wellnames_length;
return (3 + 1 + num_cnv_pv + 3*num_rf + 2*num_rm + 4*num_wf)*int_pack_size
+ (1 + 1 + num_cnv_pv + 1*num_rm)*double_pack_size
+ wellnames_length*char_pack_size;
}
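
As a plausibility check of the formula above (MPI pack sizes are
implementation defined; 4-byte ints and 8-byte doubles are assumed
here purely for the arithmetic):

    #include <iostream>

    int main()
    {
        const int int_sz = 4, dbl_sz = 8, chr_sz = 1;  // assumed pack sizes
        const int num_cnv_pv = 3, num_rf = 0, num_rm = 6, num_wf = 1;
        const int wellnames_length = 8;                // e.g. "PROD-01" + '\0'

        const int bytes =
            (3 + 1 + num_cnv_pv + 3*num_rf + 2*num_rm + 4*num_wf)*int_sz
            + (1 + 1 + num_cnv_pv + 1*num_rm)*dbl_sz
            + wellnames_length*chr_sz;

        std::cout << bytes << '\n';  // 23*4 + 11*8 + 8 = 188
    }
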
ConvergenceReport::ReservoirFailure unpackReservoirFailure(const std::vector<char>& recv_buffer, int& offset, MPI_Comm mpi_communicator)
ConvergenceReport::ReservoirFailure
unpackReservoirFailure(const std::vector<char>& recv_buffer, int& offset, MPI_Comm mpi_communicator)
{
int type = -1;
int severity = -1;
int phase = -1;
auto* data = const_cast<char*>(recv_buffer.data());
MPI_Unpack(data, recv_buffer.size(), &offset, &type, 1, MPI_INT, mpi_communicator);
MPI_Unpack(data, recv_buffer.size(), &offset, &severity, 1, MPI_INT, mpi_communicator);
MPI_Unpack(data, recv_buffer.size(), &offset, &phase, 1, MPI_INT, mpi_communicator);
return ConvergenceReport::ReservoirFailure(static_cast<ConvergenceReport::ReservoirFailure::Type>(type),
static_cast<ConvergenceReport::Severity>(severity),
phase);
auto unpackInt = [data = recv_buffer.data(),
size = static_cast<int>(recv_buffer.size()),
&offset, mpi_communicator]()
{
auto x = -1;
MPI_Unpack(data, size, &offset, &x, 1, MPI_INT, mpi_communicator);
return x;
};
const auto type = unpackInt();
const auto severity = unpackInt();
const auto phase = unpackInt();
return {
static_cast<ConvergenceReport::ReservoirFailure::Type>(type),
static_cast<ConvergenceReport::Severity>(severity),
phase
};
}
ConvergenceReport::ReservoirConvergenceMetric
unpackReservoirConvergenceMetric(const std::vector<char>& recv_buffer, int& offset, MPI_Comm mpi_communicator)
{
int type = -1;
int phase = -1;
double value = -1.0;
auto* data = const_cast<char*>(recv_buffer.data());
MPI_Unpack(data, recv_buffer.size(), &offset, &type, 1, MPI_INT, mpi_communicator);
MPI_Unpack(data, recv_buffer.size(), &offset, &phase, 1, MPI_INT, mpi_communicator);
MPI_Unpack(data, recv_buffer.size(), &offset, &value, 1, MPI_DOUBLE, mpi_communicator);
auto unpack = [data = recv_buffer.data(),
size = static_cast<int>(recv_buffer.size()),
&offset, mpi_communicator](const auto type, auto x)
{
MPI_Unpack(data, size, &offset, &x, 1, type, mpi_communicator);
return x;
};
const auto type = unpack(MPI_INT, -1);
const auto phase = unpack(MPI_INT, -1);
const auto value = unpack(MPI_DOUBLE, -1.0);
return { static_cast<ConvergenceReport::ReservoirFailure::Type>(type), phase, value };
}
ConvergenceReport::WellFailure unpackWellFailure(const std::vector<char>& recv_buffer, int& offset, MPI_Comm mpi_communicator)
ConvergenceReport::WellFailure
unpackWellFailure(const std::vector<char>& recv_buffer, int& offset, MPI_Comm mpi_communicator)
{
int type = -1;
int severity = -1;
int phase = -1;
auto* data = const_cast<char*>(recv_buffer.data());
MPI_Unpack(data, recv_buffer.size(), &offset, &type, 1, MPI_INT, mpi_communicator);
MPI_Unpack(data, recv_buffer.size(), &offset, &severity, 1, MPI_INT, mpi_communicator);
MPI_Unpack(data, recv_buffer.size(), &offset, &phase, 1, MPI_INT, mpi_communicator);
int name_length = -1;
MPI_Unpack(data, recv_buffer.size(), &offset, &name_length, 1, MPI_INT, mpi_communicator);
auto unpackInt = [data = recv_buffer.data(),
size = static_cast<int>(recv_buffer.size()),
&offset, mpi_communicator]()
{
auto x = -1;
MPI_Unpack(data, size, &offset, &x, 1, MPI_INT, mpi_communicator);
return x;
};
const auto type = unpackInt();
const auto severity = unpackInt();
const auto phase = unpackInt();
const auto name_length = unpackInt();
std::vector<char> namechars(name_length);
MPI_Unpack(data, recv_buffer.size(), &offset, namechars.data(), name_length, MPI_CHAR, mpi_communicator);
std::string name(namechars.data());
return ConvergenceReport::WellFailure(static_cast<ConvergenceReport::WellFailure::Type>(type),
static_cast<ConvergenceReport::Severity>(severity),
phase,
name);
MPI_Unpack(recv_buffer.data(), recv_buffer.size(), &offset,
namechars.data(), name_length,
MPI_CHAR, mpi_communicator);
return {
static_cast<ConvergenceReport::WellFailure::Type>(type),
static_cast<ConvergenceReport::Severity>(severity),
phase,
const_cast<const std::vector<char>&>(namechars).data()
};
}
ConvergenceReport unpackSingleConvergenceReport(const std::vector<char>& recv_buffer, int& offset, MPI_Comm mpi_communicator)
ConvergenceReport
unpackSingleConvergenceReport(const std::vector<char>& recv_buffer, int& offset, MPI_Comm mpi_communicator)
{
auto* data = const_cast<char*>(recv_buffer.data());
double reportTime{0.0};
MPI_Unpack(data, recv_buffer.size(), &offset, &reportTime, 1, MPI_DOUBLE, mpi_communicator);
ConvergenceReport cr{reportTime};
auto cnvPvFrac = std::pair { 0.0, 0.0 };
MPI_Unpack(data, recv_buffer.size(), &offset, &cnvPvFrac.first , 1, MPI_DOUBLE, mpi_communicator);
MPI_Unpack(data, recv_buffer.size(), &offset, &cnvPvFrac.second, 1, MPI_DOUBLE, mpi_communicator);
cr.setPoreVolCnvViolationFraction(cnvPvFrac.first, cnvPvFrac.second);
auto unpack = [data = recv_buffer.data(),
size = static_cast<int>(recv_buffer.size()),
&offset, mpi_communicator](const auto type, auto x)
{
MPI_Unpack(data, size, &offset, &x, 1, type, mpi_communicator);
return x;
};
auto cr = ConvergenceReport { unpack(MPI_DOUBLE, 0.0) };
{
const auto eligiblePoreVolume = unpack(MPI_DOUBLE, 0.0);
const auto num_cnv_pv = unpack(MPI_INT, -1);
auto cnvPvSplit = ConvergenceReport::CnvPvSplit {
std::piecewise_construct,
std::forward_as_tuple(num_cnv_pv),
std::forward_as_tuple(num_cnv_pv)
};
const auto* data = recv_buffer.data();
MPI_Unpack(data, recv_buffer.size(), &offset,
cnvPvSplit.first.data(), num_cnv_pv,
MPI_DOUBLE, mpi_communicator);
MPI_Unpack(data, recv_buffer.size(), &offset,
cnvPvSplit.second.data(), num_cnv_pv,
MPI_INT, mpi_communicator);
cr.setCnvPoreVolSplit(cnvPvSplit, eligiblePoreVolume);
}
int num_rf = -1;
MPI_Unpack(data, recv_buffer.size(), &offset, &num_rf, 1, MPI_INT, mpi_communicator);
for (int rf = 0; rf < num_rf; ++rf) {
ConvergenceReport::ReservoirFailure f = unpackReservoirFailure(recv_buffer, offset, mpi_communicator);
cr.setReservoirFailed(f);
}
{
const auto num_rf = unpack(MPI_INT, -1);
for (int rf = 0; rf < num_rf; ++rf) {
cr.setReservoirFailed(unpackReservoirFailure(recv_buffer, offset, mpi_communicator));
}
}
int num_rm = -1;
MPI_Unpack(data, recv_buffer.size(), &offset, &num_rm, 1, MPI_INT, mpi_communicator);
for (int rm = 0; rm < num_rm; ++rm) {
cr.setReservoirConvergenceMetric(unpackReservoirConvergenceMetric(recv_buffer, offset, mpi_communicator));
}
{
const auto num_rm = unpack(MPI_INT, -1);
for (int rm = 0; rm < num_rm; ++rm) {
cr.setReservoirConvergenceMetric(unpackReservoirConvergenceMetric(recv_buffer, offset, mpi_communicator));
}
}
int num_wf = -1;
MPI_Unpack(data, recv_buffer.size(), &offset, &num_wf, 1, MPI_INT, mpi_communicator);
for (int wf = 0; wf < num_wf; ++wf) {
ConvergenceReport::WellFailure f = unpackWellFailure(recv_buffer, offset, mpi_communicator);
cr.setWellFailed(f);
}
{
const auto num_wf = unpack(MPI_INT, -1);
for (int wf = 0; wf < num_wf; ++wf) {
cr.setWellFailed(unpackWellFailure(recv_buffer, offset, mpi_communicator));
}
}
return cr;
}
ConvergenceReport unpackConvergenceReports(const std::vector<char>& recv_buffer,
const std::vector<int>& displ, MPI_Comm mpi_communicator)
ConvergenceReport
unpackConvergenceReports(const std::vector<char>& recv_buffer,
const std::vector<int>& displ,
MPI_Comm mpi_communicator)
{
ConvergenceReport cr;
const int num_processes = displ.size() - 1;
for (int process = 0; process < num_processes; ++process) {
int offset = displ[process];
cr += unpackSingleConvergenceReport(recv_buffer, offset, mpi_communicator);
assert(offset == displ[process + 1]);
}
return cr;
}
@@ -231,33 +359,37 @@ namespace Opm
/// Create a global convergence report combining local
/// (per-process) reports.
ConvergenceReport gatherConvergenceReport(const ConvergenceReport& local_report, Parallel::Communication mpi_communicator)
ConvergenceReport gatherConvergenceReport(const ConvergenceReport& local_report,
Parallel::Communication mpi_communicator)
{
// Pack local report.
int message_size = messageSize(local_report, mpi_communicator);
const int message_size = messageSize(local_report, mpi_communicator);
std::vector<char> buffer(message_size);
int offset = 0;
packConvergenceReport(local_report, buffer, offset,mpi_communicator);
assert(offset == message_size);
{
int offset = 0;
packConvergenceReport(local_report, buffer, offset, mpi_communicator);
assert(offset == message_size);
}
// Get message sizes and create offset/displacement array for gathering.
int num_processes = -1;
MPI_Comm_size(mpi_communicator, &num_processes);
std::vector<int> message_sizes(num_processes);
MPI_Allgather(&message_size, 1, MPI_INT, message_sizes.data(), 1, MPI_INT, mpi_communicator);
std::vector<int> displ(num_processes + 1, 0);
std::partial_sum(message_sizes.begin(), message_sizes.end(), displ.begin() + 1);
// Gather.
std::vector<char> recv_buffer(displ.back());
MPI_Allgatherv(buffer.data(), buffer.size(), MPI_PACKED,
const_cast<char*>(recv_buffer.data()), message_sizes.data(),
recv_buffer.data(), message_sizes.data(),
displ.data(), MPI_PACKED,
mpi_communicator);
// Unpack.
ConvergenceReport global_report = unpackConvergenceReports(recv_buffer, displ, mpi_communicator);
return global_report;
return unpackConvergenceReports(recv_buffer, displ, mpi_communicator);
}
} // namespace Opm
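
Finally, a standalone illustration of the displacement array that
gatherConvergenceReport builds with std::partial_sum (per-rank
message sizes are made up):

    #include <iostream>
    #include <numeric>
    #include <vector>

    int main()
    {
        const std::vector<int> message_sizes { 120, 96, 140 };  // bytes per rank
        std::vector<int> displ(message_sizes.size() + 1, 0);
        std::partial_sum(message_sizes.begin(), message_sizes.end(),
                         displ.begin() + 1);

        // displ[p] is where rank p's packed report starts.
        for (const int d : displ) { std::cout << d << ' '; }  // 0 120 216 356
        std::cout << '\n';
    }
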
@@ -267,7 +399,7 @@ namespace Opm
namespace Opm
{
ConvergenceReport gatherConvergenceReport(const ConvergenceReport& local_report,
Parallel::Communication mpi_communicator [[maybe_unused]])
[[maybe_unused]] Parallel::Communication mpi_communicator)
{
return local_report;
}