Merge pull request #5338 from bska/infoiter-cnv-pv-split

Output CNV Histogram to INFOITER File
Atgeirr Flø Rasmussen 2024-08-27 15:56:21 +02:00 committed by GitHub
commit d6c1ecea50
6 changed files with 534 additions and 353 deletions

View File

@ -62,10 +62,12 @@
#include <cmath>
#include <filesystem>
#include <fstream>
#include <functional>
#include <iomanip>
#include <ios>
#include <limits>
#include <memory>
#include <numeric>
#include <sstream>
#include <tuple>
#include <utility>
@ -824,57 +826,88 @@ namespace Opm {
}
/// \brief Compute pore-volume/cell count split among "converged",
/// "relaxed converged", "unconverged" cells based on CNV point
/// measures.
std::pair<std::vector<double>, std::vector<int>>
characteriseCnvPvSplit(const std::vector<Scalar>& B_avg, const double dt)
{
OPM_TIMEBLOCK(computeCnvErrorPv);
// 0: cnv <= tolerance_cnv
// 1: tolerance_cnv < cnv <= tolerance_cnv_relaxed
// 2: tolerance_cnv_relaxed < cnv
constexpr auto numPvGroups = std::vector<double>::size_type{3};
auto cnvPvSplit = std::pair<std::vector<double>, std::vector<int>> {
std::piecewise_construct,
std::forward_as_tuple(numPvGroups),
std::forward_as_tuple(numPvGroups)
};
auto maxCNV = [&B_avg, dt](const auto& residual, const double pvol)
{
return (dt / pvol) *
std::inner_product(residual.begin(), residual.end(),
B_avg.begin(), Scalar{0},
[](const Scalar m, const auto& x)
{
using std::abs;
return std::max(m, abs(x));
}, std::multiplies<>{});
};
auto& [splitPV, cellCntPV] = cnvPvSplit;
const auto& model = this->simulator().model();
const auto& problem = this->simulator().problem();
const auto& residual = model.linearizer().residual();
const auto& gridView = this->simulator().gridView();
const IsNumericalAquiferCell isNumericalAquiferCell(gridView.grid());
ElementContext elemCtx(this->simulator());
OPM_BEGIN_PARALLEL_TRY_CATCH();
for (const auto& elem : elements(gridView, Dune::Partitions::interior)) {
// Skip cells of numerical Aquifer
if (isNumericalAquiferCell(elem)) {
continue;
}
elemCtx.updatePrimaryStencil(elem);
const unsigned cell_idx = elemCtx.globalSpaceIndex(/*spaceIdx=*/0, /*timeIdx=*/0);
const auto pvValue = problem.referencePorosity(cell_idx, /*timeIdx=*/0)
* model.dofTotalVolume(cell_idx);
const auto maxCnv = maxCNV(residual[cell_idx], pvValue);
const auto ix = (maxCnv > this->param_.tolerance_cnv_)
+ (maxCnv > this->param_.tolerance_cnv_relaxed_);
splitPV[ix] += static_cast<double>(pvValue);
++cellCntPV[ix];
}
OPM_END_PARALLEL_TRY_CATCH("BlackoilModel::characteriseCnvPvSplit() failed: ",
this->grid_.comm());
this->grid_.comm().sum(splitPV .data(), splitPV .size());
this->grid_.comm().sum(cellCntPV.data(), cellCntPV.size());
return cnvPvSplit;
}
void updateTUNING(const Tuning& tuning)
{
this->param_.tolerance_mb_ = tuning.XXXMBE;
if (terminal_output_) {
OpmLog::debug(fmt::format("Setting BlackoilModel mass "
"balance limit (XXXMBE) to {:.2e}",
tuning.XXXMBE));
}
}
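The two chained comparisons in characteriseCnvPvSplit map each cell's maximum CNV value directly onto one of the three buckets. A minimal standalone sketch of that indexing trick, using made-up tolerances that merely stand in for tolerance_cnv_ and tolerance_cnv_relaxed_ (not part of the patch):

    #include <array>
    #include <cstddef>
    #include <iostream>

    int main()
    {
        // Assumed tolerances standing in for tolerance_cnv_ and tolerance_cnv_relaxed_.
        const double tol_cnv = 0.01;
        const double tol_cnv_relaxed = 1.0;

        // Example CNV values: one converged, one relaxed-converged, one unconverged cell.
        const std::array<double, 3> cnv { 0.005, 0.5, 2.0 };

        std::array<int, 3> cellCnt {};
        for (const double value : cnv) {
            // Each satisfied comparison contributes 1, so the sum selects
            // bucket 0, 1 or 2 without branching.
            const std::size_t ix = (value > tol_cnv) + (value > tol_cnv_relaxed);
            ++cellCnt[ix];
        }

        std::cout << cellCnt[0] << ' ' << cellCnt[1] << ' ' << cellCnt[2] << '\n'; // prints "1 1 1"
    }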
@ -889,54 +922,93 @@ namespace Opm {
OPM_TIMEBLOCK(getReservoirConvergence);
using Vector = std::vector<Scalar>;
ConvergenceReport report{reportTime};
const int numComp = numEq;
Vector R_sum(numComp, Scalar{0});
Vector maxCoeff(numComp, std::numeric_limits<Scalar>::lowest());
std::vector<int> maxCoeffCell(numComp, -1);
const auto [pvSumLocal, numAquiferPvSumLocal] =
this->localConvergenceData(R_sum, maxCoeff, B_avg, maxCoeffCell);
// compute global sum and max of quantities
const auto& [pvSum, numAquiferPvSum] =
this->convergenceReduction(this->grid_.comm(),
pvSumLocal,
numAquiferPvSumLocal,
R_sum, maxCoeff, B_avg);
report.setCnvPoreVolSplit(this->characteriseCnvPvSplit(B_avg, dt),
pvSum - numAquiferPvSum);
// For each iteration, we need to determine whether to use the
// relaxed tolerances. To disable the usage of relaxed
// tolerances, you can set the relaxed tolerances as the strict
// tolerances. If min_strict_mb_iter = -1 (default) we use a
// relaxed tolerance for the mass balance for the last
// iterations. For positive values we use the relaxed tolerance
// after the given number of iterations
const bool relax_final_iteration_mb =
(this->param_.min_strict_mb_iter_ < 0)
&& (iteration == maxIter);
const bool use_relaxed_mb = relax_final_iteration_mb ||
((this->param_.min_strict_mb_iter_ >= 0) &&
(iteration >= this->param_.min_strict_mb_iter_));
// If min_strict_cnv_iter = -1 we use a relaxed tolerance for
// the cnv for the last iterations. For positive values we use
// the relaxed tolerance after the given number of iterations.
// We also use relaxed tolerances for cells with total
// pore-volume less than relaxed_max_pv_fraction_. Default
// value of relaxed_max_pv_fraction_ is 0.03
const bool relax_final_iteration_cnv =
(this->param_.min_strict_cnv_iter_ < 0)
&& (iteration == maxIter);
const bool relax_iter_cnv = (this->param_.min_strict_cnv_iter_ >= 0)
&& (iteration >= this->param_.min_strict_cnv_iter_);
// Note trailing parentheses here, just before the final
// semicolon. This is an immediately invoked function
// expression which calculates a single boolean value.
const auto relax_pv_fraction_cnv =
[&report, pvSum, numAquiferPvSum, this]()
{
const auto& cnvPvSplit = report.cnvPvSplit().first;
// [1]: tol < cnv <= relaxed
// [2]: relaxed < cnv
return static_cast<Scalar>(cnvPvSplit[1] + cnvPvSplit[2]) <
this->param_.relaxed_max_pv_fraction_ * (pvSum - numAquiferPvSum);
}();
const bool use_relaxed_cnv = relax_final_iteration_cnv
|| relax_pv_fraction_cnv
|| relax_iter_cnv;
if ((relax_final_iteration_mb || relax_final_iteration_cnv) &&
this->terminal_output_)
{
std::string message =
"Number of newton iterations reached its maximum "
"try to continue with relaxed tolerances:";
if (relax_final_iteration_mb) {
message += fmt::format(" MB: {:.1e}", param_.tolerance_mb_relaxed_);
}
if (relax_final_iteration_cnv) {
message += fmt::format(" CNV: {:.1e}", param_.tolerance_cnv_relaxed_);
}
OpmLog::debug(message);
}
const auto tol_cnv = use_relaxed_cnv ? param_.tolerance_cnv_relaxed_ : param_.tolerance_cnv_;
const auto tol_mb = use_relaxed_mb ? param_.tolerance_mb_relaxed_ : param_.tolerance_mb_;
// Finish computation
std::vector<Scalar> CNV(numComp);
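The relaxed-CNV decision derived from the pore-volume split can be summarised as: relax whenever the pore volume in the violating buckets stays below relaxed_max_pv_fraction_ of the eligible pore volume. A simplified, self-contained sketch of that rule, assuming the default fraction of 0.03; the function name and numbers are illustrative, not from the patch:

    #include <vector>

    // Relax the CNV tolerance when the pore volume in the violating buckets
    // (indices 1 and 2 of the split) is below a fraction of the eligible
    // pore volume. The 0.03 default mirrors relaxed_max_pv_fraction_.
    bool useRelaxedCnvTolerance(const std::vector<double>& cnvPvSplit,
                                const double eligiblePoreVolume,
                                const double relaxedMaxPvFraction = 0.03)
    {
        const double violatingPv = cnvPvSplit[1] + cnvPvSplit[2];
        return violatingPv < relaxedMaxPvFraction * eligiblePoreVolume;
    }

    int main()
    {
        // 2% of a 1.0e6 rm3 eligible pore volume violates the strict tolerance.
        const std::vector<double> split { 9.8e5, 1.5e4, 5.0e3 };
        return useRelaxedCnvTolerance(split, 1.0e6) ? 0 : 1;
    }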
@ -948,43 +1020,48 @@ namespace Opm {
residual_norms.push_back(CNV[compIdx]);
}
using CR = ConvergenceReport;
for (int compIdx = 0; compIdx < numComp; ++compIdx) {
const Scalar res[2] = {
mass_balance_residual[compIdx], CNV[compIdx],
};
const CR::ReservoirFailure::Type types[2] = {
CR::ReservoirFailure::Type::MassBalance,
CR::ReservoirFailure::Type::Cnv,
};
const Scalar tol[2] = { tol_mb, tol_cnv, };
for (int ii : {0, 1}) {
if (std::isnan(res[ii])) {
report.setReservoirFailed({types[ii], CR::Severity::NotANumber, compIdx});
if (this->terminal_output_) {
OpmLog::debug("NaN residual for " + this->compNames_.name(compIdx) + " equation.");
}
}
else if (res[ii] > maxResidualAllowed()) {
report.setReservoirFailed({types[ii], CR::Severity::TooLarge, compIdx});
if (this->terminal_output_) {
OpmLog::debug("Too large residual for " + this->compNames_.name(compIdx) + " equation.");
}
}
else if (res[ii] < 0.0) {
report.setReservoirFailed({types[ii], CR::Severity::Normal, compIdx});
if (this->terminal_output_) {
OpmLog::debug("Negative residual for " + this->compNames_.name(compIdx) + " equation.");
}
}
else if (res[ii] > tol[ii]) {
report.setReservoirFailed({types[ii], CR::Severity::Normal, compIdx});
}
report.setReservoirConvergenceMetric(types[ii], compIdx, res[ii]);
}
}
// Output of residuals.
if (this->terminal_output_) {
// Only rank 0 does print to std::cout
if (iteration == 0) {
std::string msg = "Iter";
@ -993,25 +1070,32 @@ namespace Opm {
msg += this->compNames_.name(compIdx)[0];
msg += ") ";
}
for (int compIdx = 0; compIdx < numComp; ++compIdx) {
msg += " CNV(";
msg += this->compNames_.name(compIdx)[0];
msg += ") ";
}
OpmLog::debug(msg);
}
std::ostringstream ss;
const std::streamsize oprec = ss.precision(3);
const std::ios::fmtflags oflags = ss.setf(std::ios::scientific);
ss << std::setw(4) << iteration;
for (int compIdx = 0; compIdx < numComp; ++compIdx) {
ss << std::setw(11) << mass_balance_residual[compIdx];
}
for (int compIdx = 0; compIdx < numComp; ++compIdx) {
ss << std::setw(11) << CNV[compIdx];
}
ss.precision(oprec);
ss.flags(oflags);
OpmLog::debug(ss.str());
}

View File

@ -33,6 +33,7 @@
#include <functional>
#include <iomanip>
#include <iterator>
#include <limits>
#include <mutex>
#include <numeric>
#include <optional>
@ -57,8 +58,13 @@ namespace {
"ReportStep"s,
"TimeStep"s,
"Time"s,
"CnvErrPvFrac"s,
"Iteration"s,
"CnvPvFracConv"s,
"CnvPvFracRelax"s,
"CnvPvFracUnconv"s,
"CnvCellCntConv"s,
"CnvCellCntRelax"s,
"CnvCellCntUnconv"s,
};
}
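Put together, the header names above produce an INFOITER header row roughly like the following illustration; the fixed column width of 16 characters is an assumption made for the sketch, the real writer derives its widths from the header lengths and requested precision:

    #include <iomanip>
    #include <iostream>
    #include <string>
    #include <vector>

    int main()
    {
        // Assumed fixed column width for illustration only.
        const int width = 16;

        const std::vector<std::string> headers {
            "ReportStep", "TimeStep", "Time", "Iteration",
            "CnvPvFracConv", "CnvPvFracRelax", "CnvPvFracUnconv",
            "CnvCellCntConv", "CnvCellCntRelax", "CnvCellCntUnconv",
        };

        for (const auto& header : headers) {
            std::cout << std::right << std::setw(width) << header << ' ';
        }
        std::cout << '\n';
    }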
@ -135,28 +141,68 @@ namespace {
return { minColSize, headerSize };
}
void writeTimeColumns(std::ostream& os,
const Opm::ConvergenceOutputThread::ConvertToTimeUnits& convertTime,
const std::string::size_type firstColSize,
const int iter,
const Opm::ConvergenceReport& report,
const Opm::ConvergenceReportQueue::OutputRequest& request)
{
os << std::setw(firstColSize) << request.reportStep << ' '
<< std::setw(firstColSize) << request.currentStep << ' '
<< std::setprecision(4) << std::setw(firstColSize)
<< convertTime(report.reportTime()) << ' '
<< std::setw(firstColSize) << iter;
}
void writeCnvPvSplit(std::ostream& os,
const std::vector<double>::size_type expectedNumValues,
const std::string::size_type firstColSize,
const Opm::ConvergenceReport& report)
{
const auto& [splitPv, cellCnt] = report.cnvPvSplit();
if (splitPv.size() == expectedNumValues) {
for (const auto& pv : splitPv) {
os << ' ' << std::setprecision(4) << std::setw(firstColSize)
<< pv / report.eligiblePoreVolume();
}
}
else {
constexpr auto val = std::numeric_limits<double>::has_quiet_NaN
? std::numeric_limits<double>::quiet_NaN()
: -1.0;
for (auto i = 0*expectedNumValues; i < expectedNumValues; ++i) {
os << ' ' << std::setprecision(4) << std::setw(firstColSize) << val;
}
}
if (cellCnt.size() == expectedNumValues) {
for (const auto& cnt : cellCnt) {
os << ' ' << std::setw(firstColSize) << cnt;
}
}
else {
for (auto i = 0*expectedNumValues; i < expectedNumValues; ++i) {
os << ' ' << std::setw(firstColSize) << -1;
}
}
}
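The normalisation done by writeCnvPvSplit amounts to dividing each pore-volume bucket by the report's eligible pore volume before printing, followed by the raw cell counts. A self-contained sketch with invented numbers, mirroring the formatting but not using the real report type:

    #include <iomanip>
    #include <iostream>
    #include <vector>

    int main()
    {
        // Invented split: 90% of the eligible pore volume converged, 8% within
        // the relaxed tolerance, 2% unconverged.
        const std::vector<double> splitPv { 9.0e5, 8.0e4, 2.0e4 };
        const std::vector<int>    cellCnt { 41200, 3100, 700 };
        const double eligiblePv = 1.0e6;

        std::cout.setf(std::ios_base::scientific);

        // Fractions first, raw cell counts second, mirroring the column order.
        for (const double pv : splitPv) {
            std::cout << ' ' << std::setprecision(4) << std::setw(11) << pv / eligiblePv;
        }
        for (const int cnt : cellCnt) {
            std::cout << ' ' << std::setw(11) << cnt;
        }
        std::cout << '\n';
    }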
void writeReservoirConvergence(std::ostream& os,
const std::string::size_type colSize,
const Opm::ConvergenceReport& report)
{
for (const auto& metric : report.reservoirConvergence()) {
os << std::setprecision(4) << std::setw(colSize) << metric.value();
}
}
void writeWellConvergence(std::ostream& os,
const std::string::size_type colSize,
const Opm::ConvergenceReport& report)
{
os << std::right << std::setw(colSize)
<< (report.wellFailed() ? "FAIL" : "CONV");
@ -165,6 +211,26 @@ namespace {
os << ' ' << to_string(wf);
}
}
}
void writeConvergenceRequest(std::ostream& os,
const Opm::ConvergenceOutputThread::ConvertToTimeUnits& convertTime,
const std::string::size_type firstColSize,
const std::string::size_type colSize,
const Opm::ConvergenceReportQueue::OutputRequest& request)
{
os.setf(std::ios_base::scientific);
const auto expectNumCnvSplit = std::vector<double>::size_type{3};
auto iter = 0;
for (const auto& report : request.reports) {
writeTimeColumns(os, convertTime, firstColSize, iter, report, request);
writeCnvPvSplit(os, expectNumCnvSplit, firstColSize, report);
writeReservoirConvergence(os, colSize, report);
writeWellConvergence(os, colSize, report);
os << '\n';

View File

@ -1,6 +1,6 @@
/*
Copyright 2018 SINTEF Digital, Mathematics and Cybernetics.
Copyright 2018, 2024 Equinor.
This file is part of the Open Porous Media project (OPM).
@ -53,11 +53,19 @@ namespace Opm
NotANumber = 3,
};
using CnvPvSplit = std::pair<
std::vector<double>,
std::vector<int>>;
class ReservoirFailure
{
public:
enum struct Type { Invalid, MassBalance, Cnv };
// Default constructor needed for object serialisation. Don't
// use this for anything else.
ReservoirFailure() = default;
ReservoirFailure(Type t, Severity s, int phase)
: type_(t), severity_(s), phase_(phase)
{}
@ -66,15 +74,29 @@ namespace Opm
Severity severity() const { return severity_; }
int phase() const { return phase_; }
template <typename Serializer>
void serializeOp(Serializer& serializer)
{
serializer(this->type_);
serializer(this->severity_);
serializer(this->phase_);
}
private:
// Note to maintainers: If you change this list of data members,
// then please update serializeOp() accordingly.
Type type_ { Type::Invalid };
Severity severity_ { Severity::None };
int phase_ { -1 };
};
class ReservoirConvergenceMetric
{
public:
// Default constructor needed for object serialisation. Don't
// use this for anything else.
ReservoirConvergenceMetric() = default;
ReservoirConvergenceMetric(ReservoirFailure::Type t, int phase, double value)
: type_(t), phase_(phase), value_(value)
{}
@ -83,10 +105,20 @@ namespace Opm
int phase() const { return phase_; }
double value() const { return value_; }
template <typename Serializer>
void serializeOp(Serializer& serializer)
{
serializer(this->type_);
serializer(this->phase_);
serializer(this->value_);
}
private:
// Note to maintainers: If you change this list of data members,
// then please update serializeOp() accordingly.
ReservoirFailure::Type type_ { ReservoirFailure::Type::Invalid };
int phase_ { -1 };
double value_ { 0.0 };
};
class WellFailure
@ -103,6 +135,10 @@ namespace Opm
WrongFlowDirection,
};
// Default constructor needed for object serialisation. Don't
// use this for anything else.
WellFailure() = default;
WellFailure(Type t, Severity s, int phase, const std::string& well_name)
: type_(t), severity_(s), phase_(phase), well_name_(well_name)
{}
@ -112,11 +148,22 @@ namespace Opm
int phase() const { return phase_; }
const std::string& wellName() const { return well_name_; }
template <typename Serializer>
void serializeOp(Serializer& serializer)
{
serializer(this->type_);
serializer(this->severity_);
serializer(this->phase_);
serializer(this->well_name_);
}
private:
// Note to maintainers: If you change this list of data members,
// then please update serializeOp() accordingly.
Type type_ { Type::Invalid };
Severity severity_ { Severity::None };
int phase_ { -1 };
std::string well_name_ {};
};
// ----------- Mutating member functions -----------
@ -164,11 +211,11 @@ namespace Opm
wellGroupTargetsViolated_ = wellGroupTargetsViolated;
}
void setCnvPoreVolSplit(const CnvPvSplit& cnvPvSplit,
const double eligiblePoreVolume)
{
this->cnvPvSplit_ = cnvPvSplit;
this->eligiblePoreVolume_ = eligiblePoreVolume;
}
ConvergenceReport& operator+=(const ConvergenceReport& other)
@ -182,18 +229,16 @@ namespace Opm
assert(wellFailed() != well_failures_.empty());
wellGroupTargetsViolated_ = (wellGroupTargetsViolated_ || other.wellGroupTargetsViolated_);
// Note regarding the CNV pore-volume split: We depend on the
// fact that the quantities have already been aggregated across
// all MPI ranks--see the implementation of member function
// BlackoilModel::getReservoirConvergence() for details--and are
// therefore equal on all ranks. Consequently, we simply assign
// 'other's values here, if it is non-empty. Empty splits
// typically come from well contributions.
if (! other.cnvPvSplit_.first.empty()) {
this->cnvPvSplit_ = other.cnvPvSplit_;
this->eligiblePoreVolume_ = other.eligiblePoreVolume_;
}
return *this;
@ -206,14 +251,14 @@ namespace Opm
return reportTime_;
}
double eligiblePoreVolume() const
{
return this->eligiblePoreVolume_;
}
const CnvPvSplit& cnvPvSplit() const
{
return this->cnvPvSplit_;
}
bool converged() const
@ -262,16 +307,31 @@ namespace Opm
return s;
}
template <typename Serializer>
void serializeOp(Serializer& serializer)
{
serializer(this->reportTime_);
serializer(this->status_);
serializer(this->res_failures_);
serializer(this->well_failures_);
serializer(this->res_convergence_);
serializer(this->wellGroupTargetsViolated_);
serializer(this->cnvPvSplit_);
serializer(this->eligiblePoreVolume_);
}
private:
// ----------- Member variables -----------
// Note to maintainers: If you change this list of data members,
// then please update serializeOp() accordingly.
double reportTime_;
Status status_;
std::vector<ReservoirFailure> res_failures_;
std::vector<WellFailure> well_failures_;
std::vector<ReservoirConvergenceMetric> res_convergence_;
bool wellGroupTargetsViolated_;
CnvPvSplit cnvPvSplit_{};
double eligiblePoreVolume_{};
};
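The operator+= rule for the split can be illustrated with a simplified stand-in type; this is only a sketch of the "keep the non-empty, already-global split" behaviour described in the comment above, not the real ConvergenceReport class:

    #include <cassert>
    #include <utility>
    #include <vector>

    // Simplified stand-in: an empty split (typically from a well-only report)
    // must not overwrite the globally aggregated split of the other operand.
    struct MiniReport
    {
        std::pair<std::vector<double>, std::vector<int>> cnvPvSplit{};
        double eligiblePoreVolume{};

        MiniReport& operator+=(const MiniReport& other)
        {
            if (! other.cnvPvSplit.first.empty()) {
                this->cnvPvSplit = other.cnvPvSplit;
                this->eligiblePoreVolume = other.eligiblePoreVolume;
            }
            return *this;
        }
    };

    int main()
    {
        MiniReport reservoir { {{ 0.75, 0.2, 0.05 }, { 1234, 56, 7 }}, 9.876e5 };
        MiniReport wellOnly {};          // empty split, e.g. from a well failure report

        reservoir += wellOnly;           // non-empty split is preserved
        assert(! reservoir.cnvPvSplit.first.empty());

        wellOnly += reservoir;           // empty side adopts the aggregated split
        assert(wellOnly.eligiblePoreVolume == 9.876e5);

        return 0;
    }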
struct StepReport

View File

@ -1,5 +1,5 @@
/*
Copyright 2018, 2022, 2024 Equinor ASA.
Copyright 2018 SINTEF Digital, Mathematics and Cybernetics.
This file is part of the Open Porous Media project (OPM).
@ -22,252 +22,154 @@
#include <opm/simulators/timestepping/gatherConvergenceReport.hpp>
#include <utility>
#include <opm/simulators/utils/ParallelCommunication.hpp>
#if HAVE_MPI
#include <opm/common/utility/Serializer.hpp>
#include <opm/simulators/utils/MPIPacker.hpp>
#include <opm/grid/common/CommunicationUtils.hpp>
#include <algorithm>
#include <tuple>
#include <vector>
namespace {
/// Special purpose utility to collect each rank's local convergence
/// report object and distribute those to all ranks.
///
/// This particular feature could arguably be a part of the
/// Opm::MpiSerializer class, but we create it here first as a narrowly
/// scoped testing ground.
///
/// Inherits from Opm::Serializer<> in order to use that type's pack()
/// and unpack() member functions along with its internal 'm_buffer'
/// data member.
class CollectConvReports : private Opm::Serializer<Opm::Mpi::Packer>
{
public:
/// Constructor.
///
/// \param[in] packer Packing object to convert in-memory objects to
/// a byte representation. Lifetime must exceed that of the
/// CollectConvReports object.
///
/// \param[in] comm MPI communicator. Should be the same as the
/// internal communicator in the \p packer object.
explicit CollectConvReports(const Opm::Mpi::Packer& packer,
const Opm::Parallel::Communication comm)
: Opm::Serializer<Opm::Mpi::Packer> { packer }
, comm_ { comm }
{}
/// Collect local convergence reports from each MPI rank.
///
/// \param[in] report Local convergence report from the current MPI
/// rank.
///
/// \return Collection of local convergence reports from all MPI
/// ranks in the current communicator. Report objects returned in
/// rank order.
std::vector<Opm::ConvergenceReport>
operator()(const Opm::ConvergenceReport& report);
private:
/// MPI communicator.
Opm::Parallel::Communication comm_;
/// Byte representation of local convergence report objects from all
/// ranks.
///
/// Only valid during a call to operator().
std::vector<char> rankBuffers_{};
/// Start pointers into rankBuffers_ for each rank's local
/// convergence report object.
std::vector<int> rankStart_{};
/// Reconstitute a convergence report from byte stream representation.
///
/// \param[in] rank MPI rank for which to reconstitute the local
/// convergence report object.
///
/// \param[in,out] report \p rank's local convergence report object.
/// On entry, a default constructed object usable as a destination
/// for deserialisation. On exit, a fully populated convergence
/// report object filled from the byte stream of \p rank.
void deserialise(const std::vector<int>::size_type rank,
Opm::ConvergenceReport& report);
};
std::vector<Opm::ConvergenceReport>
CollectConvReports::operator()(const Opm::ConvergenceReport& report)
{
auto rankReports = std::vector<Opm::ConvergenceReport>(this->comm_.size());
this->pack(report);
std::tie(this->rankBuffers_, this->rankStart_) =
Opm::allGatherv(this->m_buffer, this->comm_);
auto rank = std::vector<int>::size_type{0};
for (auto& rankReport : rankReports) {
this->deserialise(rank++, rankReport);
}
return rankReports;
}
void CollectConvReports::deserialise(const std::vector<int>::size_type rank,
Opm::ConvergenceReport& report)
{
auto begin = this->rankBuffers_.begin() + this->rankStart_[rank + 0];
auto end = this->rankBuffers_.begin() + this->rankStart_[rank + 1];
this->m_buffer.assign(begin, end);
this->m_packSize = std::distance(begin, end);
this->unpack(report);
}
} // Anonymous namespace
namespace Opm
{
/// Create a global convergence report combining local (per-process)
/// reports.
ConvergenceReport
gatherConvergenceReport(const ConvergenceReport& local_report,
Parallel::Communication mpi_communicator)
{
if (mpi_communicator.size() == 1) {
// Sequential run, no communication needed.
return local_report;
}
// Multi-process run (common case). Need object distribution.
auto combinedReport = ConvergenceReport {};
const auto packer = Mpi::Packer { mpi_communicator };
const auto rankReports =
CollectConvReports { packer, mpi_communicator }(local_report);
for (const auto& rankReport : rankReports) {
combinedReport += rankReport;
}
return combinedReport;
}
} // namespace Opm
#else // !HAVE_MPI
namespace Opm
{
ConvergenceReport
gatherConvergenceReport(const ConvergenceReport& local_report,
[[maybe_unused]] Parallel::Communication mpi_communicator)
{
return local_report;
}

View File

@ -30,9 +30,10 @@ namespace Opm
/// Create a global convergence report combining local
/// (per-process) reports.
ConvergenceReport
gatherConvergenceReport(const ConvergenceReport& local_report,
Parallel::Communication communicator);
} // namespace Opm
#endif // OPM_GATHERCONVERGENCEREPORT_HEADER_INCLUDED

View File

@ -106,6 +106,74 @@ BOOST_AUTO_TEST_CASE(EvenHaveFailure)
}
}
namespace {
class NProc_Is_Not
{
public:
explicit NProc_Is_Not(const int rejectNP)
: rejectNP_ { rejectNP }
{}
boost::test_tools::assertion_result
operator()(boost::unit_test::test_unit_id) const
{
auto comm = Opm::Parallel::Communication {
Dune::MPIHelper::getCommunicator()
};
if (comm.size() != this->rejectNP_) {
return true;
}
boost::test_tools::assertion_result response(false);
response.message() << "Number of MPI processes ("
<< comm.size()
<< ") matches rejected case.";
return response;
}
private:
int rejectNP_{};
};
} // Anonymous namespace
BOOST_AUTO_TEST_CASE(CNV_PV_SPLIT, * boost::unit_test::precondition(NProc_Is_Not{1}))
{
const auto cc = Dune::MPIHelper::getCommunication();
auto loc_rpt = Opm::ConvergenceReport {};
if (cc.rank() != 0) {
// All ranks are supposed to have the *same* pore-volume split of
// the CNV metrics, except if the pv split is empty.
const auto pvSplit = Opm::ConvergenceReport::CnvPvSplit {
{ 0.75, 0.2, 0.05, },
{ 1234, 56 , 7 , }
};
loc_rpt.setCnvPoreVolSplit(pvSplit, 9.876e5);
}
const auto cr = gatherConvergenceReport(loc_rpt, cc);
const auto& [pvFrac, cellCnt] = cr.cnvPvSplit();
BOOST_REQUIRE_EQUAL(pvFrac.size(), std::size_t{3});
BOOST_REQUIRE_EQUAL(cellCnt.size(), std::size_t{3});
BOOST_CHECK_CLOSE(cr.eligiblePoreVolume(), 9.876e5, 1.0e-8);
BOOST_CHECK_CLOSE(pvFrac[0], 0.75, 1.0e-8);
BOOST_CHECK_CLOSE(pvFrac[1], 0.20, 1.0e-8);
BOOST_CHECK_CLOSE(pvFrac[2], 0.05, 1.0e-8);
BOOST_CHECK_EQUAL(cellCnt[0], 1234);
BOOST_CHECK_EQUAL(cellCnt[1], 56);
BOOST_CHECK_EQUAL(cellCnt[2], 7);
}
int main(int argc, char** argv)
{
Dune::MPIHelper::instance(argc, argv);