Merge pull request #5338 from bska/infoiter-cnv-pv-split

Output CNV Histogram to INFOITER File
Merged by Atgeirr Flø Rasmussen on 2024-08-27 15:56:21 +02:00 (merge commit d6c1ecea50, committed via GitHub).
6 changed files with 534 additions and 353 deletions

Changed file 1 of 6:

@@ -62,10 +62,12 @@
 #include <cmath>
 #include <filesystem>
 #include <fstream>
+#include <functional>
 #include <iomanip>
 #include <ios>
 #include <limits>
 #include <memory>
+#include <numeric>
 #include <sstream>
 #include <tuple>
 #include <utility>
@@ -824,57 +826,88 @@ namespace Opm {
         }

-        /// \brief Compute the total pore volume of cells violating CNV that are not part
-        /// of a numerical aquifer.
-        Scalar computeCnvErrorPv(const std::vector<Scalar>& B_avg, double dt)
+        /// \brief Compute pore-volume/cell count split among "converged",
+        /// "relaxed converged", "unconverged" cells based on CNV point
+        /// measures.
+        std::pair<std::vector<double>, std::vector<int>>
+        characteriseCnvPvSplit(const std::vector<Scalar>& B_avg, const double dt)
         {
             OPM_TIMEBLOCK(computeCnvErrorPv);
-            Scalar errorPV{};
-            const auto& model = simulator_.model();
-            const auto& problem = simulator_.problem();
-            const auto& residual = simulator_.model().linearizer().residual();
-            const auto& gridView = simulator().gridView();
-            ElementContext elemCtx(simulator_);
-            IsNumericalAquiferCell isNumericalAquiferCell(gridView.grid());
+
+            // 0: cnv <= tolerance_cnv
+            // 1: tolerance_cnv < cnv <= tolerance_cnv_relaxed
+            // 2: tolerance_cnv_relaxed < cnv
+            constexpr auto numPvGroups = std::vector<double>::size_type{3};
+
+            auto cnvPvSplit = std::pair<std::vector<double>, std::vector<int>> {
+                std::piecewise_construct,
+                std::forward_as_tuple(numPvGroups),
+                std::forward_as_tuple(numPvGroups)
+            };
+
+            auto maxCNV = [&B_avg, dt](const auto& residual, const double pvol)
+            {
+                return (dt / pvol) *
+                    std::inner_product(residual.begin(), residual.end(),
+                                       B_avg.begin(), Scalar{0},
+                                       [](const Scalar m, const auto& x)
+                                       {
+                                           using std::abs;
+                                           return std::max(m, abs(x));
+                                       }, std::multiplies<>{});
+            };
+
+            auto& [splitPV, cellCntPV] = cnvPvSplit;
+
+            const auto& model = this->simulator().model();
+            const auto& problem = this->simulator().problem();
+            const auto& residual = model.linearizer().residual();
+            const auto& gridView = this->simulator().gridView();
+
+            const IsNumericalAquiferCell isNumericalAquiferCell(gridView.grid());
+            ElementContext elemCtx(this->simulator());

             OPM_BEGIN_PARALLEL_TRY_CATCH();
-            for (const auto& elem : elements(gridView, Dune::Partitions::interior))
-            {
+
+            for (const auto& elem : elements(gridView, Dune::Partitions::interior)) {
                 // Skip cells of numerical Aquifer
-                if (isNumericalAquiferCell(elem))
-                {
+                if (isNumericalAquiferCell(elem)) {
                     continue;
                 }
+
                 elemCtx.updatePrimaryStencil(elem);
-                // elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0);
+
                 const unsigned cell_idx = elemCtx.globalSpaceIndex(/*spaceIdx=*/0, /*timeIdx=*/0);
-                const Scalar pvValue = problem.referencePorosity(cell_idx, /*timeIdx=*/0) * model.dofTotalVolume(cell_idx);
-                const auto& cellResidual = residual[cell_idx];
-                bool cnvViolated = false;
-                for (unsigned eqIdx = 0; eqIdx < cellResidual.size(); ++eqIdx)
-                {
-                    using std::abs;
-                    Scalar CNV = cellResidual[eqIdx] * dt * B_avg[eqIdx] / pvValue;
-                    cnvViolated = cnvViolated || (abs(CNV) > param_.tolerance_cnv_);
-                }
-                if (cnvViolated)
-                {
-                    errorPV += pvValue;
-                }
+                const auto pvValue = problem.referencePorosity(cell_idx, /*timeIdx=*/0)
+                    * model.dofTotalVolume(cell_idx);
+
+                const auto maxCnv = maxCNV(residual[cell_idx], pvValue);
+
+                const auto ix = (maxCnv > this->param_.tolerance_cnv_)
+                    + (maxCnv > this->param_.tolerance_cnv_relaxed_);
+
+                splitPV[ix] += static_cast<double>(pvValue);
+                ++cellCntPV[ix];
             }
-            OPM_END_PARALLEL_TRY_CATCH("BlackoilModel::ComputeCnvError() failed: ", grid_.comm());
-            return grid_.comm().sum(errorPV);
+
+            OPM_END_PARALLEL_TRY_CATCH("BlackoilModel::characteriseCnvPvSplit() failed: ",
+                                       this->grid_.comm());
+
+            this->grid_.comm().sum(splitPV .data(), splitPV .size());
+            this->grid_.comm().sum(cellCntPV.data(), cellCntPV.size());
+
+            return cnvPvSplit;
         }

-        void updateTUNING(const Tuning& tuning) {
-            param_.tolerance_mb_ = tuning.XXXMBE;
-            if ( terminal_output_ ) {
-                OpmLog::debug(fmt::format("Setting BlackoilModel mass balance limit (XXXMBE) to {:.2e}", tuning.XXXMBE));
+        void updateTUNING(const Tuning& tuning)
+        {
+            this->param_.tolerance_mb_ = tuning.XXXMBE;
+
+            if (terminal_output_) {
+                OpmLog::debug(fmt::format("Setting BlackoilModel mass "
                                          "balance limit (XXXMBE) to {:.2e}",
                                          tuning.XXXMBE));
             }
         }
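The new function collapses the per-cell decision into an index computation: each cell's point CNV measure, CNV_i = (dt / pv_i) * max over equations of |B_avg[eq] * residual[eq]|, is compared against the strict and relaxed tolerances, and the two boolean results sum to a bucket index of 0, 1, or 2. A minimal standalone sketch of both idioms (illustrative stand-in tolerances and values, not part of the PR):

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <functional>
#include <numeric>
#include <vector>

int main()
{
    const double tol = 0.01;        // stand-in for param_.tolerance_cnv_
    const double tolRelaxed = 0.1;  // stand-in for param_.tolerance_cnv_relaxed_

    // (x > tol) + (x > tolRelaxed) yields 0, 1, or 2:
    // 0: converged, 1: relaxed converged, 2: unconverged.
    auto bucket = [=](const double maxCnv) {
        return static_cast<std::size_t>((maxCnv > tol) + (maxCnv > tolRelaxed));
    };

    assert(bucket(0.005) == 0);
    assert(bucket(0.05)  == 1);
    assert(bucket(0.5)   == 2);

    // The max |B_avg[eq] * residual[eq]| reduction, written with the same
    // std::inner_product call shape as the maxCNV lambda above.
    const std::vector<double> residual{-0.3, 0.2};
    const std::vector<double> B_avg{1.0, 2.0};

    const double m =
        std::inner_product(residual.begin(), residual.end(), B_avg.begin(), 0.0,
                           [](const double acc, const double x)
                           { return std::max(acc, std::abs(x)); },
                           std::multiplies<>{});

    assert(std::abs(m - 0.4) < 1.0e-15); // max(|-0.3*1.0|, |0.2*2.0|)

    return 0;
}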
@@ -889,54 +922,93 @@ namespace Opm {
             OPM_TIMEBLOCK(getReservoirConvergence);

             using Vector = std::vector<Scalar>;

+            ConvergenceReport report{reportTime};
+
             const int numComp = numEq;
-            Vector R_sum(numComp, 0.0 );
-            Vector maxCoeff(numComp, std::numeric_limits< Scalar >::lowest() );
+
+            Vector R_sum(numComp, Scalar{0});
+            Vector maxCoeff(numComp, std::numeric_limits<Scalar>::lowest());
             std::vector<int> maxCoeffCell(numComp, -1);
-            const auto [ pvSumLocal, numAquiferPvSumLocal] =
+
+            const auto [pvSumLocal, numAquiferPvSumLocal] =
                 this->localConvergenceData(R_sum, maxCoeff, B_avg, maxCoeffCell);

             // compute global sum and max of quantities
-            const auto [ pvSum, numAquiferPvSum ] =
-                convergenceReduction(grid_.comm(), pvSumLocal,
-                                     numAquiferPvSumLocal,
-                                     R_sum, maxCoeff, B_avg);
+            const auto& [pvSum, numAquiferPvSum] =
+                this->convergenceReduction(this->grid_.comm(),
+                                           pvSumLocal,
+                                           numAquiferPvSumLocal,
+                                           R_sum, maxCoeff, B_avg);

-            const auto cnvErrorPvFraction =
-                computeCnvErrorPv(B_avg, dt)
-                / (pvSum - numAquiferPvSum);
+            report.setCnvPoreVolSplit(this->characteriseCnvPvSplit(B_avg, dt),
+                                      pvSum - numAquiferPvSum);

-            // For each iteration, we need to determine whether to use the relaxed tolerances.
-            // To disable the usage of relaxed tolerances, you can set the relaxed tolerances as the strict tolerances.
-
-            // If min_strict_mb_iter = -1 (default) we use a relaxed tolerance for the mass balance for the last iterations
-            // For positive values we use the relaxed tolerance after the given number of iterations
-            const bool relax_final_iteration_mb = (param_.min_strict_mb_iter_ < 0) && (iteration == maxIter);
-            const bool use_relaxed_mb = relax_final_iteration_mb || (param_.min_strict_mb_iter_ >= 0 && iteration >= param_.min_strict_mb_iter_);
+            // For each iteration, we need to determine whether to use the
+            // relaxed tolerances.  To disable the usage of relaxed
+            // tolerances, you can set the relaxed tolerances as the strict
+            // tolerances.  If min_strict_mb_iter = -1 (default) we use a
+            // relaxed tolerance for the mass balance for the last
+            // iterations.  For positive values we use the relaxed tolerance
+            // after the given number of iterations
+            const bool relax_final_iteration_mb =
+                (this->param_.min_strict_mb_iter_ < 0)
+                && (iteration == maxIter);
+
+            const bool use_relaxed_mb = relax_final_iteration_mb ||
+                ((this->param_.min_strict_mb_iter_ >= 0) &&
+                 (iteration >= this->param_.min_strict_mb_iter_));

-            // If min_strict_cnv_iter = -1 we use a relaxed tolerance for the cnv for the last iterations
-            // For positive values we use the relaxed tolerance after the given number of iterations
-            // We also use relaxed tolerances for cells with total poro volume less than relaxed_max_pv_fraction_
-            // Default value of relaxed_max_pv_fraction_ is 0.03
-            const bool relax_final_iteration_cnv = (param_.min_strict_cnv_iter_ < 0) && (iteration == maxIter);
-            const bool relax_iter_cnv = param_.min_strict_cnv_iter_ >= 0 && iteration >= param_.min_strict_cnv_iter_;
-            const bool relax_pv_fraction_cnv = cnvErrorPvFraction < param_.relaxed_max_pv_fraction_;
-            const bool use_relaxed_cnv = relax_final_iteration_cnv || relax_pv_fraction_cnv || relax_iter_cnv;
+            // If min_strict_cnv_iter = -1 we use a relaxed tolerance for
+            // the cnv for the last iterations.  For positive values we use
+            // the relaxed tolerance after the given number of iterations.
+            // We also use relaxed tolerances for cells with total
+            // pore-volume less than relaxed_max_pv_fraction_.  Default
+            // value of relaxed_max_pv_fraction_ is 0.03
+            const bool relax_final_iteration_cnv =
+                (this->param_.min_strict_cnv_iter_ < 0)
+                && (iteration == maxIter);
+
+            const bool relax_iter_cnv = (this->param_.min_strict_cnv_iter_ >= 0)
+                && (iteration >= this->param_.min_strict_cnv_iter_);
+
+            // Note trailing parentheses here, just before the final
+            // semicolon.  This is an immediately invoked function
+            // expression which calculates a single boolean value.
+            const auto relax_pv_fraction_cnv =
+                [&report, pvSum, numAquiferPvSum, this]()
+            {
+                const auto& cnvPvSplit = report.cnvPvSplit().first;
+
+                // [1]: tol < cnv <= relaxed
+                // [2]: relaxed < cnv
+                return static_cast<Scalar>(cnvPvSplit[1] + cnvPvSplit[2]) <
+                    this->param_.relaxed_max_pv_fraction_ * (pvSum - numAquiferPvSum);
+            }();
+
+            const bool use_relaxed_cnv = relax_final_iteration_cnv
+                || relax_pv_fraction_cnv
+                || relax_iter_cnv;

-            if (relax_final_iteration_mb || relax_final_iteration_cnv) {
-                if ( terminal_output_ ) {
-                    std::string message = "Number of newton iterations reached its maximum try to continue with relaxed tolerances:";
-                    if (relax_final_iteration_mb)
-                        message += fmt::format(" MB: {:.1e}", param_.tolerance_mb_relaxed_);
-                    if (relax_final_iteration_cnv)
-                        message += fmt::format(" CNV: {:.1e}", param_.tolerance_cnv_relaxed_);
-                    OpmLog::debug(message);
-                }
-            }
+            if ((relax_final_iteration_mb || relax_final_iteration_cnv) &&
+                this->terminal_output_)
+            {
+                std::string message =
+                    "Number of newton iterations reached its maximum "
+                    "try to continue with relaxed tolerances:";
+
+                if (relax_final_iteration_mb) {
+                    message += fmt::format(" MB: {:.1e}", param_.tolerance_mb_relaxed_);
+                }
+
+                if (relax_final_iteration_cnv) {
+                    message += fmt::format(" CNV: {:.1e}", param_.tolerance_cnv_relaxed_);
+                }
+
+                OpmLog::debug(message);
+            }

-            const Scalar tol_cnv = use_relaxed_cnv ? param_.tolerance_cnv_relaxed_ : param_.tolerance_cnv_;
-            const Scalar tol_mb = use_relaxed_mb ? param_.tolerance_mb_relaxed_ : param_.tolerance_mb_;
+            const auto tol_cnv = use_relaxed_cnv ? param_.tolerance_cnv_relaxed_ : param_.tolerance_cnv_;
+            const auto tol_mb  = use_relaxed_mb  ? param_.tolerance_mb_relaxed_  : param_.tolerance_mb_;

             // Finish computation
             std::vector<Scalar> CNV(numComp);
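The "note trailing parentheses" comment above refers to an immediately invoked lambda. A compilable toy version of the same pattern, with made-up numbers, showing how it lets a const variable be initialised by multi-step logic:

#include <iostream>
#include <vector>

int main()
{
    const std::vector<double> splitPv{0.75, 0.20, 0.05}; // hypothetical split
    const double eligiblePv = 1.0;
    const double relaxedMaxPvFraction = 0.03;

    // Note the trailing "()" -- the lambda runs immediately and its
    // return value initialises the const variable.
    const bool relaxPvFractionCnv = [&]() {
        // Fraction of pore volume still violating the strict tolerance
        // (groups 1 and 2 of the split).
        return (splitPv[1] + splitPv[2]) < relaxedMaxPvFraction * eligiblePv;
    }();

    std::cout << std::boolalpha << relaxPvFractionCnv << '\n'; // prints "false"

    return 0;
}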
@@ -948,43 +1020,48 @@ namespace Opm {
                 residual_norms.push_back(CNV[compIdx]);
             }

-            // Create convergence report.
-            ConvergenceReport report{reportTime};
-            report.setPoreVolCnvViolationFraction(cnvErrorPvFraction, pvSum - numAquiferPvSum);
             using CR = ConvergenceReport;
             for (int compIdx = 0; compIdx < numComp; ++compIdx) {
-                const Scalar res[2] = { mass_balance_residual[compIdx], CNV[compIdx] };
-                const CR::ReservoirFailure::Type types[2] = { CR::ReservoirFailure::Type::MassBalance,
-                                                              CR::ReservoirFailure::Type::Cnv };
-                const Scalar tol[2] = { tol_mb, tol_cnv };
+                const Scalar res[2] = {
+                    mass_balance_residual[compIdx], CNV[compIdx],
+                };
+
+                const CR::ReservoirFailure::Type types[2] = {
+                    CR::ReservoirFailure::Type::MassBalance,
+                    CR::ReservoirFailure::Type::Cnv,
+                };
+
+                const Scalar tol[2] = { tol_mb, tol_cnv, };
+
                 for (int ii : {0, 1}) {
                     if (std::isnan(res[ii])) {
                         report.setReservoirFailed({types[ii], CR::Severity::NotANumber, compIdx});
-                        if ( terminal_output_ ) {
+                        if (this->terminal_output_) {
                             OpmLog::debug("NaN residual for " + this->compNames_.name(compIdx) + " equation.");
                         }
-                    } else if (res[ii] > maxResidualAllowed()) {
+                    }
+                    else if (res[ii] > maxResidualAllowed()) {
                         report.setReservoirFailed({types[ii], CR::Severity::TooLarge, compIdx});
-                        if ( terminal_output_ ) {
+                        if (this->terminal_output_) {
                             OpmLog::debug("Too large residual for " + this->compNames_.name(compIdx) + " equation.");
                         }
-                    } else if (res[ii] < 0.0) {
+                    }
+                    else if (res[ii] < 0.0) {
                         report.setReservoirFailed({types[ii], CR::Severity::Normal, compIdx});
-                        if ( terminal_output_ ) {
+                        if (this->terminal_output_) {
                             OpmLog::debug("Negative residual for " + this->compNames_.name(compIdx) + " equation.");
                         }
-                    } else if (res[ii] > tol[ii]) {
+                    }
+                    else if (res[ii] > tol[ii]) {
                         report.setReservoirFailed({types[ii], CR::Severity::Normal, compIdx});
                     }
+
                     report.setReservoirConvergenceMetric(types[ii], compIdx, res[ii]);
                 }
             }

             // Output of residuals.
-            if ( terminal_output_ )
-            {
+            if (this->terminal_output_) {
                 // Only rank 0 does print to std::cout
                 if (iteration == 0) {
                     std::string msg = "Iter";
@@ -993,25 +1070,32 @@ namespace Opm {
                     msg += this->compNames_.name(compIdx)[0];
                     msg += ")  ";
                 }
+
                 for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                     msg += "    CNV(";
                     msg += this->compNames_.name(compIdx)[0];
                     msg += ") ";
                 }
+
                 OpmLog::debug(msg);
             }
+
             std::ostringstream ss;
             const std::streamsize oprec = ss.precision(3);
             const std::ios::fmtflags oflags = ss.setf(std::ios::scientific);
+
             ss << std::setw(4) << iteration;
+
             for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                 ss << std::setw(11) << mass_balance_residual[compIdx];
             }
+
             for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                 ss << std::setw(11) << CNV[compIdx];
             }
+
             ss.precision(oprec);
             ss.flags(oflags);
+
             OpmLog::debug(ss.str());
         }
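For reference, the block above prints one header row on the first iteration and one data row per iteration. An illustrative sketch of the layout (the three-phase column letters and all numbers are made up; actual widths follow the setw(4)/setw(11) calls):

Iter   MB(O)      MB(W)      MB(G)      CNV(O)     CNV(W)     CNV(G)
   0  1.234e-03  5.678e-04  2.345e-03  4.567e-02  1.234e-02  8.901e-02
   1  2.468e-04  1.357e-04  9.876e-04  7.654e-03  3.210e-03  1.098e-02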

Changed file 2 of 6:

@@ -33,6 +33,7 @@
 #include <functional>
 #include <iomanip>
 #include <iterator>
+#include <limits>
 #include <mutex>
 #include <numeric>
 #include <optional>
@@ -57,8 +58,13 @@ namespace {
         "ReportStep"s,
         "TimeStep"s,
         "Time"s,
-        "CnvErrPvFrac"s,
         "Iteration"s,
+        "CnvPvFracConv"s,
+        "CnvPvFracRelax"s,
+        "CnvPvFracUnconv"s,
+        "CnvCellCntConv"s,
+        "CnvCellCntRelax"s,
+        "CnvCellCntUnconv"s,
     };
 }
@@ -135,6 +141,78 @@ namespace {
         return { minColSize, headerSize };
     }

+    void writeTimeColumns(std::ostream& os,
+                          const Opm::ConvergenceOutputThread::ConvertToTimeUnits& convertTime,
+                          const std::string::size_type firstColSize,
+                          const int iter,
+                          const Opm::ConvergenceReport& report,
+                          const Opm::ConvergenceReportQueue::OutputRequest& request)
+    {
+        os << std::setw(firstColSize) << request.reportStep << ' '
+           << std::setw(firstColSize) << request.currentStep << ' '
+           << std::setprecision(4) << std::setw(firstColSize)
+           << convertTime(report.reportTime()) << ' '
+           << std::setw(firstColSize) << iter;
+    }
+
+    void writeCnvPvSplit(std::ostream& os,
+                         const std::vector<double>::size_type expectedNumValues,
+                         const std::string::size_type firstColSize,
+                         const Opm::ConvergenceReport& report)
+    {
+        const auto& [splitPv, cellCnt] = report.cnvPvSplit();
+
+        if (splitPv.size() == expectedNumValues) {
+            for (const auto& pv : splitPv) {
+                os << ' ' << std::setprecision(4) << std::setw(firstColSize)
+                   << pv / report.eligiblePoreVolume();
+            }
+        }
+        else {
+            constexpr auto val = std::numeric_limits<double>::has_quiet_NaN
+                ? std::numeric_limits<double>::quiet_NaN()
+                : -1.0;
+
+            for (auto i = 0*expectedNumValues; i < expectedNumValues; ++i) {
+                os << ' ' << std::setprecision(4) << std::setw(firstColSize) << val;
+            }
+        }
+
+        if (cellCnt.size() == expectedNumValues) {
+            for (const auto& cnt : cellCnt) {
+                os << ' ' << std::setw(firstColSize) << cnt;
+            }
+        }
+        else {
+            for (auto i = 0*expectedNumValues; i < expectedNumValues; ++i) {
+                os << ' ' << std::setw(firstColSize) << -1;
+            }
+        }
+    }
+
+    void writeReservoirConvergence(std::ostream& os,
+                                   const std::string::size_type colSize,
+                                   const Opm::ConvergenceReport& report)
+    {
+        for (const auto& metric : report.reservoirConvergence()) {
+            os << std::setprecision(4) << std::setw(colSize) << metric.value();
+        }
+    }
+
+    void writeWellConvergence(std::ostream& os,
+                              const std::string::size_type colSize,
+                              const Opm::ConvergenceReport& report)
+    {
+        os << std::right << std::setw(colSize)
+           << (report.wellFailed() ? "FAIL" : "CONV");
+
+        if (report.wellFailed()) {
+            for (const auto& wf : report.wellFailures()) {
+                os << ' ' << to_string(wf);
+            }
+        }
+    }
+
     void writeConvergenceRequest(std::ostream& os,
                                  const Opm::ConvergenceOutputThread::ConvertToTimeUnits& convertTime,
                                  const std::string::size_type firstColSize,
@@ -143,28 +221,16 @@ namespace {
     {
         os.setf(std::ios_base::scientific);

+        const auto expectNumCnvSplit = std::vector<double>::size_type{3};
+
         auto iter = 0;
         for (const auto& report : request.reports) {
-            os << std::setw(firstColSize) << request.reportStep << ' '
-               << std::setw(firstColSize) << request.currentStep << ' '
-               << std::setprecision(4) << std::setw(firstColSize)
-               << convertTime(report.reportTime()) << ' '
-               << std::setprecision(4) << std::setw(firstColSize)
-               << report.cnvViolatedPvFraction() << ' '
-               << std::setw(firstColSize) << iter;
-
-            for (const auto& metric : report.reservoirConvergence()) {
-                os << std::setprecision(4) << std::setw(colSize) << metric.value();
-            }
-
-            os << std::right << std::setw(colSize)
-               << (report.wellFailed() ? "FAIL" : "CONV");
-
-            if (report.wellFailed()) {
-                for (const auto& wf : report.wellFailures()) {
-                    os << ' ' << to_string(wf);
-                }
-            }
+            writeTimeColumns(os, convertTime, firstColSize, iter, report, request);
+            writeCnvPvSplit(os, expectNumCnvSplit, firstColSize, report);
+            writeReservoirConvergence(os, colSize, report);
+            writeWellConvergence(os, colSize, report);

             os << '\n';
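The writeCnvPvSplit() helper above falls back to sentinel values when a report does not carry the expected three-way split, so malformed rows stay visible rather than silently shifting columns. A standalone sketch of that fallback, using the same has_quiet_NaN test (hypothetical empty input, not from the PR):

#include <iostream>
#include <limits>
#include <vector>

int main()
{
    const std::vector<double> splitPv{}; // e.g., an empty split from a well-only report

    constexpr auto expected = std::vector<double>::size_type{3};
    constexpr auto sentinel = std::numeric_limits<double>::has_quiet_NaN
        ? std::numeric_limits<double>::quiet_NaN()
        : -1.0;

    if (splitPv.size() == expected) {
        for (const auto& pv : splitPv) { std::cout << ' ' << pv; }
    }
    else {
        for (auto i = 0*expected; i < expected; ++i) { std::cout << ' ' << sentinel; }
    }

    std::cout << '\n'; // prints " nan nan nan" on IEEE platforms

    return 0;
}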

Changed file 3 of 6:

@@ -1,6 +1,6 @@
 /*
   Copyright 2018 SINTEF Digital, Mathematics and Cybernetics.
-  Copyright 2018 Equinor.
+  Copyright 2018, 2024 Equinor.

   This file is part of the Open Porous Media project (OPM).
@@ -53,11 +53,19 @@ namespace Opm
             NotANumber = 3,
         };

+        using CnvPvSplit = std::pair<
+            std::vector<double>,
+            std::vector<int>>;
+
         class ReservoirFailure
         {
         public:
             enum struct Type { Invalid, MassBalance, Cnv };

+            // Default constructor needed for object serialisation.  Don't
+            // use this for anything else.
+            ReservoirFailure() = default;
+
             ReservoirFailure(Type t, Severity s, int phase)
                 : type_(t), severity_(s), phase_(phase)
             {}
@@ -66,15 +74,29 @@ namespace Opm
             Severity severity() const { return severity_; }
             int phase() const { return phase_; }

+            template <typename Serializer>
+            void serializeOp(Serializer& serializer)
+            {
+                serializer(this->type_);
+                serializer(this->severity_);
+                serializer(this->phase_);
+            }
+
         private:
-            Type type_;
-            Severity severity_;
-            int phase_;
+            // Note to maintainers: If you change this list of data members,
+            // then please update serializeOp() accordingly.
+            Type type_ { Type::Invalid };
+            Severity severity_ { Severity::None };
+            int phase_ { -1 };
         };

         class ReservoirConvergenceMetric
         {
         public:
+            // Default constructor needed for object serialisation.  Don't
+            // use this for anything else.
+            ReservoirConvergenceMetric() = default;
+
             ReservoirConvergenceMetric(ReservoirFailure::Type t, int phase, double value)
                 : type_(t), phase_(phase), value_(value)
             {}
@@ -83,10 +105,20 @@ namespace Opm
             int phase() const { return phase_; }
             double value() const { return value_; }

+            template <typename Serializer>
+            void serializeOp(Serializer& serializer)
+            {
+                serializer(this->type_);
+                serializer(this->phase_);
+                serializer(this->value_);
+            }
+
         private:
-            ReservoirFailure::Type type_;
-            int phase_;
-            double value_;
+            // Note to maintainers: If you change this list of data members,
+            // then please update serializeOp() accordingly.
+            ReservoirFailure::Type type_ { ReservoirFailure::Type::Invalid };
+            int phase_ { -1 };
+            double value_ { 0.0 };
         };

         class WellFailure
class WellFailure class WellFailure
@ -103,6 +135,10 @@ namespace Opm
WrongFlowDirection, WrongFlowDirection,
}; };
// Default constructor needed for object serialisation. Don't
// use this for anything else.
WellFailure() = default;
WellFailure(Type t, Severity s, int phase, const std::string& well_name) WellFailure(Type t, Severity s, int phase, const std::string& well_name)
: type_(t), severity_(s), phase_(phase), well_name_(well_name) : type_(t), severity_(s), phase_(phase), well_name_(well_name)
{} {}
@@ -112,11 +148,22 @@ namespace Opm
             int phase() const { return phase_; }
             const std::string& wellName() const { return well_name_; }

+            template <typename Serializer>
+            void serializeOp(Serializer& serializer)
+            {
+                serializer(this->type_);
+                serializer(this->severity_);
+                serializer(this->phase_);
+                serializer(this->well_name_);
+            }
+
         private:
-            Type type_;
-            Severity severity_;
-            int phase_;
-            std::string well_name_;
+            // Note to maintainers: If you change this list of data members,
+            // then please update serializeOp() accordingly.
+            Type type_ { Type::Invalid };
+            Severity severity_ { Severity::None };
+            int phase_ { -1 };
+            std::string well_name_ {};
         };

         // ----------- Mutating member functions -----------
@@ -164,11 +211,11 @@ namespace Opm
             wellGroupTargetsViolated_ = wellGroupTargetsViolated;
         }

-        void setPoreVolCnvViolationFraction(const double cnvErrorPvFraction,
-                                            const double cnvErrorPvFractionDenom)
+        void setCnvPoreVolSplit(const CnvPvSplit& cnvPvSplit,
+                                const double eligiblePoreVolume)
         {
-            this->pvFracCnvViol_ = cnvErrorPvFraction;
-            this->pvFracCnvViolDenom_ = cnvErrorPvFractionDenom;
+            this->cnvPvSplit_ = cnvPvSplit;
+            this->eligiblePoreVolume_ = eligiblePoreVolume;
         }

         ConvergenceReport& operator+=(const ConvergenceReport& other)
@@ -182,18 +229,16 @@ namespace Opm
             assert(wellFailed() != well_failures_.empty());
             wellGroupTargetsViolated_ = (wellGroupTargetsViolated_ || other.wellGroupTargetsViolated_);

-            if ((this->pvFracCnvViolDenom_ > 0.0) ||
-                (other.pvFracCnvViolDenom_ > 0.0))
-            {
-                this->pvFracCnvViol_ = (this->pvFracCnvViol_ * this->pvFracCnvViolDenom_ +
-                                        other.pvFracCnvViol_ * other.pvFracCnvViolDenom_)
-                    / (this->pvFracCnvViolDenom_ + other.pvFracCnvViolDenom_);
-
-                this->pvFracCnvViolDenom_ += other.pvFracCnvViolDenom_;
-            }
-            else {
-                this->pvFracCnvViol_ = 0.0;
-                this->pvFracCnvViolDenom_ = 0.0;
-            }
+            // Note regarding the CNV pore-volume split: We depend on the
+            // fact that the quantities have already been aggregated across
+            // all MPI ranks--see the implementation of member function
+            // BlackoilModel::getReservoirConvergence() for details--and are
+            // therefore equal on all ranks.  Consequently, we simply assign
+            // 'other's values here, if it is non-empty.  Empty splits
+            // typically come from well contributions.
+            if (! other.cnvPvSplit_.first.empty()) {
+                this->cnvPvSplit_ = other.cnvPvSplit_;
+                this->eligiblePoreVolume_ = other.eligiblePoreVolume_;
+            }

             return *this;
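A usage sketch of the new merge semantics based on the interface shown in this header (the include path and the zero-argument constructor behaviour are assumptions, and the split values are made up): a report carrying an empty split never overwrites one that carries data.

#include <opm/simulators/timestepping/ConvergenceReport.hpp> // path assumed

int main()
{
    Opm::ConvergenceReport reservoir{};
    reservoir.setCnvPoreVolSplit({{0.75, 0.2, 0.05}, {1234, 56, 7}}, 9.876e5);

    Opm::ConvergenceReport wellsOnly{}; // empty cnvPvSplit_, as from a well contribution

    reservoir += wellsOnly; // split unchanged: 'wellsOnly' carries no data
    wellsOnly += reservoir; // split copied over from 'reservoir'

    return 0;
}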
@@ -206,14 +251,14 @@ namespace Opm
             return reportTime_;
         }

-        double cnvViolatedPvFraction() const
+        double eligiblePoreVolume() const
         {
-            return this->pvFracCnvViol_;
+            return this->eligiblePoreVolume_;
         }

-        std::pair<double, double> cnvViolatedPvFractionPack() const
+        const CnvPvSplit& cnvPvSplit() const
         {
-            return { this->pvFracCnvViol_, this->pvFracCnvViolDenom_ };
+            return this->cnvPvSplit_;
         }

         bool converged() const
bool converged() const bool converged() const
@ -262,16 +307,31 @@ namespace Opm
return s; return s;
} }
template <typename Serializer>
void serializeOp(Serializer& serializer)
{
serializer(this->reportTime_);
serializer(this->status_);
serializer(this->res_failures_);
serializer(this->well_failures_);
serializer(this->res_convergence_);
serializer(this->wellGroupTargetsViolated_);
serializer(this->cnvPvSplit_);
serializer(this->eligiblePoreVolume_);
}
private: private:
// ----------- Member variables ----------- // ----------- Member variables -----------
// Note to maintainers: If you change this list of data members,
// then please update serializeOp() accordingly.
double reportTime_; double reportTime_;
Status status_; Status status_;
std::vector<ReservoirFailure> res_failures_; std::vector<ReservoirFailure> res_failures_;
std::vector<WellFailure> well_failures_; std::vector<WellFailure> well_failures_;
std::vector<ReservoirConvergenceMetric> res_convergence_; std::vector<ReservoirConvergenceMetric> res_convergence_;
bool wellGroupTargetsViolated_; bool wellGroupTargetsViolated_;
double pvFracCnvViol_{}; CnvPvSplit cnvPvSplit_{};
double pvFracCnvViolDenom_{}; double eligiblePoreVolume_{};
}; };
struct StepReport struct StepReport
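All four serializeOp() members added in this header follow the same pattern: the object hands each data member, in declaration order, to a callable serializer. A minimal sketch of that visitation contract with a hypothetical PrintingSerializer (not the real Opm::Serializer, which packs to and unpacks from a byte buffer):

#include <iostream>

struct PrintingSerializer
{
    template <class T>
    void operator()(const T& value) { std::cout << value << '\n'; }
};

struct Failure // stand-in for ReservoirFailure and friends
{
    int type{1};
    int severity{2};
    int phase{-1};

    template <typename Serializer>
    void serializeOp(Serializer& serializer)
    {
        serializer(this->type);
        serializer(this->severity);
        serializer(this->phase);
    }
};

int main()
{
    Failure f;
    PrintingSerializer ser;

    f.serializeOp(ser); // visits each member in declaration order: 1, 2, -1

    return 0;
}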

Changed file 4 of 6:

@@ -1,5 +1,5 @@
 /*
-  Copyright 2018, 2022 Equinor ASA.
+  Copyright 2018, 2022, 2024 Equinor ASA.
   Copyright 2018 SINTEF Digital, Mathematics and Cybernetics.

   This file is part of the Open Porous Media project (OPM).
@@ -22,252 +22,154 @@
 #include <opm/simulators/timestepping/gatherConvergenceReport.hpp>

-#include <utility>
+#include <opm/simulators/utils/ParallelCommunication.hpp>

 #if HAVE_MPI

-#include <mpi.h>
+#include <opm/common/utility/Serializer.hpp>
+
+#include <opm/simulators/utils/MPIPacker.hpp>
+
+#include <opm/grid/common/CommunicationUtils.hpp>
+
+#include <algorithm>
+#include <tuple>
+#include <vector>

-namespace
-{
-    using Opm::ConvergenceReport;
-
-    void packReservoirFailure(const ConvergenceReport::ReservoirFailure& f,
-                              std::vector<char>& buf,
-                              int& offset, MPI_Comm mpi_communicator)
-    {
-        int type = static_cast<int>(f.type());
-        int severity = static_cast<int>(f.severity());
-        int phase = f.phase();
-        MPI_Pack(&type, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
-        MPI_Pack(&severity, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
-        MPI_Pack(&phase, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
-    }
-
-    void packReservoirConvergenceMetric(const ConvergenceReport::ReservoirConvergenceMetric& m,
-                                        std::vector<char>& buf,
-                                        int& offset, MPI_Comm mpi_communicator)
-    {
-        int type = static_cast<int>(m.type());
-        int phase = m.phase();
-        double value = m.value();
-        MPI_Pack(&type, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
-        MPI_Pack(&phase, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
-        MPI_Pack(&value, 1, MPI_DOUBLE, buf.data(), buf.size(), &offset, mpi_communicator);
-    }
-
-    void packWellFailure(const ConvergenceReport::WellFailure& f,
-                         std::vector<char>& buf,
-                         int& offset, MPI_Comm mpi_communicator)
-    {
-        int type = static_cast<int>(f.type());
-        int severity = static_cast<int>(f.severity());
-        int phase = f.phase();
-        MPI_Pack(&type, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
-        MPI_Pack(&severity, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
-        MPI_Pack(&phase, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
-        int name_length = f.wellName().size() + 1; // Adding 1 for the null terminator.
-        MPI_Pack(&name_length, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
-        MPI_Pack(const_cast<char*>(f.wellName().c_str()), name_length, MPI_CHAR, buf.data(), buf.size(), &offset, mpi_communicator);
-    }
-
-    void packConvergenceReport(const ConvergenceReport& local_report,
-                               std::vector<char>& buf,
-                               int& offset, MPI_Comm mpi_communicator)
-    {
-        // Pack the data.
-        // Status will not be packed, it is possible to deduce from the other data.
-        double reportTime = local_report.reportTime();
-        MPI_Pack(&reportTime, 1, MPI_DOUBLE, buf.data(), buf.size(), &offset, mpi_communicator);
-        {
-            const auto cnvPvFrac = local_report.cnvViolatedPvFractionPack();
-            MPI_Pack(&cnvPvFrac.first , 1, MPI_DOUBLE, buf.data(), buf.size(), &offset, mpi_communicator);
-            MPI_Pack(&cnvPvFrac.second, 1, MPI_DOUBLE, buf.data(), buf.size(), &offset, mpi_communicator);
-        }
-        // Reservoir failures.
-        const auto rf = local_report.reservoirFailures();
-        int num_rf = rf.size();
-        MPI_Pack(&num_rf, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
-        for (const auto& f : rf) {
-            packReservoirFailure(f, buf, offset, mpi_communicator);
-        }
-        // Reservoir convergence metrics.
-        const auto rm = local_report.reservoirConvergence();
-        int num_rm = rm.size();
-        MPI_Pack(&num_rm, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
-        for (const auto& m : rm) {
-            packReservoirConvergenceMetric(m, buf, offset, mpi_communicator);
-        }
-        // Well failures.
-        const auto wf = local_report.wellFailures();
-        int num_wf = wf.size();
-        MPI_Pack(&num_wf, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
-        for (const auto& f : wf) {
-            packWellFailure(f, buf, offset, mpi_communicator);
-        }
-    }
-
-    int messageSize(const ConvergenceReport& local_report, MPI_Comm mpi_communicator)
-    {
-        int int_pack_size = 0;
-        MPI_Pack_size(1, MPI_INT, mpi_communicator, &int_pack_size);
-        int double_pack_size = 0;
-        MPI_Pack_size(1, MPI_DOUBLE, mpi_communicator, &double_pack_size);
-        const int num_rf = local_report.reservoirFailures().size();
-        const int num_rm = local_report.reservoirConvergence().size();
-        const int num_wf = local_report.wellFailures().size();
-        int wellnames_length = 0;
-        for (const auto& f : local_report.wellFailures()) {
-            wellnames_length += (f.wellName().size() + 1);
-        }
-        return (3 + 3*num_rf + 2*num_rm + 4*num_wf)*int_pack_size + (3 + 1*num_rm)*double_pack_size + wellnames_length;
-    }
-
-    ConvergenceReport::ReservoirFailure unpackReservoirFailure(const std::vector<char>& recv_buffer, int& offset, MPI_Comm mpi_communicator)
-    {
-        int type = -1;
-        int severity = -1;
-        int phase = -1;
-        auto* data = const_cast<char*>(recv_buffer.data());
-        MPI_Unpack(data, recv_buffer.size(), &offset, &type, 1, MPI_INT, mpi_communicator);
-        MPI_Unpack(data, recv_buffer.size(), &offset, &severity, 1, MPI_INT, mpi_communicator);
-        MPI_Unpack(data, recv_buffer.size(), &offset, &phase, 1, MPI_INT, mpi_communicator);
-        return ConvergenceReport::ReservoirFailure(static_cast<ConvergenceReport::ReservoirFailure::Type>(type),
-                                                   static_cast<ConvergenceReport::Severity>(severity),
-                                                   phase);
-    }
-
-    ConvergenceReport::ReservoirConvergenceMetric
-    unpackReservoirConvergenceMetric(const std::vector<char>& recv_buffer, int& offset, MPI_Comm mpi_communicator)
-    {
-        int type = -1;
-        int phase = -1;
-        double value = -1.0;
-        auto* data = const_cast<char*>(recv_buffer.data());
-        MPI_Unpack(data, recv_buffer.size(), &offset, &type, 1, MPI_INT, mpi_communicator);
-        MPI_Unpack(data, recv_buffer.size(), &offset, &phase, 1, MPI_INT, mpi_communicator);
-        MPI_Unpack(data, recv_buffer.size(), &offset, &value, 1, MPI_DOUBLE, mpi_communicator);
-        return { static_cast<ConvergenceReport::ReservoirFailure::Type>(type), phase, value };
-    }
-
-    ConvergenceReport::WellFailure unpackWellFailure(const std::vector<char>& recv_buffer, int& offset, MPI_Comm mpi_communicator)
-    {
-        int type = -1;
-        int severity = -1;
-        int phase = -1;
-        auto* data = const_cast<char*>(recv_buffer.data());
-        MPI_Unpack(data, recv_buffer.size(), &offset, &type, 1, MPI_INT, mpi_communicator);
-        MPI_Unpack(data, recv_buffer.size(), &offset, &severity, 1, MPI_INT, mpi_communicator);
-        MPI_Unpack(data, recv_buffer.size(), &offset, &phase, 1, MPI_INT, mpi_communicator);
-        int name_length = -1;
-        MPI_Unpack(data, recv_buffer.size(), &offset, &name_length, 1, MPI_INT, mpi_communicator);
-        std::vector<char> namechars(name_length);
-        MPI_Unpack(data, recv_buffer.size(), &offset, namechars.data(), name_length, MPI_CHAR, mpi_communicator);
-        std::string name(namechars.data());
-        return ConvergenceReport::WellFailure(static_cast<ConvergenceReport::WellFailure::Type>(type),
-                                              static_cast<ConvergenceReport::Severity>(severity),
-                                              phase,
-                                              name);
-    }
-
-    ConvergenceReport unpackSingleConvergenceReport(const std::vector<char>& recv_buffer, int& offset, MPI_Comm mpi_communicator)
-    {
-        auto* data = const_cast<char*>(recv_buffer.data());
-        double reportTime{0.0};
-        MPI_Unpack(data, recv_buffer.size(), &offset, &reportTime, 1, MPI_DOUBLE, mpi_communicator);
-        ConvergenceReport cr{reportTime};
-        auto cnvPvFrac = std::pair { 0.0, 0.0 };
-        MPI_Unpack(data, recv_buffer.size(), &offset, &cnvPvFrac.first , 1, MPI_DOUBLE, mpi_communicator);
-        MPI_Unpack(data, recv_buffer.size(), &offset, &cnvPvFrac.second, 1, MPI_DOUBLE, mpi_communicator);
-        cr.setPoreVolCnvViolationFraction(cnvPvFrac.first, cnvPvFrac.second);
-        int num_rf = -1;
-        MPI_Unpack(data, recv_buffer.size(), &offset, &num_rf, 1, MPI_INT, mpi_communicator);
-        for (int rf = 0; rf < num_rf; ++rf) {
-            ConvergenceReport::ReservoirFailure f = unpackReservoirFailure(recv_buffer, offset, mpi_communicator);
-            cr.setReservoirFailed(f);
-        }
-        int num_rm = -1;
-        MPI_Unpack(data, recv_buffer.size(), &offset, &num_rm, 1, MPI_INT, mpi_communicator);
-        for (int rm = 0; rm < num_rm; ++rm) {
-            cr.setReservoirConvergenceMetric(unpackReservoirConvergenceMetric(recv_buffer, offset, mpi_communicator));
-        }
-        int num_wf = -1;
-        MPI_Unpack(data, recv_buffer.size(), &offset, &num_wf, 1, MPI_INT, mpi_communicator);
-        for (int wf = 0; wf < num_wf; ++wf) {
-            ConvergenceReport::WellFailure f = unpackWellFailure(recv_buffer, offset, mpi_communicator);
-            cr.setWellFailed(f);
-        }
-        return cr;
-    }
-
-    ConvergenceReport unpackConvergenceReports(const std::vector<char>& recv_buffer,
-                                               const std::vector<int>& displ, MPI_Comm mpi_communicator)
-    {
-        ConvergenceReport cr;
-        const int num_processes = displ.size() - 1;
-        for (int process = 0; process < num_processes; ++process) {
-            int offset = displ[process];
-            cr += unpackSingleConvergenceReport(recv_buffer, offset, mpi_communicator);
-            assert(offset == displ[process + 1]);
-        }
-        return cr;
-    }
-} // anonymous namespace
+namespace {
+    /// Special purpose utility to collect each rank's local convergence
+    /// report object and distribute those to all ranks.
+    ///
+    /// This particular feature could arguably be a part of the
+    /// Opm::MpiSerializer class, but we create it here first as a narrowly
+    /// scoped testing ground.
+    ///
+    /// Inherits from Opm::Serializer<> in order to use that type's pack()
+    /// and unpack() member functions along with its internal 'm_buffer'
+    /// data member.
+    class CollectConvReports : private Opm::Serializer<Opm::Mpi::Packer>
+    {
+    public:
+        /// Constructor.
+        ///
+        /// \param[in] packer Packing object to convert in-memory objects to
+        ///   a byte representation.  Lifetime must exceed that of the
+        ///   CollectConvReports object.
+        ///
+        /// \param[in] comm MPI communicator.  Should be the same as the
+        ///   internal communicator in the \p packer object.
+        explicit CollectConvReports(const Opm::Mpi::Packer& packer,
+                                    const Opm::Parallel::Communication comm)
+            : Opm::Serializer<Opm::Mpi::Packer> { packer }
+            , comm_ { comm }
+        {}
+
+        /// Collect local convergence reports from each MPI rank.
+        ///
+        /// \param[in] report Local convergence report from the current MPI
+        ///   rank.
+        ///
+        /// \return Collection of local convergence reports from all MPI
+        ///   ranks in the current communicator.  Report objects returned in
+        ///   rank order.
+        std::vector<Opm::ConvergenceReport>
+        operator()(const Opm::ConvergenceReport& report);
+
+    private:
+        /// MPI communicator.
+        Opm::Parallel::Communication comm_;
+
+        /// Byte representation of local convergence report objects from all
+        /// ranks.
+        ///
+        /// Only valid during a call to operator().
+        std::vector<char> rankBuffers_{};
+
+        /// Start pointers into rankBuffers_ for each rank's local
+        /// convergence report object.
+        std::vector<int> rankStart_{};
+
+        /// Reconstitute a convergence report from byte stream representation.
+        ///
+        /// \param[in] rank MPI rank for which to reconstitute the local
+        ///   convergence report object.
+        ///
+        /// \param[in,out] report \p rank's local convergence report object.
+        ///   On entry, a default constructed object usable as a destination
+        ///   for deserialisation.  On exit, a fully populated convergence
+        ///   report object filled from the byte stream of \p rank.
+        void deserialise(const std::vector<int>::size_type rank,
+                         Opm::ConvergenceReport& report);
+    };
+
+    std::vector<Opm::ConvergenceReport>
+    CollectConvReports::operator()(const Opm::ConvergenceReport& report)
+    {
+        auto rankReports = std::vector<Opm::ConvergenceReport>(this->comm_.size());
+
+        this->pack(report);
+
+        std::tie(this->rankBuffers_, this->rankStart_) =
+            Opm::allGatherv(this->m_buffer, this->comm_);
+
+        auto rank = std::vector<int>::size_type{0};
+        for (auto& rankReport : rankReports) {
+            this->deserialise(rank++, rankReport);
+        }
+
+        return rankReports;
+    }
+
+    void CollectConvReports::deserialise(const std::vector<int>::size_type rank,
+                                         Opm::ConvergenceReport& report)
+    {
+        auto begin = this->rankBuffers_.begin() + this->rankStart_[rank + 0];
+        auto end   = this->rankBuffers_.begin() + this->rankStart_[rank + 1];
+
+        this->m_buffer.assign(begin, end);
+        this->m_packSize = std::distance(begin, end);
+
+        this->unpack(report);
+    }
+} // Anonymous namespace

 namespace Opm
 {
-    /// Create a global convergence report combining local
-    /// (per-process) reports.
-    ConvergenceReport gatherConvergenceReport(const ConvergenceReport& local_report, Parallel::Communication mpi_communicator)
+    /// Create a global convergence report combining local (per-process)
+    /// reports.
+    ConvergenceReport
+    gatherConvergenceReport(const ConvergenceReport& local_report,
+                            Parallel::Communication mpi_communicator)
     {
-        // Pack local report.
-        int message_size = messageSize(local_report, mpi_communicator);
-        std::vector<char> buffer(message_size);
-        int offset = 0;
-        packConvergenceReport(local_report, buffer, offset, mpi_communicator);
-        assert(offset == message_size);
-
-        // Get message sizes and create offset/displacement array for gathering.
-        int num_processes = -1;
-        MPI_Comm_size(mpi_communicator, &num_processes);
-        std::vector<int> message_sizes(num_processes);
-        MPI_Allgather(&message_size, 1, MPI_INT, message_sizes.data(), 1, MPI_INT, mpi_communicator);
-        std::vector<int> displ(num_processes + 1, 0);
-        std::partial_sum(message_sizes.begin(), message_sizes.end(), displ.begin() + 1);
-
-        // Gather.
-        std::vector<char> recv_buffer(displ.back());
-        MPI_Allgatherv(buffer.data(), buffer.size(), MPI_PACKED,
-                       const_cast<char*>(recv_buffer.data()), message_sizes.data(),
-                       displ.data(), MPI_PACKED,
-                       mpi_communicator);
-
-        // Unpack.
-        ConvergenceReport global_report = unpackConvergenceReports(recv_buffer, displ, mpi_communicator);
-        return global_report;
+        if (mpi_communicator.size() == 1) {
+            // Sequential run, no communication needed.
+            return local_report;
+        }
+
+        // Multi-process run (common case).  Need object distribution.
+        auto combinedReport = ConvergenceReport {};
+
+        const auto packer = Mpi::Packer { mpi_communicator };
+
+        const auto rankReports =
+            CollectConvReports { packer, mpi_communicator }(local_report);
+
+        for (const auto& rankReport : rankReports) {
+            combinedReport += rankReport;
+        }
+
+        return combinedReport;
     }
 } // namespace Opm

-#else // HAVE_MPI
+#else // !HAVE_MPI

 namespace Opm
 {
-    ConvergenceReport gatherConvergenceReport(const ConvergenceReport& local_report,
-                                              Parallel::Communication mpi_communicator [[maybe_unused]])
+    ConvergenceReport
+    gatherConvergenceReport(const ConvergenceReport& local_report,
+                            [[maybe_unused]] Parallel::Communication mpi_communicator)
     {
         return local_report;

Changed file 5 of 6:

@@ -30,9 +30,10 @@ namespace Opm
     /// Create a global convergence report combining local
     /// (per-process) reports.
-    ConvergenceReport gatherConvergenceReport(const ConvergenceReport& local_report, Parallel::Communication communicator);
+    ConvergenceReport
+    gatherConvergenceReport(const ConvergenceReport& local_report,
+                            Parallel::Communication communicator);

 } // namespace Opm

 #endif // OPM_GATHERCONVERGENCEREPORT_HEADER_INCLUDED

Changed file 6 of 6:

@@ -106,6 +106,74 @@ BOOST_AUTO_TEST_CASE(EvenHaveFailure)
     }
 }

+namespace {
+    class NProc_Is_Not
+    {
+    public:
+        explicit NProc_Is_Not(const int rejectNP)
+            : rejectNP_ { rejectNP }
+        {}
+
+        boost::test_tools::assertion_result
+        operator()(boost::unit_test::test_unit_id) const
+        {
+            auto comm = Opm::Parallel::Communication {
+                Dune::MPIHelper::getCommunicator()
+            };
+
+            if (comm.size() != this->rejectNP_) {
+                return true;
+            }
+
+            boost::test_tools::assertion_result response(false);
+            response.message() << "Number of MPI processes ("
+                               << comm.size()
+                               << ") matches rejected case.";
+
+            return response;
+        }
+
+    private:
+        int rejectNP_{};
+    };
+} // Anonymous namespace
+
+BOOST_AUTO_TEST_CASE(CNV_PV_SPLIT, * boost::unit_test::precondition(NProc_Is_Not{1}))
+{
+    const auto cc = Dune::MPIHelper::getCommunication();
+
+    auto loc_rpt = Opm::ConvergenceReport {};
+    if (cc.rank() != 0) {
+        // All ranks are supposed to have the *same* pore-volume split of
+        // the CNV metrics, except if the pv split is empty.
+        const auto pvSplit = Opm::ConvergenceReport::CnvPvSplit {
+            { 0.75, 0.2, 0.05, },
+            { 1234, 56 , 7 , }
+        };
+
+        loc_rpt.setCnvPoreVolSplit(pvSplit, 9.876e5);
+    }
+
+    const auto cr = gatherConvergenceReport(loc_rpt, cc);
+
+    const auto& [pvFrac, cellCnt] = cr.cnvPvSplit();
+
+    BOOST_REQUIRE_EQUAL(pvFrac.size(), std::size_t{3});
+    BOOST_REQUIRE_EQUAL(cellCnt.size(), std::size_t{3});
+
+    BOOST_CHECK_CLOSE(cr.eligiblePoreVolume(), 9.876e5, 1.0e-8);
+
+    BOOST_CHECK_CLOSE(pvFrac[0], 0.75, 1.0e-8);
+    BOOST_CHECK_CLOSE(pvFrac[1], 0.20, 1.0e-8);
+    BOOST_CHECK_CLOSE(pvFrac[2], 0.05, 1.0e-8);
+
+    BOOST_CHECK_EQUAL(cellCnt[0], 1234);
+    BOOST_CHECK_EQUAL(cellCnt[1], 56);
+    BOOST_CHECK_EQUAL(cellCnt[2], 7);
+}
+
 int main(int argc, char** argv)
 {
     Dune::MPIHelper::instance(argc, argv);