Correctly parallelize computeFluidInPlace.

The first implementation computed wrong results in parallel. With this
commit the computations are now fully parallelized, and the results appear
correct for parallel runs with the Norne case.
Author: Markus Blatt, 2016-09-15 15:43:08 +02:00
Parent: e15f9bfb9c
Commit: 2c70f05d6b
3 changed files with 125 additions and 18 deletions
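
A likely source of the wrong results in the first version is that, in a parallel
run, every process also stores cells that are owned by a neighbouring process, so
a naive sum over all local cells counts those cells more than once. The commit
therefore accumulates only owned cells (using the owner mask provided by
ParallelISTLInformation) and then sums the per-region totals across all processes.
The following stand-alone sketch shows that pattern in isolation; it is not taken
from the commit, the names ownerMask, fipnum, poreVol and regionSum are invented
for the toy example, and it assumes MPI with two ranks (mpirun -np 2).

// Toy sketch of "accumulate owned cells locally, then sum globally".
// Not OPM code: data and names are made up for illustration only.
#include <mpi.h>
#include <vector>
#include <iostream>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Every rank sees the same four cells here, but owns only two of them.
    std::vector<int>    fipnum    = {1, 1, 2, 2};              // FIP region per cell, 0 = inactive
    std::vector<double> poreVol   = {10.0, 20.0, 30.0, 40.0};
    std::vector<int>    ownerMask = {rank == 0, rank == 0,     // rank 0 owns cells 0 and 1
                                     rank != 0, rank != 0};    // rank 1 owns cells 2 and 3

    // Local accumulation: skipping cells owned elsewhere prevents double counting.
    const int dims = 2;
    std::vector<double> regionSum(dims, 0.0);
    for (std::size_t c = 0; c < fipnum.size(); ++c) {
        if (fipnum[c] != 0 && ownerMask[c]) {
            regionSum[fipnum[c] - 1] += poreVol[c];
        }
    }

    // Global reduction, playing the role of comm.sum(...) in the commit.
    MPI_Allreduce(MPI_IN_PLACE, regionSum.data(), dims,
                  MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

    if (rank == 0) {
        std::cout << "PORV region 1 = " << regionSum[0]
                  << ", region 2 = " << regionSum[1] << "\n";
    }
    MPI_Finalize();
    return 0;
}

In the diff below, the same two steps appear as the mask[c] test in the
accumulation loops and the comm.sum(...) calls that follow them.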

Changed file 1 of 3

@@ -251,6 +251,9 @@ namespace Opm {
                             ReservoirState& reservoir_state,
                             WellState& well_state);
 
+        /// Return true if this is a parallel run.
+        bool isParallel() const;
+
         /// Return true if output to cout is wanted.
         bool terminalOutputEnabled() const;

Changed file 2 of 3

@@ -251,7 +251,26 @@ namespace detail {
     }
 
+    template <class Grid, class WellModel, class Implementation>
+    bool
+    BlackoilModelBase<Grid, WellModel, Implementation>::
+    isParallel() const
+    {
+#if HAVE_MPI
+        if ( linsolver_.parallelInformation().type() !=
+             typeid(ParallelISTLInformation) )
+        {
+            return false;
+        }
+        else
+        {
+            const auto& comm =boost::any_cast<const ParallelISTLInformation&>(linsolver_.parallelInformation()).communicator();
+            return comm.size() > 1;
+        }
+#else
+        return false;
+#endif
+    }
+
     template <class Grid, class WellModel, class Implementation>
@@ -2357,29 +2376,85 @@ namespace detail {
             fip[4] = rv.value() * fip[pg];
         }
 
-        const int dims = *std::max_element(fipnum.begin(), fipnum.end());
-        std::vector<V> values(dims, V::Zero(7));
-        for (int i = 0; i < 5; ++i) {
-            for (int c = 0; c < nc; ++c) {
-                if (fipnum[c] != 0) {
-                    values[fipnum[c]-1][i] += fip[i][c];
-                }
-            }
-        }
-
-        // compute PAV and PORV for every regions.
-        const V hydrocarbon = saturation[Oil].value() + saturation[Gas].value();
-        V hcpv = V::Zero(dims);
-        V pres = V::Zero(dims);
-        for (int c = 0; c < nc; ++c) {
-            if (fipnum[c] != 0) {
-                hcpv[fipnum[c]-1] += pv[c] * hydrocarbon[c];
-                pres[fipnum[c]-1] += pv[c] * pressure.value()[c];
-                values[fipnum[c]-1][5] += pv[c];
-                values[fipnum[c]-1][6] += pv[c] * pressure.value()[c] * hydrocarbon[c];
-            }
-        }
-
+        // For a parallel run this is just a local maximum and needs to be updated later
+        int dims = *std::max_element(fipnum.begin(), fipnum.end());
+        std::vector<V> values(dims, V::Zero(7));
+
+        const V hydrocarbon = saturation[Oil].value() + saturation[Gas].value();
+        V hcpv;
+        V pres;
+
+        if ( !isParallel() )
+        {
+            for (int i = 0; i < 5; ++i) {
+                for (int c = 0; c < nc; ++c) {
+                    if (fipnum[c] != 0) {
+                        values[fipnum[c]-1][i] += fip[i][c];
+                    }
+                }
+            }
+
+            hcpv = V::Zero(dims);
+            pres = V::Zero(dims);
+
+            for (int c = 0; c < nc; ++c) {
+                if (fipnum[c] != 0) {
+                    hcpv[fipnum[c]-1] += pv[c] * hydrocarbon[c];
+                    pres[fipnum[c]-1] += pv[c] * pressure.value()[c];
+                    values[fipnum[c]-1][5] += pv[c];
+                    values[fipnum[c]-1][6] += pv[c] * pressure.value()[c] * hydrocarbon[c];
+                }
+            }
+        }
+        else
+        {
+#if HAVE_MPI
+            // mask[c] is 1 if we need to compute something in parallel
+            const auto & pinfo =
+                boost::any_cast<const ParallelISTLInformation&>(linsolver_.parallelInformation());
+            const auto& mask = pinfo.getOwnerMask();
+            auto comm = pinfo.communicator();
+            // Compute the global dims value and resize values accordingly.
+            dims = comm.max(dims);
+            values.resize(dims, V::Zero(7));
+
+            for (int i = 0; i < 5; ++i) {
+                for (int c = 0; c < nc; ++c) {
+                    if (fipnum[c] != 0 && mask[c]) {
+                        values[fipnum[c]-1][i] += fip[i][c];
+                    }
+                }
+            }
+
+            hcpv = V::Zero(dims);
+            pres = V::Zero(dims);
+
+            for (int c = 0; c < nc; ++c) {
+                if (fipnum[c] != 0 && mask[c]) {
+                    hcpv[fipnum[c]-1] += pv[c] * hydrocarbon[c];
+                    pres[fipnum[c]-1] += pv[c] * pressure.value()[c];
+                    values[fipnum[c]-1][5] += pv[c];
+                    values[fipnum[c]-1][6] += pv[c] * pressure.value()[c] * hydrocarbon[c];
+                }
+            }
+
+            // For the frankenstein branch we hopefully can turn values into a vanilla
+            // std::vector<double>, use some index magic above, use one communication
+            // to sum up the vector entries instead of looping over the regions.
+            for(int reg=0; reg < dims; ++reg)
+            {
+                comm.sum(values[reg].data(), values[reg].size());
+            }
+            comm.sum(hcpv.data(), hcpv.size());
+            comm.sum(pres.data(), pres.size());
+#else
+            // This should never happen!
+            OPM_THROW(std::logic_error, "HAVE_MPI should be defined if we are running in parallel");
+#endif
+        }
+
+        // compute PAV and PORV for every regions.
         for (int reg = 0; reg < dims; ++reg) {
             if (hcpv[reg] != 0) {
                 values[reg][6] /= hcpv[reg];
@@ -2389,7 +2464,6 @@ namespace detail {
         }
 
         return values;
     }
 } // namespace Opm
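
The "frankenstein branch" comment in the hunk above hints at a follow-up: keep the
per-region totals in one flat buffer so that a single reduction replaces the loop of
comm.sum(...) calls over the regions. A rough sketch of that idea, using plain MPI
instead of the communicator wrapper; flatValues and NUM_FIP_VALUES are invented
names and nothing here is taken from the commit:

// One flat buffer, one collective call, instead of one call per FIP region.
#include <mpi.h>
#include <vector>
#include <iostream>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    const int dims = 3;            // number of FIP regions, assumed globally agreed (e.g. via a max reduction)
    const int NUM_FIP_VALUES = 7;  // entries tracked per region in the commit

    // Layout: flatValues[region * NUM_FIP_VALUES + item]
    std::vector<double> flatValues(dims * NUM_FIP_VALUES, 0.0);

    // ... the local accumulation over owned cells would fill flatValues here ...

    // A single collective sums every region and every item at once.
    MPI_Allreduce(MPI_IN_PLACE, flatValues.data(),
                  static_cast<int>(flatValues.size()),
                  MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank == 0) {
        std::cout << "reduced " << flatValues.size() << " entries in one call\n";
    }
    MPI_Finalize();
    return 0;
}

The trade-off is one message of size dims * 7 instead of dims messages of size 7,
which reduces latency overhead when the number of FIP regions is large.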

Changed file 3 of 3

@@ -20,6 +20,7 @@
*/
#include <utility>
#include <functional>
#include <algorithm>
#include <locale>
#include <opm/parser/eclipse/EclipseState/Schedule/Events.hpp>
@@ -697,9 +698,38 @@ namespace Opm
         const V sg = pu.phase_used[BlackoilPhases::Vapour] ? V(s.col(BlackoilPhases::Vapour)) : V::Zero(nc);
         const V hydrocarbon = so + sg;
         const V p = Eigen::Map<const V>(& state.pressure()[0], nc);
-        totals[5] = geo_.poreVolume().sum();
-        totals[6] = unit::convert::to((p * geo_.poreVolume() * hydrocarbon).sum() / ((geo_.poreVolume() * hydrocarbon).sum()), unit::barsa);
+        if ( ! is_parallel_run_ )
+        {
+            totals[5] = geo_.poreVolume().sum();
+            totals[6] = unit::convert::to((p * geo_.poreVolume() * hydrocarbon).sum() / ((geo_.poreVolume() * hydrocarbon).sum()), unit::barsa);
+        }
+        else
+        {
+#if HAVE_MPI
+            const auto & pinfo =
+                boost::any_cast<const ParallelISTLInformation&>(solver_.parallelInformation());
+            auto operators = std::make_tuple(Opm::Reduction::makeGlobalSumFunctor<double>(),
+                                             Opm::Reduction::makeGlobalSumFunctor<double>(),
+                                             Opm::Reduction::makeGlobalSumFunctor<double>());
+            auto pav_nom = p * geo_.poreVolume() * hydrocarbon;
+            auto pav_denom = geo_.poreVolume() * hydrocarbon;
+
+            // using ref cref to prevent copying
+            auto inputs = std::make_tuple(std::cref(geo_.poreVolume()),
+                                          std::cref(pav_nom), std::cref(pav_denom));
+            std::tuple<double, double, double> results(0.0, 0.0, 0.0);
+
+            pinfo.computeReduction(inputs, operators, results);
+            using std::get;
+            totals[5] = get<0>(results);
+            totals[6] = unit::convert::to(get<1>(results)/get<2>(results),
+                                          unit::barsa);
+#else
+            // This should never happen!
+            OPM_THROW(std::logic_error, "HAVE_MPI should be defined if we are running in parallel");
+#endif
+        }
         return totals;
     }
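
The three sum reductions in the last hunk correspond to the pore volume, the
numerator sum(p * poreVolume * hydrocarbon) and the denominator
sum(poreVolume * hydrocarbon) of the hydrocarbon pore-volume weighted average
pressure; the ratio for totals[6] is only formed after the global sums are known.
A small stand-alone illustration of why the ratio has to be formed that way (toy
numbers, not from the commit):

// Sum-then-divide (what the global reduction enables) versus divide-then-average.
#include <iostream>

int main()
{
    // Imagined local sums on two ranks: sum(p*pv*hc) and sum(pv*hc).
    const double num0 = 300.0, den0 = 3.0;   // rank 0, local ratio 100
    const double num1 = 800.0, den1 = 4.0;   // rank 1, local ratio 200

    const double correct = (num0 + num1) / (den0 + den1);       // 1100 / 7 = 157.14...
    const double naive   = 0.5 * (num0 / den0 + num1 / den1);   // (100 + 200) / 2 = 150

    std::cout << "sum-then-divide: " << correct
              << ", divide-then-average: " << naive << "\n";
    return 0;
}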