diff --git a/opm/autodiff/BlackoilModelBase.hpp b/opm/autodiff/BlackoilModelBase.hpp
index d496c7bb8..ecac81179 100644
--- a/opm/autodiff/BlackoilModelBase.hpp
+++ b/opm/autodiff/BlackoilModelBase.hpp
@@ -251,6 +251,9 @@ namespace Opm {
                             ReservoirState& reservoir_state,
                             WellState& well_state);
 
+        /// Return true if this is a parallel run.
+        bool isParallel() const;
+
         /// Return true if output to cout is wanted.
         bool terminalOutputEnabled() const;
 
diff --git a/opm/autodiff/BlackoilModelBase_impl.hpp b/opm/autodiff/BlackoilModelBase_impl.hpp
index 7fc530dc3..ed8d6003c 100644
--- a/opm/autodiff/BlackoilModelBase_impl.hpp
+++ b/opm/autodiff/BlackoilModelBase_impl.hpp
@@ -251,7 +251,26 @@ namespace detail {
     }
 
 
-
+    template <class Grid, class WellModel, class Implementation>
+    bool
+    BlackoilModelBase<Grid, WellModel, Implementation>::
+    isParallel() const
+    {
+#if HAVE_MPI
+        if ( linsolver_.parallelInformation().type() !=
+             typeid(ParallelISTLInformation) )
+        {
+            return false;
+        }
+        else
+        {
+            const auto& comm = boost::any_cast<const ParallelISTLInformation&>(linsolver_.parallelInformation()).communicator();
+            return comm.size() > 1;
+        }
+#else
+        return false;
+#endif
+    }
 
 
     template <class Grid, class WellModel, class Implementation>
@@ -2357,29 +2376,85 @@ namespace detail {
             fip[4] = rv.value() * fip[pg];
         }
 
-        const int dims = *std::max_element(fipnum.begin(), fipnum.end());
+        // For a parallel run this is just a local maximum and needs to be updated later
+        int dims = *std::max_element(fipnum.begin(), fipnum.end());
         std::vector<V> values(dims, V::Zero(7));
-        for (int i = 0; i < 5; ++i) {
+
+        const V hydrocarbon = saturation[Oil].value() + saturation[Gas].value();
+        V hcpv;
+        V pres;
+
+        if ( !isParallel() )
+        {
+            for (int i = 0; i < 5; ++i) {
+                for (int c = 0; c < nc; ++c) {
+                    if (fipnum[c] != 0) {
+                        values[fipnum[c]-1][i] += fip[i][c];
+                    }
+                }
+            }
+
+            hcpv = V::Zero(dims);
+            pres = V::Zero(dims);
+
             for (int c = 0; c < nc; ++c) {
                 if (fipnum[c] != 0) {
-                    values[fipnum[c]-1][i] += fip[i][c];
+                    hcpv[fipnum[c]-1] += pv[c] * hydrocarbon[c];
+                    pres[fipnum[c]-1] += pv[c] * pressure.value()[c];
+                    values[fipnum[c]-1][5] += pv[c];
+                    values[fipnum[c]-1][6] += pv[c] * pressure.value()[c] * hydrocarbon[c];
                 }
             }
         }
+        else
+        {
+#if HAVE_MPI
+            // mask[c] is 1 if we need to compute something in parallel
+            const auto& pinfo =
+                boost::any_cast<const ParallelISTLInformation&>(linsolver_.parallelInformation());
+            const auto& mask = pinfo.getOwnerMask();
+            auto comm = pinfo.communicator();
+            // Compute the global dims value and resize values accordingly.
+            dims = comm.max(dims);
+            values.resize(dims, V::Zero(7));
 
-        // compute PAV and PORV for every regions.
-        const V hydrocarbon = saturation[Oil].value() + saturation[Gas].value();
-        V hcpv = V::Zero(nc);
-        V pres = V::Zero(nc);
-        for (int c = 0; c < nc; ++c) {
-            if (fipnum[c] != 0) {
-                hcpv[fipnum[c]-1] += pv[c] * hydrocarbon[c];
-                pres[fipnum[c]-1] += pv[c] * pressure.value()[c];
-                values[fipnum[c]-1][5] += pv[c];
-                values[fipnum[c]-1][6] += pv[c] * pressure.value()[c] * hydrocarbon[c];
+            for (int i = 0; i < 5; ++i) {
+                for (int c = 0; c < nc; ++c) {
+                    if (fipnum[c] != 0 && mask[c]) {
+                        values[fipnum[c]-1][i] += fip[i][c];
+                    }
+                }
             }
+
+            hcpv = V::Zero(dims);
+            pres = V::Zero(dims);
+
+            for (int c = 0; c < nc; ++c) {
+                if (fipnum[c] != 0 && mask[c]) {
+                    hcpv[fipnum[c]-1] += pv[c] * hydrocarbon[c];
+                    pres[fipnum[c]-1] += pv[c] * pressure.value()[c];
+                    values[fipnum[c]-1][5] += pv[c];
+                    values[fipnum[c]-1][6] += pv[c] * pressure.value()[c] * hydrocarbon[c];
+                }
+            }
+
+            // For the frankenstein branch we hopefully can turn values into a vanilla
+            // std::vector<double>, use some index magic above, use one communication
+            // to sum up the vector entries instead of looping over the regions.
+            for (int reg = 0; reg < dims; ++reg)
+            {
+                comm.sum(values[reg].data(), values[reg].size());
+            }
+
+            comm.sum(hcpv.data(), hcpv.size());
+            comm.sum(pres.data(), pres.size());
+#else
+            // This should never happen!
+            OPM_THROW(std::logic_error, "HAVE_MPI should be defined if we are running in parallel");
+#endif
         }
 
+        // compute PAV and PORV for every region.
         for (int reg = 0; reg < dims; ++reg) {
             if (hcpv[reg] != 0) {
                 values[reg][6] /= hcpv[reg];
@@ -2389,7 +2464,6 @@ namespace detail {
         }
 
         return values;
-
     }
 
 } // namespace Opm
diff --git a/opm/autodiff/SimulatorBase_impl.hpp b/opm/autodiff/SimulatorBase_impl.hpp
index 96c16fcc5..bb64aee25 100644
--- a/opm/autodiff/SimulatorBase_impl.hpp
+++ b/opm/autodiff/SimulatorBase_impl.hpp
@@ -20,6 +20,7 @@
  */
 
 #include
+#include <opm/core/linalg/ParallelIstlInformation.hpp>
 #include
 #include
 #include
@@ -697,9 +698,38 @@ namespace Opm
         const V sg = pu.phase_used[BlackoilPhases::Vapour]
                      ? V(s.col(BlackoilPhases::Vapour)) : V::Zero(nc);
         const V hydrocarbon = so + sg;
         const V p = Eigen::Map<const V>(& state.pressure()[0], nc);
-        totals[5] = geo_.poreVolume().sum();
-        totals[6] = unit::convert::to((p * geo_.poreVolume() * hydrocarbon).sum() / ((geo_.poreVolume() * hydrocarbon).sum()), unit::barsa);
-
+        if ( ! is_parallel_run_ )
+        {
+            totals[5] = geo_.poreVolume().sum();
+            totals[6] = unit::convert::to((p * geo_.poreVolume() * hydrocarbon).sum() / ((geo_.poreVolume() * hydrocarbon).sum()), unit::barsa);
+        }
+        else
+        {
+#if HAVE_MPI
+            const auto& pinfo =
+                boost::any_cast<const ParallelISTLInformation&>(solver_.parallelInformation());
+            auto operators = std::make_tuple(Opm::Reduction::makeGlobalSumFunctor<double>(),
+                                             Opm::Reduction::makeGlobalSumFunctor<double>(),
+                                             Opm::Reduction::makeGlobalSumFunctor<double>());
+            auto pav_nom = p * geo_.poreVolume() * hydrocarbon;
+            auto pav_denom = geo_.poreVolume() * hydrocarbon;
+
+            // using std::cref to prevent copying the inputs into the tuple
+            auto inputs = std::make_tuple(std::cref(geo_.poreVolume()),
+                                          std::cref(pav_nom), std::cref(pav_denom));
+            std::tuple<double, double, double> results(0.0, 0.0, 0.0);
+
+            pinfo.computeReduction(inputs, operators, results);
+            using std::get;
+            totals[5] = get<0>(results);
+            totals[6] = unit::convert::to(get<1>(results)/get<2>(results),
+                                          unit::barsa);
+#else
+            // This should never happen!
+            OPM_THROW(std::logic_error, "HAVE_MPI should be defined if we are running in parallel");
+#endif
+        }
+
         return totals;
     }
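
Note (not part of the patch): both changes follow the same recipe for parallel correctness: accumulate only over cells this rank owns (the owner mask is 0 on overlap/ghost cells, so every cell is counted exactly once), then merge the per-rank partial sums with one collective reduction. Below is a minimal, self-contained sketch of that pattern in plain MPI rather than Dune's communicator wrapper; all identifiers (ownerMask, fipnum, regionSums) are illustrative and do not come from the patch.

    #include <mpi.h>
    #include <vector>
    #include <cstdio>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);
        int rank = 0;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        // Per-rank cell data; overlap (ghost) cells carry ownerMask == 0 so
        // that exactly one rank contributes each cell to the global sums.
        const int nc = 4;                         // local cells on this rank
        std::vector<int>    fipnum(nc, 1);        // FIP region per cell (0 = none)
        std::vector<char>   ownerMask(nc, 1);     // 1 = owned, 0 = ghost
        std::vector<double> pv(nc, 2.0);          // pore volume per cell

        // Local maximum region number; the patch uses comm.max(dims),
        // this is the plain-MPI equivalent.
        int dims = 1;
        MPI_Allreduce(MPI_IN_PLACE, &dims, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);

        // Sum owned cells into per-region buckets (here: PORV only).
        std::vector<double> regionSums(dims, 0.0);
        for (int c = 0; c < nc; ++c) {
            if (fipnum[c] != 0 && ownerMask[c]) { // skip ghost cells
                regionSums[fipnum[c] - 1] += pv[c];
            }
        }

        // One collective sum turns per-rank partial sums into global totals.
        MPI_Allreduce(MPI_IN_PLACE, regionSums.data(), dims,
                      MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

        if (rank == 0) {
            std::printf("global PORV of region 1: %g\n", regionSums[0]);
        }
        MPI_Finalize();
        return 0;
    }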