From e15f9bfb9c22fafdd559680fe12c0518ccb8bb1d Mon Sep 17 00:00:00 2001
From: Markus Blatt
Date: Thu, 15 Sep 2016 15:35:27 +0200
Subject: [PATCH 1/2] Save space in computeFluidInPlace.

Both hcpv and pres will be used to store only dims elements. As dims
will most likely be much smaller than the number of cells, we only
allocate containers of size dims with this commit.
---
 opm/autodiff/BlackoilModelBase_impl.hpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/opm/autodiff/BlackoilModelBase_impl.hpp b/opm/autodiff/BlackoilModelBase_impl.hpp
index 84abb664a..5a68471c2 100644
--- a/opm/autodiff/BlackoilModelBase_impl.hpp
+++ b/opm/autodiff/BlackoilModelBase_impl.hpp
@@ -2369,8 +2369,8 @@ namespace detail {
 
         // compute PAV and PORV for every regions.
         const V hydrocarbon = saturation[Oil].value() + saturation[Gas].value();
-        V hcpv = V::Zero(nc);
-        V pres = V::Zero(nc);
+        V hcpv = V::Zero(dims);
+        V pres = V::Zero(dims);
         for (int c = 0; c < nc; ++c) {
             if (fipnum[c] != 0) {
                 hcpv[fipnum[c]-1] += pv[c] * hydrocarbon[c];

From 2c70f05d6b2821d223a2c1b91a8c7f58ecec87cf Mon Sep 17 00:00:00 2001
From: Markus Blatt
Date: Thu, 15 Sep 2016 15:43:08 +0200
Subject: [PATCH 2/2] Correctly parallelize computeFluidInPlace.

Its first implementation computed wrong results in parallel. With this
commit we now have completely parallelized the computations, and the
results seem correct for parallel runs with Norne.
---
 opm/autodiff/BlackoilModelBase.hpp      |   3 +
 opm/autodiff/BlackoilModelBase_impl.hpp | 104 ++++++++++++++++++++----
 opm/autodiff/SimulatorBase_impl.hpp     |  36 +++++++-
 3 files changed, 125 insertions(+), 18 deletions(-)

diff --git a/opm/autodiff/BlackoilModelBase.hpp b/opm/autodiff/BlackoilModelBase.hpp
index d496c7bb8..ecac81179 100644
--- a/opm/autodiff/BlackoilModelBase.hpp
+++ b/opm/autodiff/BlackoilModelBase.hpp
@@ -251,6 +251,9 @@ namespace Opm {
                              ReservoirState& reservoir_state,
                              WellState& well_state);
 
+        /// Return true if this is a parallel run.
+        bool isParallel() const;
+
         /// Return true if output to cout is wanted.
         bool terminalOutputEnabled() const;
 
diff --git a/opm/autodiff/BlackoilModelBase_impl.hpp b/opm/autodiff/BlackoilModelBase_impl.hpp
index 5a68471c2..b3b7db557 100644
--- a/opm/autodiff/BlackoilModelBase_impl.hpp
+++ b/opm/autodiff/BlackoilModelBase_impl.hpp
@@ -251,7 +251,26 @@ namespace detail {
     }
 
 
-
+    template <class Grid, class WellModel, class Implementation>
+    bool
+    BlackoilModelBase<Grid, WellModel, Implementation>::
+    isParallel() const
+    {
+#if HAVE_MPI
+        if ( linsolver_.parallelInformation().type() !=
+             typeid(ParallelISTLInformation) )
+        {
+            return false;
+        }
+        else
+        {
+            const auto& comm = boost::any_cast<const ParallelISTLInformation&>(linsolver_.parallelInformation()).communicator();
+            return comm.size() > 1;
+        }
+#else
+        return false;
+#endif
+    }
 
 
     template <class Grid, class WellModel, class Implementation>
@@ -2357,29 +2376,85 @@ namespace detail {
             fip[4] = rv.value() * fip[pg];
         }
 
-        const int dims = *std::max_element(fipnum.begin(), fipnum.end());
+        // For a parallel run this is just a local maximum and needs to be updated later
+        int dims = *std::max_element(fipnum.begin(), fipnum.end());
         std::vector<V> values(dims, V::Zero(7));
-        for (int i = 0; i < 5; ++i) {
+
+        const V hydrocarbon = saturation[Oil].value() + saturation[Gas].value();
+        V hcpv;
+        V pres;
+
+        if ( !isParallel() )
+        {
+            for (int i = 0; i < 5; ++i) {
+                for (int c = 0; c < nc; ++c) {
+                    if (fipnum[c] != 0) {
+                        values[fipnum[c]-1][i] += fip[i][c];
+                    }
+                }
+            }
+
+            hcpv = V::Zero(dims);
+            pres = V::Zero(dims);
+
             for (int c = 0; c < nc; ++c) {
                 if (fipnum[c] != 0) {
-                    values[fipnum[c]-1][i] += fip[i][c];
+                    hcpv[fipnum[c]-1] += pv[c] * hydrocarbon[c];
+                    pres[fipnum[c]-1] += pv[c] * pressure.value()[c];
+                    values[fipnum[c]-1][5] += pv[c];
+                    values[fipnum[c]-1][6] += pv[c] * pressure.value()[c] * hydrocarbon[c];
                 }
             }
         }
+        else
+        {
+#if HAVE_MPI
+            // mask[c] is 1 if we need to compute something in parallel
+            const auto & pinfo =
+                boost::any_cast<const ParallelISTLInformation&>(linsolver_.parallelInformation());
+            const auto& mask = pinfo.getOwnerMask();
+            auto comm = pinfo.communicator();
+            // Compute the global dims value and resize values accordingly.
+            dims = comm.max(dims);
+            values.resize(dims, V::Zero(7));
 
-        // compute PAV and PORV for every regions.
-        const V hydrocarbon = saturation[Oil].value() + saturation[Gas].value();
-        V hcpv = V::Zero(dims);
-        V pres = V::Zero(dims);
-        for (int c = 0; c < nc; ++c) {
-            if (fipnum[c] != 0) {
-                hcpv[fipnum[c]-1] += pv[c] * hydrocarbon[c];
-                pres[fipnum[c]-1] += pv[c] * pressure.value()[c];
-                values[fipnum[c]-1][5] += pv[c];
-                values[fipnum[c]-1][6] += pv[c] * pressure.value()[c] * hydrocarbon[c];
+            for (int i = 0; i < 5; ++i) {
+                for (int c = 0; c < nc; ++c) {
+                    if (fipnum[c] != 0 && mask[c]) {
+                        values[fipnum[c]-1][i] += fip[i][c];
+                    }
+                }
             }
+
+            hcpv = V::Zero(dims);
+            pres = V::Zero(dims);
+
+            for (int c = 0; c < nc; ++c) {
+                if (fipnum[c] != 0 && mask[c]) {
+                    hcpv[fipnum[c]-1] += pv[c] * hydrocarbon[c];
+                    pres[fipnum[c]-1] += pv[c] * pressure.value()[c];
+                    values[fipnum[c]-1][5] += pv[c];
+                    values[fipnum[c]-1][6] += pv[c] * pressure.value()[c] * hydrocarbon[c];
+                }
+            }
+
+            // For the frankenstein branch we hopefully can turn values into a vanilla
+            // std::vector, use some index magic above, use one communication
+            // to sum up the vector entries instead of looping over the regions.
+            for(int reg=0; reg < dims; ++reg)
+            {
+                comm.sum(values[reg].data(), values[reg].size());
+            }
+
+            comm.sum(hcpv.data(), hcpv.size());
+            comm.sum(pres.data(), pres.size());
+#else
+            // This should never happen!
+            OPM_THROW(std::logic_error, "HAVE_MPI should be defined if we are running in parallel");
+#endif
         }
 
+        // compute PAV and PORV for every regions.
         for (int reg = 0; reg < dims; ++reg) {
             if (hcpv[reg] != 0) {
                 values[reg][6] /= hcpv[reg];
@@ -2389,7 +2464,6 @@ namespace detail {
         }
 
         return values;
-
     }
 
 } // namespace Opm
diff --git a/opm/autodiff/SimulatorBase_impl.hpp b/opm/autodiff/SimulatorBase_impl.hpp
index 96c16fcc5..bb64aee25 100644
--- a/opm/autodiff/SimulatorBase_impl.hpp
+++ b/opm/autodiff/SimulatorBase_impl.hpp
@@ -20,6 +20,7 @@
  */
 
 #include
+#include
 #include
 #include
 #include
@@ -697,9 +698,38 @@ namespace Opm
         const V sg = pu.phase_used[BlackoilPhases::Vapour] ? V(s.col(BlackoilPhases::Vapour)) : V::Zero(nc);
         const V hydrocarbon = so + sg;
         const V p = Eigen::Map(& state.pressure()[0], nc);
-        totals[5] = geo_.poreVolume().sum();
-        totals[6] = unit::convert::to((p * geo_.poreVolume() * hydrocarbon).sum() / ((geo_.poreVolume() * hydrocarbon).sum()), unit::barsa);
-
+        if ( ! is_parallel_run_ )
+        {
+            totals[5] = geo_.poreVolume().sum();
+            totals[6] = unit::convert::to((p * geo_.poreVolume() * hydrocarbon).sum() / ((geo_.poreVolume() * hydrocarbon).sum()), unit::barsa);
+        }
+        else
+        {
+#if HAVE_MPI
+            const auto & pinfo =
+                boost::any_cast<const ParallelISTLInformation&>(solver_.parallelInformation());
+            auto operators = std::make_tuple(Opm::Reduction::makeGlobalSumFunctor<double>(),
+                                             Opm::Reduction::makeGlobalSumFunctor<double>(),
+                                             Opm::Reduction::makeGlobalSumFunctor<double>());
+            auto pav_nom = p * geo_.poreVolume() * hydrocarbon;
+            auto pav_denom = geo_.poreVolume() * hydrocarbon;
+
+            // using ref cref to prevent copying
+            auto inputs = std::make_tuple(std::cref(geo_.poreVolume()),
+                                          std::cref(pav_nom), std::cref(pav_denom));
+            std::tuple<double, double, double> results(0.0, 0.0, 0.0);
+
+            pinfo.computeReduction(inputs, operators, results);
+            using std::get;
+            totals[5] = get<0>(results);
+            totals[6] = unit::convert::to(get<1>(results)/get<2>(results),
+                                          unit::barsa);
+#else
+            // This should never happen!
+            OPM_THROW(std::logic_error, "HAVE_MPI should be defined if we are running in parallel");
+#endif
+        }
+
         return totals;
     }
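
Note (not part of the patches above): the core idea of PATCH 2/2 is to accumulate the per-region sums only over cells owned by the local process (the owner mask) and then perform one global sum per container, so overlap/ghost cells are never counted twice. Below is a minimal standalone sketch of that pattern, written against plain MPI rather than the ParallelISTLInformation wrapper used in the patch; all cell data, region numbers and ownership below are made up for illustration.

// Minimal sketch of the masked per-region reduction used in PATCH 2/2,
// written against plain MPI instead of OPM's ParallelISTLInformation.
// All data below (cells, regions, ownership) is made up for illustration.
#include <mpi.h>
#include <cstddef>
#include <cstdio>
#include <vector>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank = 0;
    int size = 1;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // Per-cell data on this rank (owned cells plus overlap/ghost copies).
    std::vector<double> pv     = {1.0, 2.0, 3.0}; // pore volume per cell
    std::vector<int>    fipnum = {1, 1, 2};       // FIP region, 1-based, 0 = inactive
    // mask[c] is 1 only for cells this rank owns; overlap copies get 0 so
    // they are not counted twice (the role of getOwnerMask() in the patch).
    // Here cells 0 and 1 are owned by rank 0 and cell 2 by the last rank.
    std::vector<int> mask = {rank == 0 ? 1 : 0,
                             rank == 0 ? 1 : 0,
                             rank == size - 1 ? 1 : 0};

    // The local region count is only a local maximum; take the global
    // maximum so every rank sizes its containers identically (comm.max(dims)).
    int dims = 2;
    MPI_Allreduce(MPI_IN_PLACE, &dims, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);

    // Accumulate per-region pore volume over owned, active cells only.
    std::vector<double> porv(dims, 0.0);
    for (std::size_t c = 0; c < pv.size(); ++c) {
        if (fipnum[c] != 0 && mask[c]) {
            porv[fipnum[c] - 1] += pv[c];
        }
    }

    // One global sum per container turns the rank-local partial sums into
    // the totals a serial run would have produced (comm.sum(...) in the patch).
    MPI_Allreduce(MPI_IN_PLACE, porv.data(), dims, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

    if (rank == 0) {
        for (int reg = 0; reg < dims; ++reg) {
            std::printf("region %d: PORV = %g\n", reg + 1, porv[reg]);
        }
    }

    MPI_Finalize();
    return 0;
}

Built with mpicxx and run with, e.g., mpirun -np 2, each rank contributes only its owned cells, yet the printed totals are identical to a single-process run, which is exactly the property the patch restores.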
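
Both files ultimately evaluate the same pore-volume-weighted average pressure (PAV); the only parallel subtlety is that the numerator and denominator must each be summed globally before the division. A short serial sketch of that weighting follows, with illustrative numbers and none of the OPM types.

// Serial sketch of the weighted average behind totals[6] and values[reg][6]:
// PAV = sum(p * pv * hc) / sum(pv * hc). Numbers are illustrative only.
#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
    std::vector<double> p  = {200e5, 210e5, 190e5}; // cell pressure [Pa]
    std::vector<double> pv = {1.0, 2.0, 3.0};       // pore volume [m^3]
    std::vector<double> hc = {0.5, 0.7, 0.0};       // hydrocarbon saturation So + Sg

    double num = 0.0;
    double denom = 0.0;
    for (std::size_t c = 0; c < p.size(); ++c) {
        num   += p[c] * pv[c] * hc[c];
        denom += pv[c] * hc[c];
    }

    // Guard against regions without any hydrocarbon pore volume, as the
    // patch does with "if (hcpv[reg] != 0)".
    const double pav = denom != 0.0 ? num / denom : 0.0;
    std::printf("PAV = %.2f bar\n", pav / 1e5); // unit::barsa corresponds to 1e5 Pa
    return 0;
}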