Mirror of https://github.com/OPM/opm-simulators.git, synced 2025-02-25 18:55:30 -06:00
Merge pull request #829 from blattms/parallelize-computeFluidInPlace
Correctly parallelize computeFluidInPlace
@@ -251,6 +251,9 @@ namespace Opm {
                             ReservoirState& reservoir_state,
                             WellState& well_state);
 
+        /// Return true if this is a parallel run.
+        bool isParallel() const;
+
         /// Return true if output to cout is wanted.
         bool terminalOutputEnabled() const;
 
@@ -251,7 +251,26 @@ namespace detail {
     }
 
 
+    template <class Grid, class WellModel, class Implementation>
+    bool
+    BlackoilModelBase<Grid, WellModel, Implementation>::
+    isParallel() const
+    {
+#if HAVE_MPI
+        if ( linsolver_.parallelInformation().type() !=
+             typeid(ParallelISTLInformation) )
+        {
+            return false;
+        }
+        else
+        {
+            const auto& comm =boost::any_cast<const ParallelISTLInformation&>(linsolver_.parallelInformation()).communicator();
+            return comm.size() > 1;
+        }
+#else
+        return false;
+#endif
+    }
 
 
     template <class Grid, class WellModel, class Implementation>
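The check above relies on the linear solver exposing its parallel setup through a boost::any: in a serial setup the stored object is not a ParallelISTLInformation, and in a parallel build the communicator size tells whether more than one rank actually participates. Below is a minimal standalone sketch of that type-dispatch pattern, using a hypothetical FakeParallelInfo stand-in rather than the real ParallelISTLInformation.

    #include <boost/any.hpp>
    #include <iostream>
    #include <typeinfo>

    // Hypothetical stand-in for Opm::ParallelISTLInformation, for illustration only.
    struct FakeParallelInfo
    {
        int size = 4;   // pretend communicator size
    };

    // Same pattern as isParallel() above: the solver hands back a boost::any that
    // holds parallel information in an MPI setup and something else otherwise.
    bool isParallelRun(const boost::any& parallelInformation)
    {
        if (parallelInformation.type() != typeid(FakeParallelInfo)) {
            return false;   // no parallel information stored: serial run
        }
        const auto& info = boost::any_cast<const FakeParallelInfo&>(parallelInformation);
        return info.size > 1;   // parallel only if more than one process participates
    }

    int main()
    {
        std::cout << isParallelRun(boost::any(FakeParallelInfo{})) << '\n'; // prints 1
        std::cout << isParallelRun(boost::any(42)) << '\n';                 // prints 0
    }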
@@ -2357,8 +2376,16 @@ namespace detail {
             fip[4] = rv.value() * fip[pg];
         }
 
-        const int dims = *std::max_element(fipnum.begin(), fipnum.end());
+        // For a parallel run this is just a local maximum and needs to be updated later
+        int dims = *std::max_element(fipnum.begin(), fipnum.end());
         std::vector<V> values(dims, V::Zero(7));
+
+        const V hydrocarbon = saturation[Oil].value() + saturation[Gas].value();
+        V hcpv;
+        V pres;
+
+        if ( !isParallel() )
+        {
         for (int i = 0; i < 5; ++i) {
             for (int c = 0; c < nc; ++c) {
                 if (fipnum[c] != 0) {
@@ -2367,10 +2394,9 @@ namespace detail {
             }
         }
 
-        // compute PAV and PORV for every regions.
-        const V hydrocarbon = saturation[Oil].value() + saturation[Gas].value();
-        V hcpv = V::Zero(nc);
-        V pres = V::Zero(nc);
+        hcpv = V::Zero(dims);
+        pres = V::Zero(dims);
+
         for (int c = 0; c < nc; ++c) {
             if (fipnum[c] != 0) {
                 hcpv[fipnum[c]-1] += pv[c] * hydrocarbon[c];
@@ -2379,7 +2405,56 @@ namespace detail {
                 values[fipnum[c]-1][6] += pv[c] * pressure.value()[c] * hydrocarbon[c];
             }
         }
+        }
+        else
+        {
+#if HAVE_MPI
+            // mask[c] is 1 if we need to compute something in parallel
+            const auto & pinfo =
+                boost::any_cast<const ParallelISTLInformation&>(linsolver_.parallelInformation());
+            const auto& mask = pinfo.getOwnerMask();
+            auto comm = pinfo.communicator();
+            // Compute the global dims value and resize values accordingly.
+            dims = comm.max(dims);
+            values.resize(dims, V::Zero(7));
+
+            for (int i = 0; i < 5; ++i) {
+                for (int c = 0; c < nc; ++c) {
+                    if (fipnum[c] != 0 && mask[c]) {
+                        values[fipnum[c]-1][i] += fip[i][c];
+                    }
+                }
+            }
+
+            hcpv = V::Zero(dims);
+            pres = V::Zero(dims);
+
+            for (int c = 0; c < nc; ++c) {
+                if (fipnum[c] != 0 && mask[c]) {
+                    hcpv[fipnum[c]-1] += pv[c] * hydrocarbon[c];
+                    pres[fipnum[c]-1] += pv[c] * pressure.value()[c];
+                    values[fipnum[c]-1][5] += pv[c];
+                    values[fipnum[c]-1][6] += pv[c] * pressure.value()[c] * hydrocarbon[c];
+                }
+            }
+
+            // For the frankenstein branch we hopefully can turn values into a vanilla
+            // std::vector<double>, use some index magic above, use one communication
+            // to sum up the vector entries instead of looping over the regions.
+            for(int reg=0; reg < dims; ++reg)
+            {
+                comm.sum(values[reg].data(), values[reg].size());
+            }
+
+            comm.sum(hcpv.data(), hcpv.size());
+            comm.sum(pres.data(), pres.size());
+#else
+            // This should never happen!
+            OPM_THROW(std::logic_error, "HAVE_MPI should be defined if we are running in parallel");
+#endif
+        }
 
+        // compute PAV and PORV for every regions.
         for (int reg = 0; reg < dims; ++reg) {
             if (hcpv[reg] != 0) {
                 values[reg][6] /= hcpv[reg];
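The parallel branch above combines three ingredients: only owner cells (mask[c]) contribute, so cells duplicated in the overlap layer are not counted twice; the region count dims is made globally consistent with a max reduction before values is resized; and the per-region partial sums are then summed across all ranks. The following is a standalone sketch of the same idea with raw MPI and made-up data, not the Dune/ISTL communicator and owner mask used in the code above.

    #include <mpi.h>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);
        int rank = 0;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        // Per-rank cell data: a value, a 1-based region id (0 = not in any region)
        // and an owner flag. Ghost copies (owner == 0) are skipped so cells that
        // exist on several ranks are only counted once.
        std::vector<double> value  = { 1.0, 2.0, 3.0 };
        std::vector<int>    region = { 1, 2, 2 };
        std::vector<int>    owner  = { 1, 1, rank == 0 ? 1 : 0 };

        // The locally computed region count is only a local maximum; make it global.
        int dims = 2;
        MPI_Allreduce(MPI_IN_PLACE, &dims, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);

        // Accumulate per-region sums over owned cells only, then sum across ranks.
        std::vector<double> sums(dims, 0.0);
        for (std::size_t c = 0; c < value.size(); ++c) {
            if (region[c] != 0 && owner[c]) {
                sums[region[c] - 1] += value[c];
            }
        }
        MPI_Allreduce(MPI_IN_PLACE, sums.data(), dims, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

        if (rank == 0) {
            for (int reg = 0; reg < dims; ++reg) {
                std::printf("region %d: %g\n", reg + 1, sums[reg]);
            }
        }
        MPI_Finalize();
        return 0;
    }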
@@ -2389,7 +2464,6 @@ namespace detail {
         }
 
         return values;
-
     }
 
 } // namespace Opm
@@ -20,6 +20,7 @@
 */
 
 #include <utility>
+#include <functional>
 #include <algorithm>
 #include <locale>
 #include <opm/parser/eclipse/EclipseState/Schedule/Events.hpp>
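The new <functional> include provides std::cref, which the reduction code in the next hunk uses so that std::make_tuple stores const references to the pore-volume and pressure expressions instead of copying them. A minimal sketch of that behaviour:

    #include <functional>
    #include <tuple>
    #include <vector>

    int main()
    {
        std::vector<double> poreVolume(1000, 1.0);

        // std::cref (from <functional>) wraps a const reference, so make_tuple
        // stores a const std::vector<double>& instead of copying the vector.
        auto inputs = std::make_tuple(std::cref(poreVolume));

        const std::vector<double>& pv = std::get<0>(inputs);
        return pv.size() == 1000 ? 0 : 1;
    }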
@@ -697,8 +698,37 @@ namespace Opm
         const V sg = pu.phase_used[BlackoilPhases::Vapour] ? V(s.col(BlackoilPhases::Vapour)) : V::Zero(nc);
         const V hydrocarbon = so + sg;
         const V p = Eigen::Map<const V>(& state.pressure()[0], nc);
+        if ( ! is_parallel_run_ )
+        {
         totals[5] = geo_.poreVolume().sum();
         totals[6] = unit::convert::to((p * geo_.poreVolume() * hydrocarbon).sum() / ((geo_.poreVolume() * hydrocarbon).sum()), unit::barsa);
+        }
+        else
+        {
+#if HAVE_MPI
+            const auto & pinfo =
+                boost::any_cast<const ParallelISTLInformation&>(solver_.parallelInformation());
+            auto operators = std::make_tuple(Opm::Reduction::makeGlobalSumFunctor<double>(),
+                                             Opm::Reduction::makeGlobalSumFunctor<double>(),
+                                             Opm::Reduction::makeGlobalSumFunctor<double>());
+            auto pav_nom = p * geo_.poreVolume() * hydrocarbon;
+            auto pav_denom = geo_.poreVolume() * hydrocarbon;
+
+            // using ref cref to prevent copying
+            auto inputs = std::make_tuple(std::cref(geo_.poreVolume()),
+                                          std::cref(pav_nom), std::cref(pav_denom));
+            std::tuple<double, double, double> results(0.0, 0.0, 0.0);
+
+            pinfo.computeReduction(inputs, operators, results);
+            using std::get;
+            totals[5] = get<0>(results);
+            totals[6] = unit::convert::to(get<1>(results)/get<2>(results),
+                                          unit::barsa);
+#else
+            // This should never happen!
+            OPM_THROW(std::logic_error, "HAVE_MPI should be defined if we are running in parallel");
+#endif
+        }
 
         return totals;
     }
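In both branches the same two field totals are produced: totals[5] is the summed pore volume and totals[6] the hydrocarbon-pore-volume weighted average pressure; the parallel branch only computes the three partial sums locally and combines them with global-sum functors in a single computeReduction call. Below is a serial sketch of the underlying arithmetic with illustrative data (variable names are not from the OPM code).

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main()
    {
        // Illustrative per-cell data: pressure [Pa], pore volume [m^3] and
        // hydrocarbon saturation (so + sg). Not taken from any real deck.
        std::vector<double> p  = { 200.0e5, 210.0e5, 190.0e5 };
        std::vector<double> pv = { 1.0e3, 2.0e3, 1.5e3 };
        std::vector<double> hc = { 0.8, 0.7, 0.0 };

        double porv = 0.0;        // corresponds to totals[5]
        double pavNom = 0.0;      // sum(p * pv * hc)
        double pavDenom = 0.0;    // sum(pv * hc), the hydrocarbon pore volume
        for (std::size_t c = 0; c < p.size(); ++c) {
            porv     += pv[c];
            pavNom   += p[c] * pv[c] * hc[c];
            pavDenom += pv[c] * hc[c];
        }

        // corresponds to totals[6]; the simulator additionally converts to barsa.
        const double pav = pavNom / pavDenom;
        std::printf("PORV = %g m^3, PAV = %g Pa\n", porv, pav);
        return 0;
    }

In a parallel run the three accumulators are exactly the quantities that the tuple of makeGlobalSumFunctor reductions sums across ranks before the final division.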