Perform the parallel reduction when applying the wells.

Applying the B matrix is basically a component-wise multiplication
with a vector, followed by a parallel reduction. We perform that
reduction onto all ranks computing for the well, which saves the
broadcast when applying C^T.
Markus Blatt 2020-10-09 15:09:28 +02:00
parent 3996967344
commit 8ee58096ba
11 changed files with 188 additions and 13 deletions
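
For context, the standard well couples to the reservoir as a Schur complement: applying the well to a reservoir vector x schematically computes A x - C^T D^-1 (B x). When a well's perforations are distributed over several ranks, each rank owns only some of B's columns. The following sketch (plain MPI with illustrative names, not the actual OPM code) shows the idea of this commit: every rank multiplies its local B-blocks with x, and the partial results are summed onto all ranks of the well's communicator, so the later application of D^-1 and C^T needs no broadcast.

#include <mpi.h>
#include <cstddef>
#include <vector>

// Sketch: y = B x for one well whose perforations are spread over the
// ranks of wellComm. Each local perforation contributes a dense
// numWellEq x numEq block applied to the solution in its perforated cell.
std::vector<double> applyB(const std::vector<std::vector<double>>& blocks,  // one flattened numWellEq*numEq block per local perforation
                           const std::vector<std::vector<double>>& xCells,  // x restricted to the local perforated cells
                           int numWellEq, int numEq, MPI_Comm wellComm)
{
    std::vector<double> y(numWellEq, 0.0);
    // Local part: component-wise multiplication followed by summation.
    for (std::size_t p = 0; p < blocks.size(); ++p)
        for (int i = 0; i < numWellEq; ++i)
            for (int j = 0; j < numEq; ++j)
                y[i] += blocks[p][i*numEq + j] * xCells[p][j];
    // Reduce onto ALL ranks: afterwards every rank holds the complete B x
    // and can apply D^-1 and its share of C^T locally, so no broadcast
    // of D^-1 B x is needed.
    MPI_Allreduce(MPI_IN_PLACE, y.data(), numWellEq, MPI_DOUBLE, MPI_SUM, wellComm);
    return y;
}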

View File

@@ -416,6 +416,16 @@ public:
int outputInterval = EWOMS_GET_PARAM(TypeTag, int, EclOutputInterval);
if (outputInterval >= 0)
schedule().restart().overrideRestartWriteInterval(outputInterval);
// Initialize parallelWells with all local wells
const auto& schedule_wells = schedule().getWellsatEnd();
parallelWells_.reserve(schedule_wells.size());
for (const auto& well: schedule_wells)
{
parallelWells_.emplace_back(well.name(), true);
}
std::sort(parallelWells_.begin(), parallelWells_.end());
}
/*!

View File

@@ -280,6 +280,7 @@ namespace Opm {
std::vector< WellProdIndexCalculator > prod_index_calc_;
std::vector< ParallelWellInfo > parallel_well_info_;
std::vector< ParallelWellInfo* > local_parallel_well_info_;
bool wells_active_;
@@ -360,7 +361,7 @@ namespace Opm {
/// \param timeStepIdx The index of the time step.
/// \param[out] globalNumWells the number of wells globally.
std::vector< Well > getLocalNonshutWells(const int timeStepIdx,
int& globalNumWells) const;
int& globalNumWells);
// compute the well fluxes and assemble them into the reservoir equations as source terms
// and into the well equations.
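
Purely as an illustration of what this comment describes (names and sign conventions hypothetical, not the OPM API): the flux of every perforation enters the residual of its perforated cell as a source term and, with opposite sign, the residual of the well equations.

#include <cstddef>
#include <vector>

void assembleSourceTerms(const std::vector<int>& perfCells,    // cell index of each perforation
                         const std::vector<double>& perfRates, // computed perforation fluxes
                         std::vector<double>& resReservoir,    // reservoir residual, one entry per cell
                         std::vector<double>& resWell)         // well residual
{
    for (std::size_t p = 0; p < perfCells.size(); ++p) {
        resReservoir[perfCells[p]] -= perfRates[p]; // source term in the perforated cell
        resWell[0] += perfRates[p];                 // balancing term in the well equation
    }
}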

View File

@@ -217,11 +217,23 @@ namespace Opm {
template<typename TypeTag>
std::vector< Well >
BlackoilWellModel<TypeTag>::
getLocalNonshutWells(const int timeStepIdx, int& globalNumWells) const
getLocalNonshutWells(const int timeStepIdx, int& globalNumWells)
{
auto w = schedule().getWells(timeStepIdx);
globalNumWells = w.size();
w.erase(std::remove_if(w.begin(), w.end(), is_shut_or_defunct_), w.end());
local_parallel_well_info_.clear();
local_parallel_well_info_.reserve(w.size());
for (const auto& well : w)
{
auto wellPair = std::make_pair(well.name(), true);
auto pwell = std::lower_bound(parallel_well_info_.begin(),
parallel_well_info_.end(),
wellPair);
assert(pwell != parallel_well_info_.end() &&
*pwell == wellPair);
local_parallel_well_info_.push_back(&(*pwell));
}
return w;
}
@@ -802,6 +814,7 @@ namespace Opm {
const auto& perf_data = this->well_perf_data_[wellID];
return std::make_unique<WellType>(this->wells_ecl_[wellID],
*local_parallel_well_info_[wellID],
time_step,
this->param_,
*this->rateConverter_,

View File

@@ -98,7 +98,9 @@ namespace Opm
// TODO: for now, we only use one type to save some implementation effort; this can be improved later.
typedef DenseAd::Evaluation<double, /*size=*/numEq + numWellEq> EvalWell;
MultisegmentWell(const Well& well, const int time_step,
MultisegmentWell(const Well& well,
const ParallelWellInfo& pw_info,
const int time_step,
const ModelParameters& param,
const RateConverterType& rate_converter,
const int pvtRegionIdx,

View File

@@ -31,7 +31,9 @@ namespace Opm
template <typename TypeTag>
MultisegmentWell<TypeTag>::
MultisegmentWell(const Well& well, const int time_step,
MultisegmentWell(const Well& well,
const ParallelWellInfo& pw_info,
const int time_step,
const ModelParameters& param,
const RateConverterType& rate_converter,
const int pvtRegionIdx,
@@ -40,7 +42,7 @@ namespace Opm
const int index_of_well,
const int first_perf_index,
const std::vector<PerforationData>& perf_data)
: Base(well, time_step, param, rate_converter, pvtRegionIdx, num_components, num_phases, index_of_well, first_perf_index, perf_data)
: Base(well, pw_info, time_step, param, rate_converter, pvtRegionIdx, num_components, num_phases, index_of_well, first_perf_index, perf_data)
, segment_perforations_(numberOfSegments())
, segment_inlets_(numberOfSegments())
, cell_perforation_depth_diffs_(number_of_perforations_, 0.0)

View File

@@ -31,6 +31,7 @@
#include <opm/simulators/wells/RateConverter.hpp>
#include <opm/simulators/wells/WellInterface.hpp>
#include <opm/simulators/wells/WellProdIndexCalculator.hpp>
#include <opm/simulators/wells/ParallelWellInfo.hpp>
#include <opm/models/blackoil/blackoilpolymermodules.hh>
#include <opm/models/blackoil/blackoilsolventmodules.hh>
@@ -158,7 +159,9 @@ namespace Opm
using Base::contiBrineEqIdx;
static const int contiEnergyEqIdx = Indices::contiEnergyEqIdx;
StandardWell(const Well& well, const int time_step,
StandardWell(const Well& well,
const ParallelWellInfo& pw_info,
const int time_step,
const ModelParameters& param,
const RateConverterType& rate_converter,
const int pvtRegionIdx,
@@ -377,6 +380,9 @@ namespace Opm
// diagonal matrix for the well
DiagMatWell invDuneD_;
// Wrapper for the parallel application of B for distributed wells
wellhelpers::ParallelStandardWellB<Scalar> parallelB_;
// several vectors used in the matrix calculation
mutable BVectorWell Bx_;
mutable BVectorWell invDrw_;

View File

@@ -33,7 +33,9 @@ namespace Opm
template<typename TypeTag>
StandardWell<TypeTag>::
StandardWell(const Well& well, const int time_step,
StandardWell(const Well& well,
const ParallelWellInfo& pw_info,
const int time_step,
const ModelParameters& param,
const RateConverterType& rate_converter,
const int pvtRegionIdx,
@@ -42,9 +44,10 @@ namespace Opm
const int index_of_well,
const int first_perf_index,
const std::vector<PerforationData>& perf_data)
: Base(well, time_step, param, rate_converter, pvtRegionIdx, num_components, num_phases, index_of_well, first_perf_index, perf_data)
: Base(well, pw_info, time_step, param, rate_converter, pvtRegionIdx, num_components, num_phases, index_of_well, first_perf_index, perf_data)
, perf_densities_(number_of_perforations_)
, perf_pressure_diffs_(number_of_perforations_)
, parallelB_(duneB_, pw_info)
, F0_(numWellConservationEq)
, ipr_a_(number_of_phases_)
, ipr_b_(number_of_phases_)
@@ -657,6 +660,9 @@ namespace Opm
// Update the connection
connectionRates_ = connectionRates;
// accumulate resWell_ and invDuneD_ in parallel to get the effects of all perforations (which might be distributed)
wellhelpers::sumDistributedWellEntries(invDuneD_[0][0], resWell_[0],
this->parallel_well_info_->communication());
// add vol * dF/dt + Q to the well equations;
for (int componentIdx = 0; componentIdx < numWellConservationEq; ++componentIdx) {
// TODO: following the development in MSW, we need to convert the volume of the wellbore to surface volume
@@ -2475,7 +2481,8 @@ namespace Opm
assert( invDrw_.size() == invDuneD_.N() );
// Bx_ = duneB_ * x
duneB_.mv(x, Bx_);
parallelB_.mv(x, Bx_);
// invDBx = invDuneD_ * Bx_
// TODO: with this we modify the content of invDrw_.
// Is it necessary to do this to save some memory?
@@ -2574,7 +2581,7 @@ namespace Opm
BVectorWell resWell = resWell_;
// resWell = resWell - B * x
duneB_.mmv(x, resWell);
parallelB_.mmv(x, resWell);
// xw = D^-1 * resWell
invDuneD_.mv(resWell, xw);
}
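
Schematically, the two operations changed above fit into the block system of the standard well (an illustration following the comments in the code, not code from the commit):

[ A   C^T ] [ x   ]   [ res   ]
[ B   D   ] [ x_w ] = [ res_w ]

apply:   y   = A x - C^T D^-1 (B x)
recover: x_w = D^-1 (res_w - B x)

Once B x has been summed onto all ranks that compute for the well, every rank holds the full product and can apply D^-1 and its local part of C^T without further communication.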

View File

@@ -1,6 +1,7 @@
/*
Copyright 2016 SINTEF ICT, Applied Mathematics.
Copyright 2016 Statoil ASA.
Copyright 2020 OPM-OP AS.
This file is part of the Open Porous Media project (OPM).
@@ -22,6 +23,12 @@
#ifndef OPM_WELLHELPERS_HEADER_INCLUDED
#define OPM_WELLHELPERS_HEADER_INCLUDED
#include <opm/common/OpmLog/OpmLog.hpp>
#include <opm/simulators/wells/ParallelWellInfo.hpp>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/common/dynmatrix.hh>
#include <dune/common/parallel/mpihelper.hh>
#include <cassert>
#include <numeric>
#include <string>
#include <vector>
@@ -33,6 +40,93 @@ namespace Opm {
namespace wellhelpers
{
/// \brief A wrapper around the B matrix for distributed wells
///
/// For standard wells, applying the B matrix is basically a multiplication
/// with the values in the perforated cells, followed by a reduction
/// (summation) of these into the well equations.
///
/// This class does that in the functions mv and mmv (from the DUNE
/// matrix interface).
///
/// \tparam Scalar The scalar used for the computation.
template<typename Scalar>
class ParallelStandardWellB
{
public:
using Block = Dune::DynamicMatrix<Scalar>;
using Matrix = Dune::BCRSMatrix<Block>;
ParallelStandardWellB(const Matrix& B, const ParallelWellInfo& pinfo)
: B_(&B), pinfo_(&pinfo)
{}
//! y = A x
template<class X, class Y>
void mv (const X& x, Y& y) const
{
#if !defined(NDEBUG) && HAVE_MPI
// We need to make sure that all ranks are actually computing
// for the same well. We do this by checking the name of the well.
int cstring_size = pinfo_->name().size()+1;
std::vector<int> sizes(pinfo_->communication().size());
pinfo_->communication().allgather(&cstring_size, 1, sizes.data());
std::vector<int> offsets(sizes.size()+1, 0); // the last entry will hold the accumulated size
std::partial_sum(sizes.begin(), sizes.end(), offsets.begin() + 1);
std::vector<char> cstrings(offsets[sizes.size()]);
bool consistentWells = true;
char* send = const_cast<char*>(pinfo_->name().c_str());
pinfo_->communication().allgatherv(send, cstring_size,
cstrings.data(), sizes.data(),
offsets.data());
for(std::size_t i = 0; i < sizes.size(); ++i)
{
std::string name(cstrings.data()+offsets[i]);
if (name != pinfo_->name())
{
if (pinfo_->communication().rank() == 0)
{
// Only one process per well logs; it might not be rank 0 of MPI_COMM_WORLD, though.
std::string msg = std::string("Fatal error: not all ranks are computing for the same well; ")
+ "expected " + pinfo_->name() + " but got " + name;
OpmLog::debug(msg);
}
consistentWells = false;
break;
}
}
pinfo_->communication().barrier();
// Not all processes are involved here, so we cannot throw; we use MPI_Abort and hope MPI kills the others, too.
if (!consistentWells)
{
MPI_Abort(MPI_COMM_WORLD, 1);
}
#endif
B_->mv(x, y);
// The B matrix is basically a component-wise multiplication
// with a vector followed by a parallel reduction. We do that
// reduction to all ranks computing for the well to save the
// broadcast when applying C^T.
using YField = typename Y::block_type::value_type;
assert(y.size() == 1);
this->pinfo_->communication().allreduce<std::plus<YField>>(y[0].container().data(),
y[0].container().size());
}
//! y -= A x
template<class X, class Y>
void mmv (const X& x, Y& y) const
{
Y temp(y);
mv(x, temp); // includes parallel reduction
y -= temp;
}
private:
const Matrix* B_;
const ParallelWellInfo* pinfo_;
};
inline
double computeHydrostaticCorrection(const double well_ref_depth, const double vfp_ref_depth,
const double rho, const double gravity) {
@@ -44,6 +138,38 @@ namespace Opm {
/// \brief Sums the entries of the diagonal matrix and the residual vector for distributed wells
template<typename Scalar, typename Comm>
void sumDistributedWellEntries(Dune::DynamicMatrix<Scalar>& mat, Dune::DynamicVector<Scalar>& vec,
const Comm& comm)
{
// DynamicMatrix does not use one contiguous array for storing the data
// but a DynamicVector of DynamicVectors. Hence we need to copy the data
// to contiguous memory for MPI.
if (comm.size() == 1)
{
return;
}
std::vector<Scalar> allEntries;
allEntries.reserve(mat.N()*mat.M()+vec.size());
for(const auto& row: mat)
{
allEntries.insert(allEntries.end(), row.begin(), row.end());
}
allEntries.insert(allEntries.end(), vec.begin(), vec.end());
comm.sum(allEntries.data(), allEntries.size());
auto pos = allEntries.begin();
auto cols = mat.cols();
for(auto&& row: mat)
{
std::copy(pos, pos + cols, &(row[0]));
pos += cols;
}
assert(std::size_t(allEntries.end() - pos) == vec.size());
std::copy(pos, allEntries.end(), &(vec[0]));
}
template <int dim, class C2F, class FC>
std::array<double, dim>
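
As a usage sketch of the two helpers added in this file (the function and variable names below are illustrative, and the header path is assumed to be the one this diff touches):

#include <opm/simulators/wells/ParallelWellInfo.hpp>
#include <opm/simulators/wells/WellHelpers.hpp>
#include <dune/common/dynmatrix.hh>
#include <dune/common/dynvector.hh>
#include <dune/istl/bcrsmatrix.hh>

// Complete the well system over all ranks of a distributed well and
// apply B in parallel; Bx ends up holding the full product on every rank.
template<class BVector, class BVectorWell>
void applyDistributedWell(const Dune::BCRSMatrix<Dune::DynamicMatrix<double>>& duneB,
                          Dune::DynamicMatrix<double>& invDuneD00,
                          Dune::DynamicVector<double>& resWell0,
                          const Opm::ParallelWellInfo& pw_info,
                          const BVector& x, BVectorWell& Bx)
{
    // Sum the locally assembled diagonal block and residual so that every
    // participating rank holds the complete well system.
    Opm::wellhelpers::sumDistributedWellEntries(invDuneD00, resWell0,
                                                pw_info.communication());
    // Bx = B x, reduced onto all ranks of the well's communicator.
    Opm::wellhelpers::ParallelStandardWellB<double> parallelB(duneB, pw_info);
    parallelB.mv(x, Bx);
}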

View File

@@ -121,7 +121,9 @@ namespace Opm
has_brine,
Indices::numPhases >;
/// Constructor
WellInterface(const Well& well, const int time_step,
WellInterface(const Well& well,
const ParallelWellInfo& pw_info,
const int time_step,
const ModelParameters& param,
const RateConverterType& rate_converter,
const int pvtRegionIdx,
@@ -317,6 +319,8 @@ namespace Opm
Well well_ecl_;
const ParallelWellInfo* parallel_well_info_;
const int current_step_;
// simulation parameters

View File

@@ -30,6 +30,7 @@ namespace Opm
template<typename TypeTag>
WellInterface<TypeTag>::
WellInterface(const Well& well,
const ParallelWellInfo& pw_info,
const int time_step,
const ModelParameters& param,
const RateConverterType& rate_converter,
@@ -40,6 +41,7 @@ namespace Opm
const int first_perf_index,
const std::vector<PerforationData>& perf_data)
: well_ecl_(well)
, parallel_well_info_(&pw_info)
, current_step_(time_step)
, param_(param)
, rateConverter_(rate_converter)

View File

@@ -126,8 +126,9 @@ BOOST_AUTO_TEST_CASE(TestStandardWellInput) {
Opm::PerforationData dummy;
std::vector<Opm::PerforationData> pdata(well.getConnections().size(), dummy);
Opm::ParallelWellInfo pinfo{well.name()};
BOOST_CHECK_THROW( StandardWell( well, -1, param, *rateConverter, 0, 3, 3, 0, 0, pdata), std::invalid_argument);
BOOST_CHECK_THROW( StandardWell( well, pinfo, -1, param, *rateConverter, 0, 3, 3, 0, 0, pdata), std::invalid_argument);
}
@@ -156,7 +157,8 @@ BOOST_AUTO_TEST_CASE(TestBehavoir) {
std::vector<int>(10, 0)));
Opm::PerforationData dummy;
std::vector<Opm::PerforationData> pdata(wells_ecl[w].getConnections().size(), dummy);
wells.emplace_back(new StandardWell(wells_ecl[w], current_timestep, param, *rateConverter, 0, 3, 3, w, 0, pdata) );
Opm::ParallelWellInfo pinfo{wells_ecl[w].name()};
wells.emplace_back(new StandardWell(wells_ecl[w], pinfo, current_timestep, param, *rateConverter, 0, 3, 3, w, 0, pdata) );
}
}