Merge pull request #5819 from lisajulia/feature/ms-wells-separate-mpi-calls-from-business-logic

Remove the MPI calls from the business logic
This commit is contained in:
Atgeirr Flø Rasmussen
2024-12-20 10:14:17 +01:00
committed by GitHub
4 changed files with 102 additions and 22 deletions

View File

@@ -35,6 +35,8 @@
#include <opm/simulators/utils/DeferredLogger.hpp>
#include <opm/simulators/utils/DeferredLoggingErrorHelpers.hpp>
#include <opm/simulators/wells/ParallelWellInfo.hpp>
#include <dune/istl/preconditioners.hh>
#include <dune/istl/solvers.hh>
@@ -97,6 +99,45 @@ ValueType OIWEmulsionViscosity(const ValueType& water_viscosity,
namespace Opm::mswellhelpers {
template<class MatrixType>
ParallellMSWellB<MatrixType>::
ParallellMSWellB(const MatrixType& B,
const ParallelWellInfo<Scalar>& parallel_well_info)
: B_(B), parallel_well_info_(parallel_well_info)
{}
template<class MatrixType>
template<class X, class Y>
void ParallellMSWellB<MatrixType>::
mv (const X& x, Y& y) const
{
B_.mv(x, y);
if (this->parallel_well_info_.communication().size() > 1)
{
// Communicate here to get the contributions from all segments
this->parallel_well_info_.communication().sum(y.data(), y.size());
}
}
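
The pattern above is a purely local multiply followed by an element-wise global sum over the ranks that share the well. A minimal standalone sketch of that pattern, assuming raw MPI and a plain std::vector in place of ParallelWellInfo and the Dune block vector (all names here are hypothetical):

#include <mpi.h>
#include <vector>

// Sketch of the mv pattern: y already holds the local contribution of B x;
// the allreduce sums the per-rank contributions so that every rank ends up
// with the fully reduced well-equation values.
void distributed_mv_reduce(std::vector<double>& y, MPI_Comm well_comm)
{
    int size = 1;
    MPI_Comm_size(well_comm, &size);
    if (size > 1) {
        MPI_Allreduce(MPI_IN_PLACE, y.data(), static_cast<int>(y.size()),
                      MPI_DOUBLE, MPI_SUM, well_comm);
    }
}
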
template<class MatrixType>
template<class X, class Y>
void ParallellMSWellB<MatrixType>::
mmv (const X& x, Y& y) const
{
if (this->parallel_well_info_.communication().size() == 1)
{
// Do the same thing as before. The else branch
// produces different rounding errors and results in
// slightly different iteration counts / well curves.
B_.mmv(x, y);
} else {
Y temp(y);
mv(x, temp); // includes parallel reduction
y -= temp;
}
}
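
The rounding remark is plain floating-point non-associativity: subtracting the contributions of B x term by term (serial mmv) and subtracting a pre-reduced B x once (parallel path) can round differently. A tiny self-contained illustration, with values chosen to expose the effect:

#include <cstdio>

int main()
{
    double y = 1.0;
    double a = 1e16, b = -1e16;      // two contributions of B x to one entry

    double termByTerm = (y - a) - b; // serial mmv: subtract each term -> 0.0
    double reduced    = y - (a + b); // parallel path: reduce first    -> 1.0

    std::printf("%g vs %g\n", termByTerm, reduced);
}
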
/// Applies umfpack and checks for singularity
template <typename MatrixType, typename VectorType>
VectorType
@@ -308,8 +349,24 @@ ValueType emulsionViscosity(const ValueType& water_fraction,
template<class Scalar, int Dim>
using Vec = Dune::BlockVector<Dune::FieldVector<Scalar,Dim>>;
template<class Scalar, int Dim>
using Mat = Dune::BCRSMatrix<Dune::FieldMatrix<Scalar,Dim,Dim>>;
template<class Scalar, int M, int N = M>
using Mat = Dune::BCRSMatrix<Dune::FieldMatrix<Scalar,M,N>>;
#define INSTANTIATE_PARALLELLMSWELLB(T, M, N) \
template class ParallellMSWellB<Mat<T,M,N>>; \
template void ParallellMSWellB<Mat<T,M,N>>::mv(Vec<T,N> const&,Vec<T,M>& ) const; \
template void ParallellMSWellB<Mat<T,M,N>>::mmv(Vec<T,N> const&,Vec<T,M>& ) const;
#define INSTANTIATE_ALL_PARALLELLMSWELLB(T) \
INSTANTIATE_PARALLELLMSWELLB(T, 2, 1) \
INSTANTIATE_PARALLELLMSWELLB(T, 2, 2) \
INSTANTIATE_PARALLELLMSWELLB(T, 2, 6) \
INSTANTIATE_PARALLELLMSWELLB(T, 3, 2) \
INSTANTIATE_PARALLELLMSWELLB(T, 3, 3) \
INSTANTIATE_PARALLELLMSWELLB(T, 3, 4) \
INSTANTIATE_PARALLELLMSWELLB(T, 4, 3) \
INSTANTIATE_PARALLELLMSWELLB(T, 4, 4) \
INSTANTIATE_PARALLELLMSWELLB(T, 4, 5)
#define INSTANTIATE_UMF(T,Dim) \
template Vec<T,Dim> applyUMFPack(Dune::UMFPack<Mat<T,Dim>>&, \
@@ -353,7 +410,8 @@ using Mat = Dune::BCRSMatrix<Dune::FieldMatrix<Scalar,Dim,Dim>>;
INSTANTIATE_EVAL(T,6) \
INSTANTIATE_EVAL(T,7) \
INSTANTIATE_EVAL(T,8) \
INSTANTIATE_EVAL(T,9)
INSTANTIATE_EVAL(T,9) \
INSTANTIATE_ALL_PARALLELLMSWELLB(T)
INSTANTIATE_TYPE(double)

View File

@@ -31,12 +31,45 @@ template<class Matrix> class UMFPack;
namespace Opm {
template<class Scalar> class ParallelWellInfo;
class DeferredLogger;
class SICD;
namespace mswellhelpers
{
/// \brief A wrapper around the B matrix for distributed MS wells
///
/// For wells, applying the B matrix is basically a multiplication
/// with the equations of the perforated cells, followed by a reduction
/// (summation) of these contributions onto the well equations.
///
/// This class does that in the functions mv and mmv (from the DUNE
/// matrix interface).
///
/// \tparam MatrixType The MatrixType of the Matrix B. From this, we
/// deduce the Scalar used for the computation.
template<class MatrixType>
class ParallellMSWellB
{
public:
using Scalar = typename MatrixType::field_type;
ParallellMSWellB(const MatrixType& B,
const ParallelWellInfo<Scalar>& parallel_well_info);
//! y = A x
template<class X, class Y>
void mv (const X& x, Y& y) const;
//! y -= A x
template<class X, class Y>
void mmv (const X& x, Y& y) const;
private:
const MatrixType& B_;
const ParallelWellInfo<Scalar>& parallel_well_info_;
};
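
For readers unfamiliar with the Dune matrix interface mimicked here: mv computes y = A x and mmv computes y -= A x. A small self-contained example with a dense Dune::FieldMatrix standing in for the wrapped B (values hypothetical):

#include <dune/common/fmatrix.hh>
#include <dune/common/fvector.hh>

int main()
{
    Dune::FieldMatrix<double,2,3> B{{1.0, 2.0, 3.0},
                                    {4.0, 5.0, 6.0}};
    Dune::FieldVector<double,3> x{1.0, 1.0, 1.0};
    Dune::FieldVector<double,2> y(0.0);

    B.mv(x, y);   // y  = B x  -> (6, 15)
    B.mmv(x, y);  // y -= B x  -> back to (0, 0)
}
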
/// Applies umfpack and checks for singularity
template <typename MatrixType, typename VectorType>
VectorType

View File

@@ -38,7 +38,6 @@
#include <opm/simulators/linalg/SmallDenseMatrixUtils.hpp>
#include <opm/simulators/wells/WellHelpers.hpp>
#include <opm/simulators/wells/MSWellHelpers.hpp>
#include <opm/simulators/wells/MultisegmentWellGeneric.hpp>
#include <opm/simulators/wells/WellInterfaceGeneric.hpp>
@@ -52,6 +51,7 @@ MultisegmentWellEquations<Scalar,numWellEq,numEq>::
MultisegmentWellEquations(const MultisegmentWellGeneric<Scalar>& well, const ParallelWellInfo<Scalar>& pw_info)
: well_(well)
, pw_info_(pw_info)
, parallelB_(duneB_, pw_info)
{
}
@@ -150,12 +150,7 @@ apply(const BVector& x, BVector& Ax) const
{
BVectorWell Bx(duneB_.N());
duneB_.mv(x, Bx);
if (this->pw_info_.communication().size() > 1) {
// We need to communicate here to get the contributions from all segments
this->pw_info_.communication().sum(Bx.data(), Bx.size());
}
parallelB_.mv(x, Bx);
// It is ok to do this on each process instead of only on one,
// because the other processes would remain idle while waiting for
@@ -224,19 +219,9 @@ template<class Scalar, int numWellEq, int numEq>
void MultisegmentWellEquations<Scalar,numWellEq,numEq>::
recoverSolutionWell(const BVector& x, BVectorWell& xw) const
{
// resWell = resWell - B * x
BVectorWell resWell = resWell_;
if (this->pw_info_.communication().size() == 1) {
duneB_.mmv(x, resWell);
} else {
BVectorWell Bx(duneB_.N());
duneB_.mv(x, Bx);
// We need to communicate here to get the contributions from all segments
this->pw_info_.communication().sum(Bx.data(), Bx.size());
resWell -= Bx;
}
// resWell = resWell - B * x
parallelB_.mmv(x, resWell);
// xw = D^-1 * resWell
// It is ok to do this on each process instead of only on one,
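
Schematically, recoverSolutionWell computes xw = D^{-1}(resWell_ - B x); the wrapper only changes how the B x term is reduced across ranks. A dense stand-in showing the algebra, with hypothetical 2x2 D and 2x1 B:

#include <dune/common/fmatrix.hh>
#include <dune/common/fvector.hh>

int main()
{
    // Hypothetical stand-ins for the well matrix D, the coupling
    // matrix B, the well residual resWell_ and the reservoir update x.
    Dune::FieldMatrix<double,2,2> D{{4.0, 1.0},
                                    {1.0, 3.0}};
    Dune::FieldMatrix<double,2,1> B{{2.0},
                                    {0.5}};
    Dune::FieldVector<double,2> resWell{1.0, 2.0};
    Dune::FieldVector<double,1> x{0.25};

    B.mmv(x, resWell);            // resWell -= B x (parallelB_.mmv adds the reduction)
    Dune::FieldVector<double,2> xw;
    D.solve(xw, resWell);         // xw = D^{-1} resWell
}
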

View File

@@ -24,6 +24,7 @@
#include <opm/simulators/utils/ParallelCommunication.hpp>
#include <opm/simulators/wells/ParallelWellInfo.hpp>
#include <opm/simulators/wells/MSWellHelpers.hpp>
#include <dune/common/fmatrix.hh>
#include <dune/common/fvector.hh>
#include <dune/istl/bcrsmatrix.hh>
@@ -153,6 +154,9 @@ public:
std::vector<int> cells_;
const ParallelWellInfo<Scalar>& pw_info_;
// Wrapper for the parallel application of B for distributed wells
mswellhelpers::ParallellMSWellB<OffDiagMatWell> parallelB_;
};
}