Support compiling Cpr without any MPI support.

There is a problem compiling the upstream
Hierarchy::recalculateGalerkin() without MPI. Therefore we
reimplement that method in our AMG version. The fixes will travel
upstream.
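
For readers unfamiliar with what that method computes, here is a
standalone sketch of the idea behind the reimplementation shown in the
diff below. It uses plain dense matrices and a hypothetical aggregate
map instead of the Dune/OPM types (galerkinProduct, recalculateHierarchy
and aggregates are illustrative names, not the real API): after the
finest matrix has changed, every coarse matrix is rebuilt as the
Galerkin product P^T A P, where P is the piecewise-constant prolongation
induced by the aggregates.

// Sketch only: dense stand-ins for the sparse Dune types.
#include <cstddef>
#include <iostream>
#include <vector>

using DenseMatrix = std::vector<std::vector<double>>;

// Galerkin product A_c = P^T A P for a piecewise-constant prolongation:
// entry (i,j) of the coarse matrix sums all fine entries A(k,l) with
// aggregate(k) == i and aggregate(l) == j.
DenseMatrix galerkinProduct(const DenseMatrix& fine,
                            const std::vector<std::size_t>& aggregate,
                            std::size_t coarseSize)
{
    DenseMatrix coarse(coarseSize, std::vector<double>(coarseSize, 0.0));
    for (std::size_t k = 0; k < fine.size(); ++k)
        for (std::size_t l = 0; l < fine.size(); ++l)
            coarse[aggregate[k]][aggregate[l]] += fine[k][l];
    return coarse;
}

// Rebuild every coarse level after the finest matrix changed, keeping
// the aggregation (the hierarchy structure) fixed.
void recalculateHierarchy(std::vector<DenseMatrix>& levels,
                          const std::vector<std::vector<std::size_t>>& aggregates)
{
    for (std::size_t lvl = 0; lvl + 1 < levels.size(); ++lvl)
        levels[lvl + 1] = galerkinProduct(levels[lvl], aggregates[lvl],
                                          levels[lvl + 1].size());
}

int main()
{
    // 4x4 fine matrix (1D Laplacian), two aggregates {0,1} and {2,3}.
    std::vector<DenseMatrix> levels = {
        {{ 2, -1,  0,  0},
         {-1,  2, -1,  0},
         { 0, -1,  2, -1},
         { 0,  0, -1,  2}},
        DenseMatrix(2, std::vector<double>(2, 0.0))  // coarse level, rebuilt below
    };
    std::vector<std::vector<std::size_t>> aggregates = {{0, 0, 1, 1}};

    recalculateHierarchy(levels, aggregates);
    for (const auto& row : levels[1]) {
        for (double v : row) std::cout << v << ' ';
        std::cout << '\n';
    }
}

In the actual patch, Dune's BaseGalerkinProduct and the stored
aggregates maps play the roles of galerkinProduct and aggregates, and
the redistributed matrices are handled in addition.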

Apart from that, we made sure that all types have a meaning even
if no MPI is found, and that there is at least a redistribute
method to call even when using SequentialInformation. In that case
the method does nothing, but it prevents compilation errors
complaining about missing parallel functionality.
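
A minimal sketch of both workarounds follows; it compiles on its own,
with or without MPI. All types here (Matrix, SequentialInformation,
OwnerOverlapCopyCommunication) are stubs that merely stand in for the
Dune classes, HAVE_MPI plays the role of the build-system macro, and
the signatures are simplified compared to the diff below.

#include <iostream>
#include <type_traits>

struct SequentialInformation {};          // stub for Dune::Amg::SequentialInformation
#if HAVE_MPI
struct OwnerOverlapCopyCommunication {};  // stub for the parallel communication type
using Comm = OwnerOverlapCopyCommunication;
#else
// Without MPI the "parallel" alias falls back to the sequential type,
// so every typedef in the solver still has a meaning.
using Comm = SequentialInformation;
#endif

struct Matrix {};  // stub

// No-op overload: chosen when the communication type is sequential,
// so the call compiles but does nothing.
template<class M>
void redistributeMatrixAmg(M&, M&, SequentialInformation&, SequentialInformation&)
{
    std::cout << "sequential: nothing to redistribute\n";
}

// Real work only for genuinely parallel communication types.
template<class M, class PI>
typename std::enable_if<!std::is_same<PI, SequentialInformation>::value, void>::type
redistributeMatrixAmg(M&, M&, PI&, PI&)
{
    std::cout << "parallel: redistribute matrix entries\n";
}

int main()
{
    Matrix a, b;
    Comm c1, c2;
    redistributeMatrixAmg(a, b, c1, c2);  // picks the right overload either way
}

Compiling with -DHAVE_MPI=1 selects the enable_if overload, compiling
without it selects the no-op, so the same call site builds in both
configurations.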
Markus Blatt 2019-04-11 16:14:57 +02:00
parent 26e1a9a94d
commit a270994ad0
2 changed files with 76 additions and 12 deletions


@@ -63,18 +63,28 @@ namespace Opm
         using CritBase = Dune::Amg::SymmetricCriterion<Matrix, CouplingMetric>;
         using Criterion = Dune::Amg::CoarsenCriterion<CritBase>;
-        using ParallelMatrixAdapter = Dune::OverlappingSchwarzOperator<Matrix, Vector, Vector, Dune::OwnerOverlapCopyCommunication<int,int> >;
         using CprSmootherFine = Opm::ParallelOverlappingILU0<Matrix, Vector, Vector, Dune::Amg::SequentialInformation>;
         using CprSmootherCoarse = CprSmootherFine;
         using BlackoilAmgType = BlackoilAmgCpr<MatrixAdapter,CprSmootherFine, CprSmootherCoarse, Criterion, Dune::Amg::SequentialInformation,
                                                pressureEqnIndex, pressureVarIndex>;
-        using ParallelCprSmootherFine = Opm::ParallelOverlappingILU0<Matrix, Vector, Vector, Dune::OwnerOverlapCopyCommunication<int,int> >;
+        using OperatorSerial = WellModelMatrixAdapter< Matrix, Vector, Vector, WellModel, false>;
+#if HAVE_MPI
+        using POrComm = Dune::OwnerOverlapCopyCommunication<int,int>;
+        using ParallelMatrixAdapter = Dune::OverlappingSchwarzOperator<Matrix, Vector, Vector, POrComm >;
+        using ParallelCprSmootherFine = Opm::ParallelOverlappingILU0<Matrix, Vector, Vector, POrComm >;
         using ParallelCprSmootherCoarse = ParallelCprSmootherFine;
         using ParallelBlackoilAmgType = BlackoilAmgCpr<ParallelMatrixAdapter, ParallelCprSmootherFine, ParallelCprSmootherCoarse, Criterion,
-                                                       Dune::OwnerOverlapCopyCommunication<int,int>, pressureEqnIndex, pressureVarIndex>;
-        using OperatorSerial = WellModelMatrixAdapter< Matrix, Vector, Vector, WellModel, false>;
-        using OperatorParallel = WellModelMatrixAdapter< Matrix, Vector, Vector, WellModel, true>;
+                                                       POrComm, pressureEqnIndex, pressureVarIndex>;
+        using OperatorParallel = WellModelMatrixAdapter< Matrix, Vector, Vector, WellModel, true>;
+        using ParallelScalarProduct = Dune::OverlappingSchwarzScalarProduct<Vector, POrComm>;
+#else
+        using POrComm = Dune::Amg::SequentialInformation;
+        using ParallelBlackoilAmgType = BlackoilAmgType;
+        using ParallelScalarProduct = Dune::SeqScalarProduct<Vector>;
+        using ParallelMatrixAdapter = MatrixAdapter;
+        using OperatorParallel = OperatorSerial;
+#endif
     public:
         static void registerParameters()
@@ -139,8 +149,6 @@ namespace Opm
             }
         }
-        using POrComm = Dune::OwnerOverlapCopyCommunication<int,int>;
 #if DUNE_VERSION_NEWER(DUNE_ISTL, 2, 6)
         constexpr Dune::SolverCategory::Category category=Dune::SolverCategory::overlapping;
         auto sp = Dune::createScalarProduct<Vector,POrComm>(*comm_, category);
@@ -229,7 +237,7 @@ namespace Opm
                                                      BlackoilAmgType, ParallelBlackoilAmgType>::type;
             using SpType = typename std::conditional<std::is_same<Comm, Dune::Amg::SequentialInformation>::value,
                                                      Dune::SeqScalarProduct<Vector>,
-                                                     Dune::OverlappingSchwarzScalarProduct<Vector, Comm> >::type;
+                                                     ParallelScalarProduct >::type;
             using OperatorType = typename std::conditional<std::is_same<Comm, Dune::Amg::SequentialInformation>::value,
                                                            MatrixAdapter, ParallelMatrixAdapter>::type;
             typedef typename AmgType::Smoother Smoother;
@@ -311,7 +319,6 @@ namespace Opm
         SPPointer sp_;
         std::shared_ptr< Dune::BiCGSTABSolver<Vector> > linsolve_;
         const void* oldMat;
-        using POrComm = Dune::OwnerOverlapCopyCommunication<int,int>;
         std::shared_ptr<POrComm> comm_;
     }; // end ISTLSolver


@@ -75,6 +75,26 @@ namespace Dune
     };
 #endif
+#if HAVE_MPI
+    template<class M, class T>
+    void redistributeMatrixAmg(M&, M&, SequentialInformation&, SequentialInformation&, T&)
+    {
+        // noop
+    }
+    template<class M, class PI>
+    typename std::enable_if<!std::is_same<PI,SequentialInformation>::value,void>::type
+    redistributeMatrixAmg(M& mat, M& matRedist, PI& info, PI& infoRedist,
+                          Dune::RedistributeInformation<PI>& redistInfo)
+    {
+        info.buildGlobalLookup(mat.N());
+        redistributeMatrixEntries(mat, matRedist, info, infoRedist, redistInfo);
+        info.freeGlobalLookup();
+    }
+#endif
     /**
      * @defgroup ISTL_PAAMG Parallel Algebraic Multigrid
      * @ingroup ISTL_Prec
@@ -227,7 +247,44 @@ namespace Dune
       */
      void recalculateHierarchy()
      {
-        matrices_->recalculateGalerkin(NegateSet<typename PI::OwnerSet>());
+        auto copyFlags = NegateSet<typename PI::OwnerSet>();
+        const auto& matrices = matrices_->matrices();
+        const auto& aggregatesMapHierarchy = matrices_->aggregatesMaps();
+        const auto& infoHierarchy = matrices_->parallelInformation();
+        const auto& redistInfoHierarchy = matrices_->redistributeInformation();
+        BaseGalerkinProduct productBuilder;
+        auto aggregatesMap = aggregatesMapHierarchy.begin();
+        auto info = infoHierarchy.finest();
+        auto redistInfo = redistInfoHierarchy.begin();
+        auto matrix = matrices.finest();
+        auto coarsestMatrix = matrices.coarsest();
+        using Matrix = typename M::matrix_type;
+#if HAVE_MPI
+        if(matrix.isRedistributed()) {
+            redistributeMatrixAmg(const_cast<Matrix&>(matrix->getmat()),
+                                  const_cast<Matrix&>(matrix.getRedistributed().getmat()),
+                                  const_cast<PI&>(*info), const_cast<PI&>(info.getRedistributed()),
+                                  const_cast<Dune::RedistributeInformation<PI>&>(*redistInfo));
+        }
+#endif
+        for(; matrix!=coarsestMatrix; ++aggregatesMap) {
+            const Matrix& fine = (matrix.isRedistributed() ? matrix.getRedistributed() : *matrix).getmat();
+            ++matrix;
+            ++info;
+            ++redistInfo;
+            productBuilder.calculate(fine, *(*aggregatesMap), const_cast<Matrix&>(matrix->getmat()), *info, copyFlags);
+#if HAVE_MPI
+            if(matrix.isRedistributed()) {
+                redistributeMatrixAmg(const_cast<Matrix&>(matrix->getmat()),
+                                      const_cast<Matrix&>(matrix.getRedistributed().getmat()),
+                                      const_cast<PI&>(*info), const_cast<PI&>(info.getRedistributed()),
+                                      const_cast<Dune::RedistributeInformation<PI>&>(*redistInfo));
+            }
+#endif
+        }
+      }
      /**
@@ -471,7 +528,7 @@ namespace Dune
        buildHierarchy_= true;
        coarsesolverconverged = true;
        smoothers_.reset(new Hierarchy<Smoother,A>);
-       matrices_->recalculateGalerkin(NegateSet<typename PI::OwnerSet>());
+       recalculateHierarchy();
        matrices_->coarsenSmoother(*smoothers_, smootherArgs_);
        setupCoarseSolver();
        if (verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0) {