Mirror of https://github.com/OPM/opm-simulators.git (synced 2025-02-25 18:55:30 -06:00)
Support compiling Cpr without any MPI support.
The upstream Hierarchy::recalculateGalerkin() does not compile without MPI, so we reimplement that method in our AMG version; the fix will travel upstream. In addition, we make sure that all types are meaningful even when MPI is not found, and that there is always a redistribute method to call, even when using SequentialInformation. In that case the method does nothing, but it prevents compilation errors about missing parallel functionality.
This commit is contained in:
parent 26e1a9a94d
commit a270994ad0
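Before the diff itself, here is a minimal, self-contained sketch of the two ideas the commit relies on: alias the "parallel" types to sequential ones when MPI is absent, and give redistributeMatrixAmg a no-op overload for SequentialInformation so the call sites compile unchanged. The names here (OwnerOverlapComm, the toy int "matrices") are stand-ins invented for illustration, not taken from the OPM or Dune headers.

// compile_fallback_sketch.cpp -- illustrative only, not OPM code.
#include <iostream>
#include <type_traits>

struct SequentialInformation {};        // stand-in for Dune::Amg::SequentialInformation

#if HAVE_MPI
struct OwnerOverlapComm {};             // stand-in for Dune::OwnerOverlapCopyCommunication<int,int>
using POrComm = OwnerOverlapComm;       // parallel communication type when MPI is available
#else
using POrComm = SequentialInformation;  // without MPI the "parallel" alias collapses to the sequential type
#endif

// Overload taken when the information type is sequential: compiles everywhere, does nothing.
template <class Matrix>
void redistributeMatrixAmg(Matrix&, Matrix&, SequentialInformation&, SequentialInformation&)
{
    // noop: nothing to redistribute in a sequential run
}

#if HAVE_MPI
// Overload taken for genuinely parallel information types (body omitted in this sketch).
template <class Matrix, class PI>
typename std::enable_if<!std::is_same<PI, SequentialInformation>::value, void>::type
redistributeMatrixAmg(Matrix&, Matrix&, PI&, PI&)
{
    // a real implementation would move matrix rows between processes here
}
#endif

int main()
{
    int mat = 0, matRedist = 0;         // toy stand-ins for matrices
    POrComm info, infoRedist;
    // The same call site compiles with and without MPI; overload resolution
    // picks the no-op for SequentialInformation.
    redistributeMatrixAmg(mat, matRedist, info, infoRedist);
    std::cout << "redistributeMatrixAmg call compiled and ran\n";
}

Compiling with or without -DHAVE_MPI=1 selects different aliases and overloads, but the call site stays identical, which is what lets the guarded code in the hunks below build in both configurations.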
@@ -63,18 +63,28 @@ namespace Opm
         using CritBase = Dune::Amg::SymmetricCriterion<Matrix, CouplingMetric>;
         using Criterion = Dune::Amg::CoarsenCriterion<CritBase>;
 
-        using ParallelMatrixAdapter = Dune::OverlappingSchwarzOperator<Matrix, Vector, Vector, Dune::OwnerOverlapCopyCommunication<int,int> >;
         using CprSmootherFine = Opm::ParallelOverlappingILU0<Matrix, Vector, Vector, Dune::Amg::SequentialInformation>;
         using CprSmootherCoarse = CprSmootherFine;
         using BlackoilAmgType = BlackoilAmgCpr<MatrixAdapter,CprSmootherFine, CprSmootherCoarse, Criterion, Dune::Amg::SequentialInformation,
                                                pressureEqnIndex, pressureVarIndex>;
-        using ParallelCprSmootherFine = Opm::ParallelOverlappingILU0<Matrix, Vector, Vector, Dune::OwnerOverlapCopyCommunication<int,int> >;
+        using OperatorSerial = WellModelMatrixAdapter< Matrix, Vector, Vector, WellModel, false>;
 
+#if HAVE_MPI
+        using POrComm = Dune::OwnerOverlapCopyCommunication<int,int>;
+        using ParallelMatrixAdapter = Dune::OverlappingSchwarzOperator<Matrix, Vector, Vector, POrComm >;
+        using ParallelCprSmootherFine = Opm::ParallelOverlappingILU0<Matrix, Vector, Vector, POrComm >;
         using ParallelCprSmootherCoarse = ParallelCprSmootherFine;
         using ParallelBlackoilAmgType = BlackoilAmgCpr<ParallelMatrixAdapter, ParallelCprSmootherFine, ParallelCprSmootherCoarse, Criterion,
-                                                       Dune::OwnerOverlapCopyCommunication<int,int>, pressureEqnIndex, pressureVarIndex>;
-        using OperatorSerial = WellModelMatrixAdapter< Matrix, Vector, Vector, WellModel, false>;
-        using OperatorParallel = WellModelMatrixAdapter< Matrix, Vector, Vector, WellModel, true>;
+                                                       POrComm, pressureEqnIndex, pressureVarIndex>;
+        using OperatorParallel = WellModelMatrixAdapter< Matrix, Vector, Vector, WellModel, true>;
+        using ParallelScalarProduct = Dune::OverlappingSchwarzScalarProduct<Vector, POrComm>;
+#else
+        using POrComm = Dune::Amg::SequentialInformation;
+        using ParallelBlackoilAmgType = BlackoilAmgType;
+        using ParallelScalarProduct = Dune::SeqScalarProduct<Vector>;
+        using ParallelMatrixAdapter = MatrixAdapter;
+        using OperatorParallel = OperatorSerial;
+#endif
 
     public:
         static void registerParameters()
@@ -139,8 +149,6 @@ namespace Opm
             }
         }
 
-        using POrComm = Dune::OwnerOverlapCopyCommunication<int,int>;
-
 #if DUNE_VERSION_NEWER(DUNE_ISTL, 2, 6)
             constexpr Dune::SolverCategory::Category category=Dune::SolverCategory::overlapping;
             auto sp = Dune::createScalarProduct<Vector,POrComm>(*comm_, category);
@@ -229,7 +237,7 @@ namespace Opm
                                                  BlackoilAmgType, ParallelBlackoilAmgType>::type;
             using SpType = typename std::conditional<std::is_same<Comm, Dune::Amg::SequentialInformation>::value,
                                                      Dune::SeqScalarProduct<Vector>,
-                                                     Dune::OverlappingSchwarzScalarProduct<Vector, Comm> >::type;
+                                                     ParallelScalarProduct >::type;
             using OperatorType = typename std::conditional<std::is_same<Comm, Dune::Amg::SequentialInformation>::value,
                                                            MatrixAdapter, ParallelMatrixAdapter>::type;
             typedef typename AmgType::Smoother Smoother;
@@ -311,7 +319,6 @@ namespace Opm
         SPPointer sp_;
         std::shared_ptr< Dune::BiCGSTABSolver<Vector> > linsolve_;
         const void* oldMat;
-        using POrComm = Dune::OwnerOverlapCopyCommunication<int,int>;
         std::shared_ptr<POrComm> comm_;
     }; // end ISTLSolver
 
|
@@ -75,6 +75,26 @@ namespace Dune
     };
 
 #endif
+
+
+#if HAVE_MPI
+    template<class M, class T>
+    void redistributeMatrixAmg(M&, M&, SequentialInformation&, SequentialInformation&, T&)
+    {
+        // noop
+    }
+
+    template<class M, class PI>
+    typename std::enable_if<!std::is_same<PI,SequentialInformation>::value,void>::type
+    redistributeMatrixAmg(M& mat, M& matRedist, PI& info, PI& infoRedist,
+                          Dune::RedistributeInformation<PI>& redistInfo)
+    {
+        info.buildGlobalLookup(mat.N());
+        redistributeMatrixEntries(mat, matRedist, info, infoRedist, redistInfo);
+        info.freeGlobalLookup();
+    }
+#endif
+
 /**
  * @defgroup ISTL_PAAMG Parallel Algebraic Multigrid
  * @ingroup ISTL_Prec
@@ -227,7 +247,44 @@ namespace Dune
      */
     void recalculateHierarchy()
     {
-      matrices_->recalculateGalerkin(NegateSet<typename PI::OwnerSet>());
+      auto copyFlags = NegateSet<typename PI::OwnerSet>();
+      const auto& matrices = matrices_->matrices();
+      const auto& aggregatesMapHierarchy = matrices_->aggregatesMaps();
+      const auto& infoHierarchy = matrices_->parallelInformation();
+      const auto& redistInfoHierarchy = matrices_->redistributeInformation();
+      BaseGalerkinProduct productBuilder;
+      auto aggregatesMap = aggregatesMapHierarchy.begin();
+      auto info = infoHierarchy.finest();
+      auto redistInfo = redistInfoHierarchy.begin();
+      auto matrix = matrices.finest();
+      auto coarsestMatrix = matrices.coarsest();
+
+      using Matrix = typename M::matrix_type;
+
+#if HAVE_MPI
+      if(matrix.isRedistributed()) {
+        redistributeMatrixAmg(const_cast<Matrix&>(matrix->getmat()),
+                              const_cast<Matrix&>(matrix.getRedistributed().getmat()),
+                              const_cast<PI&>(*info), const_cast<PI&>(info.getRedistributed()),
+                              const_cast<Dune::RedistributeInformation<PI>&>(*redistInfo));
+      }
+#endif
+
+      for(; matrix!=coarsestMatrix; ++aggregatesMap) {
+        const Matrix& fine = (matrix.isRedistributed() ? matrix.getRedistributed() : *matrix).getmat();
+        ++matrix;
+        ++info;
+        ++redistInfo;
+        productBuilder.calculate(fine, *(*aggregatesMap), const_cast<Matrix&>(matrix->getmat()), *info, copyFlags);
+#if HAVE_MPI
+        if(matrix.isRedistributed()) {
+          redistributeMatrixAmg(const_cast<Matrix&>(matrix->getmat()),
+                                const_cast<Matrix&>(matrix.getRedistributed().getmat()),
+                                const_cast<PI&>(*info), const_cast<PI&>(info.getRedistributed()),
+                                const_cast<Dune::RedistributeInformation<PI>&>(*redistInfo));
+        }
+#endif
+      }
     }
 
     /**
@@ -471,7 +528,7 @@ namespace Dune
       buildHierarchy_= true;
       coarsesolverconverged = true;
       smoothers_.reset(new Hierarchy<Smoother,A>);
-      matrices_->recalculateGalerkin(NegateSet<typename PI::OwnerSet>());
+      recalculateHierarchy();
       matrices_->coarsenSmoother(*smoothers_, smootherArgs_);
       setupCoarseSolver();
       if (verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0) {
|