Merge pull request #1789 from blattms/sequential-cpr-dune-2.4

Support CPR when using DUNE 2.4 and no MPI
Bård Skaflestad 2019-04-11 22:10:37 +02:00 committed by GitHub
commit 08be4ed0f9
2 changed files with 128 additions and 12 deletions
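
The change keys all of CPR's parallel solver types off HAVE_MPI: in an MPI build the AMG, smoother, operator, and scalar-product types stay on Dune::OwnerOverlapCopyCommunication, while in a non-MPI (DUNE 2.4) build the same alias names are redirected to the sequential types so the CPR code path still compiles. A minimal, self-contained sketch of that fallback pattern, with stand-in types rather than the actual OPM/DUNE classes (compile with -DHAVE_MPI=1 or -DHAVE_MPI=0):

#include <iostream>

struct SeqComm {};    // stand-in for Dune::Amg::SequentialInformation
struct SeqAmg  {};    // stand-in for the sequential AMG/CPR solver type
#if HAVE_MPI
struct MpiComm {};    // stand-in for Dune::OwnerOverlapCopyCommunication<int,int>
struct MpiAmg  {};    // stand-in for the MPI-parallel AMG/CPR solver type
using POrComm         = MpiComm;
using ParallelAmgType = MpiAmg;
#else
// No MPI available: the "parallel" aliases simply name the sequential types,
// so code written against POrComm / ParallelAmgType still compiles unchanged.
using POrComm         = SeqComm;
using ParallelAmgType = SeqAmg;
#endif

int main()
{
    ParallelAmgType amg;   // resolves to MpiAmg or SeqAmg at compile time
    (void)amg;
    std::cout << "parallel aliases resolved\n";
}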

View File

@@ -63,18 +63,28 @@ namespace Opm
using CritBase = Dune::Amg::SymmetricCriterion<Matrix, CouplingMetric>;
using Criterion = Dune::Amg::CoarsenCriterion<CritBase>;
-using ParallelMatrixAdapter = Dune::OverlappingSchwarzOperator<Matrix, Vector, Vector, Dune::OwnerOverlapCopyCommunication<int,int> >;
using CprSmootherFine = Opm::ParallelOverlappingILU0<Matrix, Vector, Vector, Dune::Amg::SequentialInformation>;
using CprSmootherCoarse = CprSmootherFine;
using BlackoilAmgType = BlackoilAmgCpr<MatrixAdapter,CprSmootherFine, CprSmootherCoarse, Criterion, Dune::Amg::SequentialInformation,
pressureEqnIndex, pressureVarIndex>;
-using ParallelCprSmootherFine = Opm::ParallelOverlappingILU0<Matrix, Vector, Vector, Dune::OwnerOverlapCopyCommunication<int,int> >;
+using OperatorSerial = WellModelMatrixAdapter< Matrix, Vector, Vector, WellModel, false>;
+#if HAVE_MPI
+using POrComm = Dune::OwnerOverlapCopyCommunication<int,int>;
+using ParallelMatrixAdapter = Dune::OverlappingSchwarzOperator<Matrix, Vector, Vector, POrComm >;
+using ParallelCprSmootherFine = Opm::ParallelOverlappingILU0<Matrix, Vector, Vector, POrComm >;
using ParallelCprSmootherCoarse = ParallelCprSmootherFine;
using ParallelBlackoilAmgType = BlackoilAmgCpr<ParallelMatrixAdapter, ParallelCprSmootherFine, ParallelCprSmootherCoarse, Criterion,
-Dune::OwnerOverlapCopyCommunication<int,int>, pressureEqnIndex, pressureVarIndex>;
-using OperatorSerial = WellModelMatrixAdapter< Matrix, Vector, Vector, WellModel, false>;
-using OperatorParallel = WellModelMatrixAdapter< Matrix, Vector, Vector, WellModel, true>;
+POrComm, pressureEqnIndex, pressureVarIndex>;
+using OperatorParallel = WellModelMatrixAdapter< Matrix, Vector, Vector, WellModel, true>;
+using ParallelScalarProduct = Dune::OverlappingSchwarzScalarProduct<Vector, POrComm>;
+#else
+using POrComm = Dune::Amg::SequentialInformation;
+using ParallelBlackoilAmgType = BlackoilAmgType;
+using ParallelScalarProduct = Dune::SeqScalarProduct<Vector>;
+using ParallelMatrixAdapter = MatrixAdapter;
+using OperatorParallel = OperatorSerial;
+#endif
public:
static void registerParameters()
@@ -139,8 +149,6 @@ namespace Opm
}
}
-using POrComm = Dune::OwnerOverlapCopyCommunication<int,int>;
#if DUNE_VERSION_NEWER(DUNE_ISTL, 2, 6)
constexpr Dune::SolverCategory::Category category=Dune::SolverCategory::overlapping;
auto sp = Dune::createScalarProduct<Vector,POrComm>(*comm_, category);
@@ -229,7 +237,7 @@ namespace Opm
BlackoilAmgType, ParallelBlackoilAmgType>::type;
using SpType = typename std::conditional<std::is_same<Comm, Dune::Amg::SequentialInformation>::value,
Dune::SeqScalarProduct<Vector>,
-Dune::OverlappingSchwarzScalarProduct<Vector, Comm> >::type;
+ParallelScalarProduct >::type;
using OperatorType = typename std::conditional<std::is_same<Comm, Dune::Amg::SequentialInformation>::value,
MatrixAdapter, ParallelMatrixAdapter>::type;
typedef typename AmgType::Smoother Smoother;
@@ -311,7 +319,6 @@ namespace Opm
SPPointer sp_;
std::shared_ptr< Dune::BiCGSTABSolver<Vector> > linsolve_;
const void* oldMat;
-using POrComm = Dune::OwnerOverlapCopyCommunication<int,int>;
std::shared_ptr<POrComm> comm_;
}; // end ISTLSolver

View File

@@ -23,6 +23,78 @@ namespace Dune
{
namespace Amg
{
#if !DUNE_VERSION_NEWER(DUNE_ISTL, 2, 5)
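// Needed only for DUNE < 2.5: selects a direct solver type for the AMG coarse level
// at compile time (UMFPack for double or std::complex<double> matrices, otherwise
// SuperLU, otherwise none; in the latter case create() throws NotImplemented).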
template<class M, class V>
struct DirectSolverSelector
{
#if DISABLE_AMG_DIRECTSOLVER
static constexpr bool isDirectSolver = false;
using type = void;
#elif HAVE_SUITESPARSE_UMFPACK
using field_type = typename M::field_type;
using type = typename std::conditional<std::is_same<double, field_type>::value, UMFPack<M>,
typename std::conditional<std::is_same<std::complex<double>, field_type>::value,
UMFPack<M>,
void>::type>::type;
static constexpr bool isDirectSolver = std::is_same<UMFPack<M>, type>::value;
#elif HAVE_SUPERLU
static constexpr bool isDirectSolver = true;
using type = SuperLU<M>;
#else
static constexpr bool isDirectSolver = false;
using type = void;
#endif
static type* create(const M& mat, bool verbose, bool reusevector )
{
return create(mat, verbose, reusevector, std::integral_constant<bool, isDirectSolver>());
}
static type* create(const M& mat, bool verbose, bool reusevector, std::integral_constant<bool, false> )
{
DUNE_THROW(NotImplemented,"DirectSolver not selected");
return nullptr;
}
static type* create(const M& mat, bool verbose, bool reusevector, std::integral_constant<bool, true> )
{
return new type(mat, verbose, reusevector);
}
static std::string name()
{
if(std::is_same<type,void>::value)
return "None";
#if HAVE_SUITESPARSE_UMFPACK
if(std::is_same<type, UMFPack<M> >::value)
return "UMFPack";
#endif
#if HAVE_SUPERLU
if(std::is_same<type, SuperLU<M> >::value)
return "SuperLU";
#endif
}
};
#endif
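
Aside, not part of the commit: the create() overloads above use tag dispatch on std::integral_constant<bool, isDirectSolver>, so the overload that actually constructs a direct solver is only instantiated when one was compiled in; otherwise the call resolves to the overload that throws. A self-contained toy version of that pattern, with stand-in types instead of the DUNE classes:

#include <iostream>
#include <stdexcept>
#include <type_traits>

struct RealSolver { explicit RealSolver(int) {} };   // stand-in for UMFPack/SuperLU

template<bool available>
struct ToySelector
{
    using type = typename std::conditional<available, RealSolver, void>::type;
    static constexpr bool isDirectSolver = available;

    // Public entry point: forwards to one of the tag overloads below.
    static type* create(int arg)
    { return create(arg, std::integral_constant<bool, isDirectSolver>()); }

    // No direct solver available: the 'new type(...)' overload is never instantiated.
    static type* create(int, std::integral_constant<bool, false>)
    { throw std::runtime_error("no direct solver available"); }

    static type* create(int arg, std::integral_constant<bool, true>)
    { return new type(arg); }
};

int main()
{
    delete ToySelector<true>::create(42);   // constructs and frees a RealSolver
    // ToySelector<false>::create(42) would compile as well, but throw at run time.
    std::cout << "tag dispatch ok\n";
}
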
#if HAVE_MPI
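// These overloads let the AMG update code call redistributeMatrixAmg() unconditionally:
// the SequentialInformation version is a no-op (nothing to redistribute), while the
// enable_if overload below rebuilds the global lookup and redistributes the matrix
// entries for genuinely parallel setups.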
template<class M, class T>
void redistributeMatrixAmg(M&, M&, SequentialInformation&, SequentialInformation&, T&)
{
// noop
}
template<class M, class PI>
typename std::enable_if<!std::is_same<PI,SequentialInformation>::value,void>::type
redistributeMatrixAmg(M& mat, M& matRedist, PI& info, PI& infoRedist,
Dune::RedistributeInformation<PI>& redistInfo)
{
info.buildGlobalLookup(mat.N());
redistributeMatrixEntries(mat, matRedist, info, infoRedist, redistInfo);
info.freeGlobalLookup();
}
#endif
/**
* @defgroup ISTL_PAAMG Parallel Algebraic Multigrid
* @ingroup ISTL_Prec
@@ -175,7 +247,44 @@ namespace Dune
*/
void recalculateHierarchy()
{
-matrices_->recalculateGalerkin(NegateSet<typename PI::OwnerSet>());
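// Rebuild the Galerkin products level by level using the existing aggregation:
// walk the matrix hierarchy from finest to coarsest, redistribute entries first where
// a level was repartitioned (MPI builds only), then let productBuilder.calculate()
// re-form each coarse matrix from the finer one. In AMG terms this recomputes the
// Galerkin coarse operators (A_coarse = P^T A_fine P) without recomputing aggregates.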
auto copyFlags = NegateSet<typename PI::OwnerSet>();
const auto& matrices = matrices_->matrices();
const auto& aggregatesMapHierarchy = matrices_->aggregatesMaps();
const auto& infoHierarchy = matrices_->parallelInformation();
const auto& redistInfoHierarchy = matrices_->redistributeInformation();
BaseGalerkinProduct productBuilder;
auto aggregatesMap = aggregatesMapHierarchy.begin();
auto info = infoHierarchy.finest();
auto redistInfo = redistInfoHierarchy.begin();
auto matrix = matrices.finest();
auto coarsestMatrix = matrices.coarsest();
using Matrix = typename M::matrix_type;
#if HAVE_MPI
if(matrix.isRedistributed()) {
redistributeMatrixAmg(const_cast<Matrix&>(matrix->getmat()),
const_cast<Matrix&>(matrix.getRedistributed().getmat()),
const_cast<PI&>(*info), const_cast<PI&>(info.getRedistributed()),
const_cast<Dune::RedistributeInformation<PI>&>(*redistInfo));
}
#endif
for(; matrix!=coarsestMatrix; ++aggregatesMap) {
const Matrix& fine = (matrix.isRedistributed() ? matrix.getRedistributed() : *matrix).getmat();
++matrix;
++info;
++redistInfo;
productBuilder.calculate(fine, *(*aggregatesMap), const_cast<Matrix&>(matrix->getmat()), *info, copyFlags);
#if HAVE_MPI
if(matrix.isRedistributed()) {
redistributeMatrixAmg(const_cast<Matrix&>(matrix->getmat()),
const_cast<Matrix&>(matrix.getRedistributed().getmat()),
const_cast<PI&>(*info), const_cast<PI&>(info.getRedistributed()),
const_cast<Dune::RedistributeInformation<PI>&>(*redistInfo));
}
#endif
}
}
/**
@@ -419,7 +528,7 @@ namespace Dune
buildHierarchy_= true;
coarsesolverconverged = true;
smoothers_.reset(new Hierarchy<Smoother,A>);
-matrices_->recalculateGalerkin(NegateSet<typename PI::OwnerSet>());
+recalculateHierarchy();
matrices_->coarsenSmoother(*smoothers_, smootherArgs_);
setupCoarseSolver();
if (verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0) {