From 26e1a9a94d194d2ce22ec02e137717fdf5d7f6b3 Mon Sep 17 00:00:00 2001 From: Markus Blatt Date: Thu, 11 Apr 2019 14:15:04 +0200 Subject: [PATCH 1/2] Support DUNE 2.4 with CPR. This version does not support the DirectSolverSelector struct and we therefore implemented a fallback. --- opm/autodiff/amgcpr.hh | 52 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/opm/autodiff/amgcpr.hh b/opm/autodiff/amgcpr.hh index 838b220b1..1aa4bec72 100644 --- a/opm/autodiff/amgcpr.hh +++ b/opm/autodiff/amgcpr.hh @@ -23,6 +23,58 @@ namespace Dune { namespace Amg { +#if !DUNE_VERSION_NEWER(DUNE_ISTL, 2, 5) + template + struct DirectSolverSelector + { +#if DISABLE_AMG_DIRECTSOLVER + static constexpr bool isDirectSolver = false; + using type = void; +#elif HAVE_SUITESPARSE_UMFPACK + using field_type = typename M::field_type; + using type = typename std::conditional::value, UMFPack, + typename std::conditional, field_type>::value, + UMFPack, + void>::type>::type; + static constexpr bool isDirectSolver = std::is_same, type>::value; +#elif HAVE_SUPERLU + static constexpr bool isDirectSolver = true; + using type = SuperLU; +#else + static constexpr bool isDirectSolver = false; + using type = void; +#endif + + static type* create(const M& mat, bool verbose, bool reusevector ) + { + return create(mat, verbose, reusevector, std::integral_constant()); + } + static type* create(const M& mat, bool verbose, bool reusevector, std::integral_constant ) + { + DUNE_THROW(NotImplemented,"DirectSolver not selected"); + return nullptr; + } + + static type* create(const M& mat, bool verbose, bool reusevector, std::integral_constant ) + { + return new type(mat, verbose, reusevector); + } + static std::string name() + { + if(std::is_same::value) + return "None"; +#if HAVE_SUITESPARSE_UMFPACK + if(std::is_same >::value) + return "UMFPack"; +#endif +#if HAVE_SUPERLU + if(std::is_same >::value) + return "SuperLU"; +#endif + } + }; + +#endif /** * @defgroup ISTL_PAAMG Parallel 
Algebraic Multigrid * @ingroup ISTL_Prec From a270994ad0d29ba925cab277515b517e4b9e6d1f Mon Sep 17 00:00:00 2001 From: Markus Blatt Date: Thu, 11 Apr 2019 16:14:57 +0200 Subject: [PATCH 2/2] Support compiling Cpr without any MPI support. There is a problem compiling upstream Hierarchy::recalculateGalerkin() without MPI. Therefore we reimplement that method in our AMG version. Fixes will travel upstream. Apart from that we made sure that all types have a meaning even if there is no MPI found and that we at least have redistribute method to call even if using SequentialInformation. In that case the method does nothing but prevents compilation errors complaining about missing parallel functionality. --- opm/autodiff/ISTLSolverEbosCpr.hpp | 27 ++++++++----- opm/autodiff/amgcpr.hh | 61 +++++++++++++++++++++++++++++- 2 files changed, 76 insertions(+), 12 deletions(-) diff --git a/opm/autodiff/ISTLSolverEbosCpr.hpp b/opm/autodiff/ISTLSolverEbosCpr.hpp index 3d07f3300..c98b007e6 100644 --- a/opm/autodiff/ISTLSolverEbosCpr.hpp +++ b/opm/autodiff/ISTLSolverEbosCpr.hpp @@ -63,18 +63,28 @@ namespace Opm using CritBase = Dune::Amg::SymmetricCriterion; using Criterion = Dune::Amg::CoarsenCriterion; - using ParallelMatrixAdapter = Dune::OverlappingSchwarzOperator >; using CprSmootherFine = Opm::ParallelOverlappingILU0; using CprSmootherCoarse = CprSmootherFine; using BlackoilAmgType = BlackoilAmgCpr; - using ParallelCprSmootherFine = Opm::ParallelOverlappingILU0 >; + using OperatorSerial = WellModelMatrixAdapter< Matrix, Vector, Vector, WellModel, false>; + +#if HAVE_MPI + using POrComm = Dune::OwnerOverlapCopyCommunication; + using ParallelMatrixAdapter = Dune::OverlappingSchwarzOperator; + using ParallelCprSmootherFine = Opm::ParallelOverlappingILU0; using ParallelCprSmootherCoarse = ParallelCprSmootherFine; using ParallelBlackoilAmgType = BlackoilAmgCpr, pressureEqnIndex, pressureVarIndex>; - - using OperatorSerial = WellModelMatrixAdapter< Matrix, Vector, Vector, WellModel, 
false>; - using OperatorParallel = WellModelMatrixAdapter< Matrix, Vector, Vector, WellModel, true>; + POrComm, pressureEqnIndex, pressureVarIndex>; + using OperatorParallel = WellModelMatrixAdapter< Matrix, Vector, Vector, WellModel, true>; + using ParallelScalarProduct = Dune::OverlappingSchwarzScalarProduct; +#else + using POrComm = Dune::Amg::SequentialInformation; + using ParallelBlackoilAmgType = BlackoilAmgType; + using ParallelScalarProduct = Dune::SeqScalarProduct; + using ParallelMatrixAdapter = MatrixAdapter; + using OperatorParallel = OperatorSerial; +#endif public: static void registerParameters() @@ -139,8 +149,6 @@ namespace Opm } } - using POrComm = Dune::OwnerOverlapCopyCommunication; - #if DUNE_VERSION_NEWER(DUNE_ISTL, 2, 6) constexpr Dune::SolverCategory::Category category=Dune::SolverCategory::overlapping; auto sp = Dune::createScalarProduct(*comm_, category); @@ -229,7 +237,7 @@ namespace Opm BlackoilAmgType, ParallelBlackoilAmgType>::type; using SpType = typename std::conditional::value, Dune::SeqScalarProduct, - Dune::OverlappingSchwarzScalarProduct >::type; + ParallelScalarProduct >::type; using OperatorType = typename std::conditional::value, MatrixAdapter, ParallelMatrixAdapter>::type; typedef typename AmgType::Smoother Smoother; @@ -311,7 +319,6 @@ namespace Opm SPPointer sp_; std::shared_ptr< Dune::BiCGSTABSolver > linsolve_; const void* oldMat; - using POrComm = Dune::OwnerOverlapCopyCommunication; std::shared_ptr comm_; }; // end ISTLSolver diff --git a/opm/autodiff/amgcpr.hh b/opm/autodiff/amgcpr.hh index 1aa4bec72..b0c82f876 100644 --- a/opm/autodiff/amgcpr.hh +++ b/opm/autodiff/amgcpr.hh @@ -75,6 +75,26 @@ namespace Dune }; #endif + + +#if HAVE_MPI + template + void redistributeMatrixAmg(M&, M&, SequentialInformation&, SequentialInformation&, T&) + { + // noop + } + + template + typename std::enable_if::value,void>::type + redistributeMatrixAmg(M& mat, M& matRedist, PI& info, PI& infoRedist, + Dune::RedistributeInformation& 
redistInfo) + { + info.buildGlobalLookup(mat.N()); + redistributeMatrixEntries(mat, matRedist, info, infoRedist, redistInfo); + info.freeGlobalLookup(); + } +#endif + /** * @defgroup ISTL_PAAMG Parallel Algebraic Multigrid * @ingroup ISTL_Prec @@ -227,7 +247,44 @@ namespace Dune */ void recalculateHierarchy() { - matrices_->recalculateGalerkin(NegateSet()); + auto copyFlags = NegateSet(); + const auto& matrices = matrices_->matrices(); + const auto& aggregatesMapHierarchy = matrices_->aggregatesMaps(); + const auto& infoHierarchy = matrices_->parallelInformation(); + const auto& redistInfoHierarchy = matrices_->redistributeInformation(); + BaseGalerkinProduct productBuilder; + auto aggregatesMap = aggregatesMapHierarchy.begin(); + auto info = infoHierarchy.finest(); + auto redistInfo = redistInfoHierarchy.begin(); + auto matrix = matrices.finest(); + auto coarsestMatrix = matrices.coarsest(); + + using Matrix = typename M::matrix_type; + +#if HAVE_MPI + if(matrix.isRedistributed()) { + redistributeMatrixAmg(const_cast(matrix->getmat()), + const_cast(matrix.getRedistributed().getmat()), + const_cast(*info), const_cast(info.getRedistributed()), + const_cast&>(*redistInfo)); + } +#endif + + for(; matrix!=coarsestMatrix; ++aggregatesMap) { + const Matrix& fine = (matrix.isRedistributed() ? 
matrix.getRedistributed() : *matrix).getmat(); + ++matrix; + ++info; + ++redistInfo; + productBuilder.calculate(fine, *(*aggregatesMap), const_cast(matrix->getmat()), *info, copyFlags); +#if HAVE_MPI + if(matrix.isRedistributed()) { + redistributeMatrixAmg(const_cast(matrix->getmat()), + const_cast(matrix.getRedistributed().getmat()), + const_cast(*info), const_cast(info.getRedistributed()), + const_cast&>(*redistInfo)); + } +#endif + } } /** @@ -471,7 +528,7 @@ namespace Dune buildHierarchy_= true; coarsesolverconverged = true; smoothers_.reset(new Hierarchy); - matrices_->recalculateGalerkin(NegateSet()); + recalculateHierarchy(); matrices_->coarsenSmoother(*smoothers_, smootherArgs_); setupCoarseSolver(); if (verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0) {