/*
  Copyright 2016 IRIS AS
  Copyright 2019, 2020 Equinor ASA
  Copyright 2020 SINTEF Digital, Mathematics and Cybernetics

  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/

#include <config.h>

#include <opm/simulators/linalg/ISTLSolver.hpp>

#include <dune/istl/schwarz.hh>

#include <opm/common/TimingMacros.hpp>

#include <opm/models/common/multiphasebaseproperties.hh>

#include <opm/simulators/linalg/FlexibleSolver.hpp>
#include <opm/simulators/linalg/ParallelIstlInformation.hpp>
#include <opm/simulators/utils/gatherDeferredLogger.hpp>

#include <fmt/format.h>

#if COMPILE_BDA_BRIDGE
#include <opm/simulators/linalg/bda/BdaBridge.hpp>
#include <opm/simulators/linalg/bda/WellContributions.hpp>
#endif

namespace Opm {
namespace detail {

#ifdef HAVE_MPI
void copyParValues(std::any& parallelInformation,
                   std::size_t size,
                   Dune::OwnerOverlapCopyCommunication<int,int>& comm)
{
    if (parallelInformation.type() == typeid(ParallelISTLInformation)) {
        const ParallelISTLInformation* parinfo =
            std::any_cast<ParallelISTLInformation>(&parallelInformation);
        assert(parinfo);
        parinfo->copyValuesTo(comm.indexSet(), comm.remoteIndices(), size, 1);
    }
}
#endif

template<class Matrix>
void makeOverlapRowsInvalid(Matrix& matrix,
                            const std::vector<int>& overlapRows)
{
    // Value to set on the diagonal: a diag(1.0) block.
    const int numEq = Matrix::block_type::rows;
    typename Matrix::block_type diag_block(0.0);
    for (int eq = 0; eq < numEq; ++eq) {
        diag_block[eq][eq] = 1.0;
    }

    // Loop over the precalculated overlap rows and columns.
    for (const auto row : overlapRows) {
        // Zero out the row.
        matrix[row] = 0.0;

        // Set the diagonal block to diag(1.0).
        matrix[row][row] = diag_block;
    }
}

template<class Matrix, class Vector, class Comm>
void FlexibleSolverInfo<Matrix,Vector,Comm>::create(const Matrix& matrix,
                                                    bool parallel,
                                                    const PropertyTree& prm,
                                                    std::size_t pressureIndex,
                                                    std::function<Vector()> weightsCalculator,
                                                    const bool forceSerial,
                                                    [[maybe_unused]] Comm& comm)
{
    // Write sizes of linear systems on all ranks to debug log.
    if (!forceSerial) {
#if HAVE_MPI
        auto basic_comm = comm.communicator();
#else
        auto basic_comm = Dune::Communication<Dune::No_Comm>{};
#endif // HAVE_MPI
        std::ostringstream os;
        os << "Linear system ";
        if (basic_comm.size() > 1) {
            os << fmt::format("on MPI rank: {} ", basic_comm.rank());
        }
        // The static_cast of Matrix::block_type::rows is needed for fmt version 10.
        // TODO: Check if the cast is still needed in future versions.
        os << fmt::format("blocksize: {} size: {:7d} block nonzeroes: {:9d}",
                          static_cast<int>(Matrix::block_type::rows),
                          matrix.N(), matrix.nonzeroes());
        DeferredLogger local_logger;
        local_logger.debug(os.str());
        auto global_logger = gatherDeferredLogger(local_logger, basic_comm);
        if (basic_comm.rank() == 0) {
            global_logger.logMessages();
        }
    }

    if (parallel) {
#if HAVE_MPI
        if (!wellOperator_) {
            using ParOperatorType = Opm::GhostLastMatrixAdapter<Matrix,Vector,Vector,Comm>;
            auto pop = std::make_unique<ParOperatorType>(matrix, comm);
            using FlexibleSolverType = Dune::FlexibleSolver<ParOperatorType>;
            auto sol = std::make_unique<FlexibleSolverType>(*pop, comm, prm,
                                                            weightsCalculator,
                                                            pressureIndex);
            this->pre_ = &sol->preconditioner();
            this->op_ = std::move(pop);
            this->solver_ = std::move(sol);
        } else {
            using ParOperatorType = WellModelGhostLastMatrixAdapter<Matrix,Vector,Vector,true>;
            auto pop = std::make_unique<ParOperatorType>(matrix, *wellOperator_, interiorCellNum_);
            using FlexibleSolverType = Dune::FlexibleSolver<ParOperatorType>;
            auto sol = std::make_unique<FlexibleSolverType>(*pop, comm, prm,
                                                            weightsCalculator,
                                                            pressureIndex);
            this->pre_ = &sol->preconditioner();
            this->op_ = std::move(pop);
            this->solver_ = std::move(sol);
        }
#endif
    } else {
        if (!wellOperator_) {
            using SeqOperatorType = Dune::MatrixAdapter<Matrix,Vector,Vector>;
            auto sop = std::make_unique<SeqOperatorType>(matrix);
            using FlexibleSolverType = Dune::FlexibleSolver<SeqOperatorType>;
            auto sol = std::make_unique<FlexibleSolverType>(*sop, prm,
                                                            weightsCalculator,
                                                            pressureIndex);
            this->pre_ = &sol->preconditioner();
            this->op_ = std::move(sop);
            this->solver_ = std::move(sol);
        } else {
            using SeqOperatorType = WellModelMatrixAdapter<Matrix,Vector,Vector,false>;
            auto sop = std::make_unique<SeqOperatorType>(matrix, *wellOperator_);
            using FlexibleSolverType = Dune::FlexibleSolver<SeqOperatorType>;
            auto sol = std::make_unique<FlexibleSolverType>(*sop, prm,
                                                            weightsCalculator,
                                                            pressureIndex);
            this->pre_ = &sol->preconditioner();
            this->op_ = std::move(sop);
            this->solver_ = std::move(sol);
        }
    }
}

template<class T, int Dim>
using BM = Dune::BCRSMatrix<MatrixBlock<T,Dim,Dim>>;
template<class T, int Dim>
using BV = Dune::BlockVector<Dune::FieldVector<T,Dim>>;

#if HAVE_MPI
using CommunicationType = Dune::OwnerOverlapCopyCommunication<int,int>;
#else
using CommunicationType = Dune::Communication<int>;
#endif

#define INSTANTIATE_FLEX(T,Dim)                                                           \
    template void makeOverlapRowsInvalid<BM<T,Dim>>(BM<T,Dim>&, const std::vector<int>&); \
    template struct FlexibleSolverInfo<BM<T,Dim>,BV<T,Dim>,CommunicationType>;

#define INSTANTIATE_TYPE(T) \
    INSTANTIATE_FLEX(T,1)   \
    INSTANTIATE_FLEX(T,2)   \
    INSTANTIATE_FLEX(T,3)   \
    INSTANTIATE_FLEX(T,4)   \
    INSTANTIATE_FLEX(T,5)   \
    INSTANTIATE_FLEX(T,6)

INSTANTIATE_TYPE(double)

#if FLOW_INSTANTIATE_FLOAT
INSTANTIATE_TYPE(float)
#endif

} // namespace detail
} // namespace Opm