/* Copyright 2022-2023 SINTEF AS This file is part of the Open Porous Media project (OPM). OPM is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. OPM is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with OPM. If not, see . */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace Opm::gpuistl { template GpuDILU::GpuDILU(const M& A, bool splitMatrix, bool tuneKernels) : m_cpuMatrix(A) , m_levelSets(Opm::getMatrixRowColoring(m_cpuMatrix, Opm::ColoringType::LOWER)) , m_reorderedToNatural(detail::createReorderedToNatural(m_levelSets)) , m_naturalToReordered(detail::createNaturalToReordered(m_levelSets)) , m_gpuMatrix(GpuSparseMatrix::fromMatrix(m_cpuMatrix, true)) , m_gpuNaturalToReorder(m_naturalToReordered) , m_gpuReorderToNatural(m_reorderedToNatural) , m_gpuDInv(m_gpuMatrix.N() * m_gpuMatrix.blockSize() * m_gpuMatrix.blockSize()) , m_splitMatrix(splitMatrix) , m_tuneThreadBlockSizes(tuneKernels) { // TODO: Should in some way verify that this matrix is symmetric, only do it debug mode? // Some sanity check OPM_ERROR_IF(A.N() != m_gpuMatrix.N(), fmt::format("CuSparse matrix not same size as DUNE matrix. {} vs {}.", m_gpuMatrix.N(), A.N())); OPM_ERROR_IF(A[0][0].N() != m_gpuMatrix.blockSize(), fmt::format("CuSparse matrix not same blocksize as DUNE matrix. {} vs {}.", m_gpuMatrix.blockSize(), A[0][0].N())); OPM_ERROR_IF(A.N() * A[0][0].N() != m_gpuMatrix.dim(), fmt::format("CuSparse matrix not same dimension as DUNE matrix. {} vs {}.", m_gpuMatrix.dim(), A.N() * A[0][0].N())); OPM_ERROR_IF(A.nonzeroes() != m_gpuMatrix.nonzeroes(), fmt::format("CuSparse matrix not same number of non zeroes as DUNE matrix. {} vs {}. ", m_gpuMatrix.nonzeroes(), A.nonzeroes())); if (m_splitMatrix) { m_gpuMatrixReorderedDiag = std::make_unique>(blocksize_ * blocksize_ * m_cpuMatrix.N()); std::tie(m_gpuMatrixReorderedLower, m_gpuMatrixReorderedUpper) = detail::extractLowerAndUpperMatrices>(m_cpuMatrix, m_reorderedToNatural); } else { m_gpuMatrixReordered = detail::createReorderedMatrix>( m_cpuMatrix, m_reorderedToNatural); } computeDiagAndMoveReorderedData(m_moveThreadBlockSize, m_DILUFactorizationThreadBlockSize); if (m_tuneThreadBlockSizes) { tuneThreadBlockSizes(); } } template void GpuDILU::pre([[maybe_unused]] X& x, [[maybe_unused]] Y& b) { } template void GpuDILU::apply(X& v, const Y& d) { OPM_TIMEBLOCK(prec_apply); { apply(v, d, m_lowerSolveThreadBlockSize, m_upperSolveThreadBlockSize); } } template void GpuDILU::apply(X& v, const Y& d, int lowerSolveThreadBlockSize, int upperSolveThreadBlockSize) { int levelStartIdx = 0; for (int level = 0; level < m_levelSets.size(); ++level) { const int numOfRowsInLevel = m_levelSets[level].size(); if (m_splitMatrix) { detail::DILU::solveLowerLevelSetSplit( m_gpuMatrixReorderedLower->getNonZeroValues().data(), m_gpuMatrixReorderedLower->getRowIndices().data(), m_gpuMatrixReorderedLower->getColumnIndices().data(), m_gpuReorderToNatural.data(), levelStartIdx, numOfRowsInLevel, m_gpuDInv.data(), d.data(), v.data(), lowerSolveThreadBlockSize); } else { detail::DILU::solveLowerLevelSet( m_gpuMatrixReordered->getNonZeroValues().data(), m_gpuMatrixReordered->getRowIndices().data(), m_gpuMatrixReordered->getColumnIndices().data(), m_gpuReorderToNatural.data(), levelStartIdx, numOfRowsInLevel, m_gpuDInv.data(), d.data(), v.data(), lowerSolveThreadBlockSize); } levelStartIdx += numOfRowsInLevel; } levelStartIdx = m_cpuMatrix.N(); // upper triangular solve: (D + U_A) v = Dy for (int level = m_levelSets.size() - 1; level >= 0; --level) { const int numOfRowsInLevel = m_levelSets[level].size(); levelStartIdx -= numOfRowsInLevel; if (m_splitMatrix) { detail::DILU::solveUpperLevelSetSplit( m_gpuMatrixReorderedUpper->getNonZeroValues().data(), m_gpuMatrixReorderedUpper->getRowIndices().data(), m_gpuMatrixReorderedUpper->getColumnIndices().data(), m_gpuReorderToNatural.data(), levelStartIdx, numOfRowsInLevel, m_gpuDInv.data(), v.data(), upperSolveThreadBlockSize); } else { detail::DILU::solveUpperLevelSet( m_gpuMatrixReordered->getNonZeroValues().data(), m_gpuMatrixReordered->getRowIndices().data(), m_gpuMatrixReordered->getColumnIndices().data(), m_gpuReorderToNatural.data(), levelStartIdx, numOfRowsInLevel, m_gpuDInv.data(), v.data(), upperSolveThreadBlockSize); } } } template void GpuDILU::post([[maybe_unused]] X& x) { } template Dune::SolverCategory::Category GpuDILU::category() const { return Dune::SolverCategory::sequential; } template void GpuDILU::update() { OPM_TIMEBLOCK(prec_update); { update(m_moveThreadBlockSize, m_DILUFactorizationThreadBlockSize); } } template void GpuDILU::update(int moveThreadBlockSize, int factorizationBlockSize) { m_gpuMatrix.updateNonzeroValues(m_cpuMatrix, true); // send updated matrix to the gpu computeDiagAndMoveReorderedData(moveThreadBlockSize, factorizationBlockSize); } template void GpuDILU::computeDiagAndMoveReorderedData(int moveThreadBlockSize, int factorizationBlockSize) { if (m_splitMatrix) { detail::copyMatDataToReorderedSplit( m_gpuMatrix.getNonZeroValues().data(), m_gpuMatrix.getRowIndices().data(), m_gpuMatrix.getColumnIndices().data(), m_gpuMatrixReorderedLower->getNonZeroValues().data(), m_gpuMatrixReorderedLower->getRowIndices().data(), m_gpuMatrixReorderedUpper->getNonZeroValues().data(), m_gpuMatrixReorderedUpper->getRowIndices().data(), m_gpuMatrixReorderedDiag->data(), m_gpuNaturalToReorder.data(), m_gpuMatrixReorderedLower->N(), moveThreadBlockSize); } else { detail::copyMatDataToReordered(m_gpuMatrix.getNonZeroValues().data(), m_gpuMatrix.getRowIndices().data(), m_gpuMatrixReordered->getNonZeroValues().data(), m_gpuMatrixReordered->getRowIndices().data(), m_gpuNaturalToReorder.data(), m_gpuMatrixReordered->N(), moveThreadBlockSize); } int levelStartIdx = 0; for (int level = 0; level < m_levelSets.size(); ++level) { const int numOfRowsInLevel = m_levelSets[level].size(); if (m_splitMatrix) { detail::DILU::computeDiluDiagonalSplit( m_gpuMatrixReorderedLower->getNonZeroValues().data(), m_gpuMatrixReorderedLower->getRowIndices().data(), m_gpuMatrixReorderedLower->getColumnIndices().data(), m_gpuMatrixReorderedUpper->getNonZeroValues().data(), m_gpuMatrixReorderedUpper->getRowIndices().data(), m_gpuMatrixReorderedUpper->getColumnIndices().data(), m_gpuMatrixReorderedDiag->data(), m_gpuReorderToNatural.data(), m_gpuNaturalToReorder.data(), levelStartIdx, numOfRowsInLevel, m_gpuDInv.data(), factorizationBlockSize); } else { detail::DILU::computeDiluDiagonal( m_gpuMatrixReordered->getNonZeroValues().data(), m_gpuMatrixReordered->getRowIndices().data(), m_gpuMatrixReordered->getColumnIndices().data(), m_gpuReorderToNatural.data(), m_gpuNaturalToReorder.data(), levelStartIdx, numOfRowsInLevel, m_gpuDInv.data(), factorizationBlockSize); } levelStartIdx += numOfRowsInLevel; } } template void GpuDILU::tuneThreadBlockSizes() { // tune the thread-block size of the update function auto tuneMoveThreadBlockSizeInUpdate = [this](int moveThreadBlockSize){ this->update(moveThreadBlockSize, m_DILUFactorizationThreadBlockSize); }; m_moveThreadBlockSize = detail::tuneThreadBlockSize(tuneMoveThreadBlockSizeInUpdate, "Kernel moving data to reordered matrix"); auto tuneFactorizationThreadBlockSizeInUpdate = [this](int factorizationThreadBlockSize){ this->update(m_moveThreadBlockSize, factorizationThreadBlockSize); }; m_DILUFactorizationThreadBlockSize = detail::tuneThreadBlockSize(tuneFactorizationThreadBlockSizeInUpdate, "Kernel computing DILU factorization"); // tune the thread-block size of the apply GpuVector tmpV(m_gpuMatrix.N() * m_gpuMatrix.blockSize()); GpuVector tmpD(m_gpuMatrix.N() * m_gpuMatrix.blockSize()); tmpD = 1; auto tuneLowerSolveThreadBlockSizeInApply = [this, &tmpV, &tmpD](int lowerSolveThreadBlockSize){ this->apply(tmpV, tmpD, lowerSolveThreadBlockSize, m_DILUFactorizationThreadBlockSize); }; m_lowerSolveThreadBlockSize = detail::tuneThreadBlockSize(tuneLowerSolveThreadBlockSizeInApply, "Kernel computing a lower triangular solve for a level set"); auto tuneUpperSolveThreadBlockSizeInApply = [this, &tmpV, &tmpD](int upperSolveThreadBlockSize){ this->apply(tmpV, tmpD, m_moveThreadBlockSize, upperSolveThreadBlockSize); }; m_upperSolveThreadBlockSize = detail::tuneThreadBlockSize(tuneUpperSolveThreadBlockSizeInApply, "Kernel computing an upper triangular solve for a level set"); } } // namespace Opm::gpuistl #define INSTANTIATE_CUDILU_DUNE(realtype, blockdim) \ template class ::Opm::gpuistl::GpuDILU>, \ ::Opm::gpuistl::GpuVector, \ ::Opm::gpuistl::GpuVector>; \ template class ::Opm::gpuistl::GpuDILU>, \ ::Opm::gpuistl::GpuVector, \ ::Opm::gpuistl::GpuVector> INSTANTIATE_CUDILU_DUNE(double, 1); INSTANTIATE_CUDILU_DUNE(double, 2); INSTANTIATE_CUDILU_DUNE(double, 3); INSTANTIATE_CUDILU_DUNE(double, 4); INSTANTIATE_CUDILU_DUNE(double, 5); INSTANTIATE_CUDILU_DUNE(double, 6); INSTANTIATE_CUDILU_DUNE(float, 1); INSTANTIATE_CUDILU_DUNE(float, 2); INSTANTIATE_CUDILU_DUNE(float, 3); INSTANTIATE_CUDILU_DUNE(float, 4); INSTANTIATE_CUDILU_DUNE(float, 5); INSTANTIATE_CUDILU_DUNE(float, 6);