Mirror of https://github.com/OPM/opm-simulators.git (synced 2024-11-22 09:16:27 -06:00)
refactor cujac
This commit is contained in:
parent d17ee3315b
commit 158619083e
@@ -548,7 +548,7 @@ if(CUDA_FOUND)
   cuda_safe_call
   cuda_check_last_error
   cublas_handle
-  cujac
+  GpuJac
   GpuDILU
   cusparse_handle
   cuSparse_matrix_operations
@@ -219,7 +219,7 @@ if (HAVE_CUDA)
   ADD_CUDA_OR_HIP_FILE(MAIN_SOURCE_FILES opm/simulators/linalg CuSparseMatrix.cpp)
   ADD_CUDA_OR_HIP_FILE(MAIN_SOURCE_FILES opm/simulators/linalg GpuDILU.cpp)
   ADD_CUDA_OR_HIP_FILE(MAIN_SOURCE_FILES opm/simulators/linalg OpmCuILU0.cpp)
-  ADD_CUDA_OR_HIP_FILE(MAIN_SOURCE_FILES opm/simulators/linalg CuJac.cpp)
+  ADD_CUDA_OR_HIP_FILE(MAIN_SOURCE_FILES opm/simulators/linalg GpuJac.cpp)
   ADD_CUDA_OR_HIP_FILE(MAIN_SOURCE_FILES opm/simulators/linalg CuSeqILU0.cpp)
   ADD_CUDA_OR_HIP_FILE(MAIN_SOURCE_FILES opm/simulators/linalg set_device.cpp)
@@ -239,7 +239,7 @@ if (HAVE_CUDA)
   ADD_CUDA_OR_HIP_FILE(PUBLIC_HEADER_FILES opm/simulators/linalg detail/preconditionerKernels/JacKernels.hpp)
   ADD_CUDA_OR_HIP_FILE(PUBLIC_HEADER_FILES opm/simulators/linalg GpuDILU.hpp)
   ADD_CUDA_OR_HIP_FILE(PUBLIC_HEADER_FILES opm/simulators/linalg OpmCuILU0.hpp)
-  ADD_CUDA_OR_HIP_FILE(PUBLIC_HEADER_FILES opm/simulators/linalg CuJac.hpp)
+  ADD_CUDA_OR_HIP_FILE(PUBLIC_HEADER_FILES opm/simulators/linalg GpuJac.hpp)
   ADD_CUDA_OR_HIP_FILE(PUBLIC_HEADER_FILES opm/simulators/linalg CuVector.hpp)
   ADD_CUDA_OR_HIP_FILE(PUBLIC_HEADER_FILES opm/simulators/linalg CuView.hpp)
   ADD_CUDA_OR_HIP_FILE(PUBLIC_HEADER_FILES opm/simulators/linalg CuSparseMatrix.hpp)
@@ -395,7 +395,7 @@ if (HAVE_CUDA)
   ADD_CUDA_OR_HIP_FILE(TEST_SOURCE_FILES tests test_cuda_safe_call.cpp)
   ADD_CUDA_OR_HIP_FILE(TEST_SOURCE_FILES tests test_cuda_check_last_error.cpp)
   ADD_CUDA_OR_HIP_FILE(TEST_SOURCE_FILES tests test_GpuDILU.cpp)
-  ADD_CUDA_OR_HIP_FILE(TEST_SOURCE_FILES tests test_cujac.cpp)
+  ADD_CUDA_OR_HIP_FILE(TEST_SOURCE_FILES tests test_GpuJac.cpp)
   ADD_CUDA_OR_HIP_FILE(TEST_SOURCE_FILES tests test_cuowneroverlapcopy.cpp)
   ADD_CUDA_OR_HIP_FILE(TEST_SOURCE_FILES tests test_cuseqilu0.cpp)
   ADD_CUDA_OR_HIP_FILE(TEST_SOURCE_FILES tests test_cusparse_handle.cpp)
@@ -25,7 +25,7 @@
 #include <opm/simulators/linalg/hipistl/GpuBlockPreconditioner.hpp>
 #include <opm/simulators/linalg/hipistl/GpuDILU.hpp>
 #include <opm/simulators/linalg/hipistl/OpmCuILU0.hpp>
-#include <opm/simulators/linalg/hipistl/CuJac.hpp>
+#include <opm/simulators/linalg/hipistl/GpuJac.hpp>
 #include <opm/simulators/linalg/hipistl/CuSeqILU0.hpp>
 #include <opm/simulators/linalg/hipistl/PreconditionerAdapter.hpp>
 #include <opm/simulators/linalg/hipistl/PreconditionerConvertFieldTypeAdapter.hpp>
@@ -34,7 +34,7 @@
 #include <opm/simulators/linalg/cuistl/GpuBlockPreconditioner.hpp>
 #include <opm/simulators/linalg/cuistl/GpuDILU.hpp>
 #include <opm/simulators/linalg/cuistl/OpmCuILU0.hpp>
-#include <opm/simulators/linalg/cuistl/CuJac.hpp>
+#include <opm/simulators/linalg/cuistl/GpuJac.hpp>
 #include <opm/simulators/linalg/cuistl/CuSeqILU0.hpp>
 #include <opm/simulators/linalg/cuistl/PreconditionerAdapter.hpp>
 #include <opm/simulators/linalg/cuistl/PreconditionerConvertFieldTypeAdapter.hpp>
@@ -334,26 +334,26 @@ struct StandardPreconditioners {
             return wrapped;
         });

-        F::addCreator("CUJac", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
+        F::addCreator("GPUJAC", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
             const double w = prm.get<double>("relaxation", 1.0);
             using field_type = typename V::field_type;
-            using CuJac =
-                typename gpuistl::CuJac<M, gpuistl::CuVector<field_type>, gpuistl::CuVector<field_type>>;
-            auto cuJac = std::make_shared<CuJac>(op.getmat(), w);
+            using GpuJac =
+                typename gpuistl::GpuJac<M, gpuistl::CuVector<field_type>, gpuistl::CuVector<field_type>>;
+            auto gpuJac = std::make_shared<GpuJac>(op.getmat(), w);

-            auto adapted = std::make_shared<gpuistl::PreconditionerAdapter<V, V, CuJac>>(cuJac);
+            auto adapted = std::make_shared<gpuistl::PreconditionerAdapter<V, V, GpuJac>>(gpuJac);
             auto wrapped = std::make_shared<gpuistl::GpuBlockPreconditioner<V, V, Comm>>(adapted, comm);
             return wrapped;
         });

-        F::addCreator("CUDILU", [](const O& op, [[maybe_unused]] const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
+        F::addCreator("GPUDILU", [](const O& op, [[maybe_unused]] const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
             const bool split_matrix = prm.get<bool>("split_matrix", true);
             const bool tune_gpu_kernels = prm.get<bool>("tune_gpu_kernels", true);
             using field_type = typename V::field_type;
             using GpuDILU = typename gpuistl::GpuDILU<M, gpuistl::CuVector<field_type>, gpuistl::CuVector<field_type>>;
-            auto cuDILU = std::make_shared<GpuDILU>(op.getmat(), split_matrix, tune_gpu_kernels);
+            auto gpuDILU = std::make_shared<GpuDILU>(op.getmat(), split_matrix, tune_gpu_kernels);

-            auto adapted = std::make_shared<gpuistl::PreconditionerAdapter<V, V, GpuDILU>>(cuDILU);
+            auto adapted = std::make_shared<gpuistl::PreconditionerAdapter<V, V, GpuDILU>>(gpuDILU);
             auto wrapped = std::make_shared<gpuistl::GpuBlockPreconditioner<V, V, Comm>>(adapted, comm);
             return wrapped;
         });
@@ -607,13 +607,13 @@ struct StandardPreconditioners<Operator, Dune::Amg::SequentialInformation> {
             return converted;
         });

-        F::addCreator("CUJac", [](const O& op, const P& prm, const std::function<V()>&, std::size_t) {
+        F::addCreator("GPUJAC", [](const O& op, const P& prm, const std::function<V()>&, std::size_t) {
             const double w = prm.get<double>("relaxation", 1.0);
             using field_type = typename V::field_type;
-            using CUJac =
-                typename gpuistl::CuJac<M, gpuistl::CuVector<field_type>, gpuistl::CuVector<field_type>>;
-            return std::make_shared<gpuistl::PreconditionerAdapter<V, V, CUJac>>(
-                std::make_shared<CUJac>(op.getmat(), w));
+            using GPUJac =
+                typename gpuistl::GpuJac<M, gpuistl::CuVector<field_type>, gpuistl::CuVector<field_type>>;
+            return std::make_shared<gpuistl::PreconditionerAdapter<V, V, GPUJac>>(
+                std::make_shared<GPUJac>(op.getmat(), w));
         });

         F::addCreator("OPMCUILU0", [](const O& op, [[maybe_unused]] const P& prm, const std::function<V()>&, std::size_t) {
@@ -625,15 +625,15 @@ struct StandardPreconditioners<Operator, Dune::Amg::SequentialInformation> {
             return std::make_shared<gpuistl::PreconditionerAdapter<V, V, CUILU0>>(std::make_shared<CUILU0>(op.getmat(), split_matrix, tune_gpu_kernels));
         });

-        F::addCreator("CUDILU", [](const O& op, [[maybe_unused]] const P& prm, const std::function<V()>&, std::size_t) {
+        F::addCreator("GPUDILU", [](const O& op, [[maybe_unused]] const P& prm, const std::function<V()>&, std::size_t) {
             const bool split_matrix = prm.get<bool>("split_matrix", true);
             const bool tune_gpu_kernels = prm.get<bool>("tune_gpu_kernels", true);
             using field_type = typename V::field_type;
-            using CUDILU = typename gpuistl::GpuDILU<M, gpuistl::CuVector<field_type>, gpuistl::CuVector<field_type>>;
-            return std::make_shared<gpuistl::PreconditionerAdapter<V, V, CUDILU>>(std::make_shared<CUDILU>(op.getmat(), split_matrix, tune_gpu_kernels));
+            using GPUDILU = typename gpuistl::GpuDILU<M, gpuistl::CuVector<field_type>, gpuistl::CuVector<field_type>>;
+            return std::make_shared<gpuistl::PreconditionerAdapter<V, V, GPUDILU>>(std::make_shared<GPUDILU>(op.getmat(), split_matrix, tune_gpu_kernels));
         });

-        F::addCreator("CUDILUFloat", [](const O& op, [[maybe_unused]] const P& prm, const std::function<V()>&, std::size_t) {
+        F::addCreator("GPUDILUFloat", [](const O& op, [[maybe_unused]] const P& prm, const std::function<V()>&, std::size_t) {
             const bool split_matrix = prm.get<bool>("split_matrix", true);
             const bool tune_gpu_kernels = prm.get<bool>("tune_gpu_kernels", true);
             using block_type = typename V::block_type;
@@ -20,7 +20,7 @@
 #include <dune/istl/bcrsmatrix.hh>
 #include <fmt/core.h>
 #include <opm/common/ErrorMacros.hpp>
-#include <opm/simulators/linalg/cuistl/CuJac.hpp>
+#include <opm/simulators/linalg/cuistl/GpuJac.hpp>
 #include <opm/simulators/linalg/cuistl/CuVector.hpp>
 #include <opm/simulators/linalg/cuistl/detail/preconditionerKernels/JacKernels.hpp>
 #include <opm/simulators/linalg/cuistl/detail/vector_operations.hpp>
@@ -30,7 +30,7 @@ namespace Opm::gpuistl
 {

 template <class M, class X, class Y, int l>
-CuJac<M, X, Y, l>::CuJac(const M& A, field_type w)
+GpuJac<M, X, Y, l>::GpuJac(const M& A, field_type w)
     : m_cpuMatrix(A)
     , m_relaxationFactor(w)
     , m_gpuMatrix(CuSparseMatrix<field_type>::fromMatrix(A))
@@ -58,13 +58,13 @@ CuJac<M, X, Y, l>::CuJac(const M& A, field_type w)

 template <class M, class X, class Y, int l>
 void
-CuJac<M, X, Y, l>::pre([[maybe_unused]] X& x, [[maybe_unused]] Y& b)
+GpuJac<M, X, Y, l>::pre([[maybe_unused]] X& x, [[maybe_unused]] Y& b)
 {
 }

 template <class M, class X, class Y, int l>
 void
-CuJac<M, X, Y, l>::apply(X& v, const Y& d)
+GpuJac<M, X, Y, l>::apply(X& v, const Y& d)
 {
     // Jacobi preconditioner: x_{n+1} = x_n + w * (D^-1 * (b - Ax_n) )
     // Working with defect d and update v it we only need to set v = w*(D^-1)*d
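For reference (not part of the diff): the two comment lines above are the usual damped-Jacobi algebra. Writing the current iterate as x_n, the defect as d = b - A x_n, and the update as v = x_{n+1} - x_n, the step

    x_{n+1} = x_n + \omega \, D^{-1} (b - A x_n)

collapses to

    v = \omega \, D^{-1} d ,

so apply() only has to multiply the incoming defect by the inverted block diagonal D^{-1} and scale by the relaxation factor \omega (w in the code); no residual recomputation is needed inside the preconditioner.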
@@ -77,20 +77,20 @@ CuJac<M, X, Y, l>::apply(X& v, const Y& d)

 template <class M, class X, class Y, int l>
 void
-CuJac<M, X, Y, l>::post([[maybe_unused]] X& x)
+GpuJac<M, X, Y, l>::post([[maybe_unused]] X& x)
 {
 }

 template <class M, class X, class Y, int l>
 Dune::SolverCategory::Category
-CuJac<M, X, Y, l>::category() const
+GpuJac<M, X, Y, l>::category() const
 {
     return Dune::SolverCategory::sequential;
 }

 template <class M, class X, class Y, int l>
 void
-CuJac<M, X, Y, l>::update()
+GpuJac<M, X, Y, l>::update()
 {
     m_gpuMatrix.updateNonzeroValues(m_cpuMatrix);
     invertDiagonalAndFlatten();
@@ -98,7 +98,7 @@ CuJac<M, X, Y, l>::update()

 template <class M, class X, class Y, int l>
 void
-CuJac<M, X, Y, l>::invertDiagonalAndFlatten()
+GpuJac<M, X, Y, l>::invertDiagonalAndFlatten()
 {
     detail::JAC::invertDiagonalAndFlatten<field_type, matrix_type::block_type::cols>(
         m_gpuMatrix.getNonZeroValues().data(),
@@ -110,10 +110,10 @@ CuJac<M, X, Y, l>::invertDiagonalAndFlatten()

 } // namespace Opm::gpuistl

 #define INSTANTIATE_CUJAC_DUNE(realtype, blockdim) \
-    template class ::Opm::gpuistl::CuJac<Dune::BCRSMatrix<Dune::FieldMatrix<realtype, blockdim, blockdim>>, \
+    template class ::Opm::gpuistl::GpuJac<Dune::BCRSMatrix<Dune::FieldMatrix<realtype, blockdim, blockdim>>, \
         ::Opm::gpuistl::CuVector<realtype>, \
         ::Opm::gpuistl::CuVector<realtype>>; \
-    template class ::Opm::gpuistl::CuJac<Dune::BCRSMatrix<Opm::MatrixBlock<realtype, blockdim, blockdim>>, \
+    template class ::Opm::gpuistl::GpuJac<Dune::BCRSMatrix<Opm::MatrixBlock<realtype, blockdim, blockdim>>, \
         ::Opm::gpuistl::CuVector<realtype>, \
         ::Opm::gpuistl::CuVector<realtype>>
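The macro keeps its INSTANTIATE_CUJAC_DUNE spelling but now instantiates the renamed class. The actual invocations lie outside this hunk; a hypothetical example of how such a macro is typically used at the end of the translation unit, with the block dimension chosen purely for illustration:

INSTANTIATE_CUJAC_DUNE(double, 2);
INSTANTIATE_CUJAC_DUNE(float, 2);

Each invocation expands to two explicit template class instantiations of Opm::gpuistl::GpuJac, one for Dune::FieldMatrix blocks and one for Opm::MatrixBlock blocks of the given scalar type and block size.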
@@ -16,8 +16,8 @@
   You should have received a copy of the GNU General Public License
   along with OPM. If not, see <http://www.gnu.org/licenses/>.
 */
-#ifndef OPM_CUJAC_HPP
-#define OPM_CUJAC_HPP
+#ifndef OPM_GPUJAC_HPP
+#define OPM_GPUJAC_HPP

 #include <dune/istl/preconditioner.hh>
 #include <opm/simulators/linalg/PreconditionerWithUpdate.hpp>
@@ -43,7 +43,7 @@ namespace Opm::gpuistl
 //! \note We assume X and Y are both CuVector<real_type>, but we leave them as template
 //! arguments in case of future additions.
 template <class M, class X, class Y, int l = 1>
-class CuJac : public Dune::PreconditionerWithUpdate<X, Y>
+class GpuJac : public Dune::PreconditionerWithUpdate<X, Y>
 {
 public:
     //! \brief The matrix type the preconditioner is for.
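Only the class name changes; the interface stays the same: construct from a CPU-side matrix and a relaxation factor, then drive it through PreconditionerAdapter with ordinary Dune vectors. A minimal usage sketch, assuming an OPM build with the cuistl headers available; it mirrors the construction in the updated unit test later in this commit, and the 2x2 diagonal matrix and all values are illustrative only:

// Illustrative sketch only; sizes and values are made up for this example.
#include <memory>
#include <dune/common/fmatrix.hh>
#include <dune/common/fvector.hh>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/bvector.hh>
#include <opm/simulators/linalg/cuistl/CuVector.hpp>
#include <opm/simulators/linalg/cuistl/GpuJac.hpp>
#include <opm/simulators/linalg/cuistl/PreconditionerAdapter.hpp>

int main()
{
    constexpr int blocksize = 1;
    using M = Dune::FieldMatrix<double, blocksize, blocksize>;
    using SpMatrix = Dune::BCRSMatrix<M>;
    using Vector = Dune::BlockVector<Dune::FieldVector<double, blocksize>>;
    using GpuJac = Opm::gpuistl::GpuJac<SpMatrix, Opm::gpuistl::CuVector<double>, Opm::gpuistl::CuVector<double>>;

    // 2x2 diagonal matrix so the damped Jacobi step is easy to verify by hand.
    SpMatrix A(2, 2, 2, SpMatrix::row_wise);
    for (auto row = A.createbegin(); row != A.createend(); ++row) {
        row.insert(row.index());
    }
    A[0][0][0][0] = 2.0;
    A[1][1][0][0] = 4.0;

    // The adapter lets the GPU-side preconditioner consume ordinary Dune block vectors.
    auto gpuJac = std::make_shared<GpuJac>(A, 0.5);
    auto jac = Opm::gpuistl::PreconditionerAdapter<Vector, Vector, GpuJac>(gpuJac);

    Vector v(2), d(2);
    d[0] = 2.0;
    d[1] = 4.0;
    jac.apply(v, d); // v = 0.5 * D^{-1} * d = {0.5, 0.5}

    // After the CPU-side matrix changes, update() re-uploads the nonzeros and
    // re-inverts the block diagonal (see GpuJac::update in this commit).
    A[0][0][0][0] = 8.0;
    gpuJac->update();
    jac.apply(v, d); // now v[0] = 0.5 * (1/8) * 2 = 0.125
    return 0;
}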
@@ -61,7 +61,7 @@ public:
     //! \param A The matrix to operate on.
     //! \param w The relaxation factor.
     //!
-    CuJac(const M& A, field_type w);
+    GpuJac(const M& A, field_type w);

     //! \brief Prepare the preconditioner.
     //! \note Does nothing at the time being.
@@ -18,12 +18,12 @@
 */
 #include <config.h>

-#define BOOST_TEST_MODULE TestCuJac
+#define BOOST_TEST_MODULE TestGpuJac
 #include <boost/mpl/list.hpp>
 #include <boost/test/unit_test.hpp>
 #include <cuda_runtime.h>
 #include <dune/istl/bcrsmatrix.hh>
-#include <opm/simulators/linalg/cuistl/CuJac.hpp>
+#include <opm/simulators/linalg/cuistl/GpuJac.hpp>
 #include <opm/simulators/linalg/cuistl/CuSparseMatrix.hpp>
 #include <opm/simulators/linalg/cuistl/CuVector.hpp>
 #include <opm/simulators/linalg/cuistl/PreconditionerAdapter.hpp>
@@ -33,7 +33,7 @@

 using NumericTypes = boost::mpl::list<double, float>;

-BOOST_AUTO_TEST_CASE_TEMPLATE(CUJACApplyBlocksize2, T, NumericTypes)
+BOOST_AUTO_TEST_CASE_TEMPLATE(GPUJACApplyBlocksize2, T, NumericTypes)
 {
     /*
       Test data to validate jacobi preconditioner, expected result is x_1, and relaxation factor is 0.5
@@ -49,7 +49,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(CUJACApplyBlocksize2, T, NumericTypes)
     using M = Dune::FieldMatrix<T, blocksize, blocksize>;
     using SpMatrix = Dune::BCRSMatrix<M>;
     using Vector = Dune::BlockVector<Dune::FieldVector<T, blocksize>>;
-    using CuJac = Opm::gpuistl::CuJac<SpMatrix, Opm::gpuistl::CuVector<T>, Opm::gpuistl::CuVector<T>>;
+    using GpuJac = Opm::gpuistl::GpuJac<SpMatrix, Opm::gpuistl::CuVector<T>, Opm::gpuistl::CuVector<T>>;

     SpMatrix B(N, N, nonZeroes, SpMatrix::row_wise);
     for (auto row = B.createbegin(); row != B.createend(); ++row) {
@@ -70,7 +70,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(CUJACApplyBlocksize2, T, NumericTypes)
     B[1][1][0][0] = -1.0;
     B[1][1][1][1] = -1.0;

-    auto cujac = Opm::gpuistl::PreconditionerAdapter<Vector, Vector, CuJac>(std::make_shared<CuJac>(B, 0.5));
+    auto gpujac = Opm::gpuistl::PreconditionerAdapter<Vector, Vector, GpuJac>(std::make_shared<GpuJac>(B, 0.5));

     Vector vVector(2);
     Vector dVector(2);
@@ -81,14 +81,14 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(CUJACApplyBlocksize2, T, NumericTypes)

     const T expectedAns[2][2] = {{1.0 / 2.0, -1.0 / 2.0}, {-3.0 / 2.0, -2.0}};

-    cujac.apply(vVector, dVector);
+    gpujac.apply(vVector, dVector);
     BOOST_CHECK_CLOSE(vVector[0][0], expectedAns[0][0], 1e-7);
     BOOST_CHECK_CLOSE(vVector[0][1], expectedAns[0][1], 1e-7);
     BOOST_CHECK_CLOSE(vVector[1][0], expectedAns[1][0], 1e-7);
     BOOST_CHECK_CLOSE(vVector[1][1], expectedAns[1][1], 1e-7);
 }

-BOOST_AUTO_TEST_CASE_TEMPLATE(CUJACApplyBlocksize1, T, NumericTypes)
+BOOST_AUTO_TEST_CASE_TEMPLATE(GPUJACApplyBlocksize1, T, NumericTypes)
 {
     /*
       Test data to validate jacobi preconditioner, expected result is x_1, relaxation factor is 0.5
@@ -103,7 +103,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(CUJACApplyBlocksize1, T, NumericTypes)
     using M = Dune::FieldMatrix<T, blocksize, blocksize>;
     using SpMatrix = Dune::BCRSMatrix<M>;
     using Vector = Dune::BlockVector<Dune::FieldVector<T, blocksize>>;
-    using CuJac = Opm::gpuistl::CuJac<SpMatrix, Opm::gpuistl::CuVector<T>, Opm::gpuistl::CuVector<T>>;
+    using GpuJac = Opm::gpuistl::GpuJac<SpMatrix, Opm::gpuistl::CuVector<T>, Opm::gpuistl::CuVector<T>>;

     SpMatrix B(N, N, nonZeroes, SpMatrix::row_wise);
     for (auto row = B.createbegin(); row != B.createend(); ++row) {
@@ -129,7 +129,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(CUJACApplyBlocksize1, T, NumericTypes)
     B[2][2][0][0] = -1.0;
     B[3][3][0][0] = -1.0;

-    auto cujac = Opm::gpuistl::PreconditionerAdapter<Vector, Vector, CuJac>(std::make_shared<CuJac>(B, 0.5));
+    auto gpujac = Opm::gpuistl::PreconditionerAdapter<Vector, Vector, GpuJac>(std::make_shared<GpuJac>(B, 0.5));

     Vector vVector(4);
     Vector dVector(4);
@@ -140,7 +140,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(CUJACApplyBlocksize1, T, NumericTypes)

     const T expectedAns[4] = {1.0 / 3.0, 1.0 / 2.0, -3.0 / 2.0, -2.0};

-    cujac.apply(vVector, dVector);
+    gpujac.apply(vVector, dVector);
     BOOST_CHECK_CLOSE(vVector[0], expectedAns[0], 1e-7);
     BOOST_CHECK_CLOSE(vVector[1], expectedAns[1], 1e-7);
     BOOST_CHECK_CLOSE(vVector[2], expectedAns[2], 1e-7);
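A quick sanity check using only the values visible in these hunks (the remaining matrix and defect entries are set in lines outside the diff): for scalar blocks the preconditioner computes v_i = 0.5 * d_i / B[i][i], so with B[2][2] = B[3][3] = -1.0 the expected entries -3/2 and -2 correspond to defect entries of 3 and 4 in those rows; the first two expected values, 1/3 and 1/2, depend on diagonal and defect entries that are not shown here.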
@@ -23,7 +23,7 @@
 #include <boost/test/unit_test.hpp>
 #include <cuda_runtime.h>
 #include <dune/istl/bcrsmatrix.hh>
-#include <opm/simulators/linalg/cuistl/CuJac.hpp>
+#include <opm/simulators/linalg/cuistl/GpuJac.hpp>
 #include <opm/simulators/linalg/cuistl/CuVector.hpp>
 #include <opm/simulators/linalg/cuistl/PreconditionerAdapter.hpp>
 #include <opm/simulators/linalg/cuistl/detail/cusparse_matrix_operations.hpp>