Allow choosing the amgcl backend at runtime; add default amgcl parameters

This commit is contained in:
Tong Dong Qiu 2021-06-22 10:51:57 +02:00
parent 282f611f92
commit e556124405
2 changed files with 79 additions and 56 deletions

View File

@ -65,19 +65,37 @@ void amgclSolverBackend<block_size>::initialize(int N_, int nnz_, int dim, doubl
rhs.resize(N); rhs.resize(N);
x.resize(N); x.resize(N);
#if AMGCL_CUDA // try to read amgcl parameters via json file
cudaDeviceProp prop; std::string filename = "amgcl_options.json";
cudaGetDeviceProperties(&prop, deviceID); std::ifstream file(filename);
std::cout << prop.name << std::endl;
cusparseCreate(&bprm.cusparse_handle);
#endif
// read amgcl parameters via json file if (file.is_open()) { // if file exists, read parameters from file
std::ifstream file("amgcl_options.json"); boost::property_tree::read_json(file, prm);
boost::property_tree::read_json(file, prm);
// print json parameters backend_type_cuda = prm.get("backend_type_cuda", false); // defaults to false if not specified
boost::property_tree::write_json(std::cout, prm); out << "Using parameters from " << filename << ":\n";
} else { // otherwise use default parameters, same as Dune
prm.put("backend_type_cuda", false);
prm.put("precond.class", "relaxation");
prm.put("precond.type", "ilu0");
prm.put("precond.damping", 0.9);
prm.put("solver.type", "bicgstab");
prm.put("solver.tol", tolerance);
prm.put("solver.maxiter", maxit);
prm.put("solver.verbose", verbosity >= 2);
out << "Using default amgcl parameters:\n";
}
boost::property_tree::write_json(out, prm); // print amgcl parameters
prm.erase("backend_type_cuda"); // delete custom parameter, otherwise amgcl prints a warning
if (backend_type_cuda) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, deviceID);
out << prop.name << std::endl;
cusparseCreate(&CUDA_bprm.cusparse_handle);
}
OpmLog::info(out.str());
initialized = true; initialized = true;
} // end initialize() } // end initialize()
@ -146,46 +164,56 @@ void amgclSolverBackend<block_size>::solve_system(double *b, WellContributions &
double error = 0.0; double error = 0.0;
try { try {
// create matrix object if (backend_type_cuda) { // use CUDA
#if AMGCL_CUDA // create matrix object
auto A = std::tie(N, A_rows, A_cols, A_vals); auto A = std::tie(N, A_rows, A_cols, A_vals);
#else
auto Atmp = std::tie(N, A_rows, A_cols, A_vals);
auto A = amgcl::adapter::block_matrix<dmat_type>(Atmp);
#endif
// create solver and construct preconditioner // create solver and construct preconditioner
// don't reuse this unless the preconditioner can be reused // don't reuse this unless the preconditioner can be reused
Solver solve(A, prm, bprm); CUDA_Solver solve(A, prm, CUDA_bprm);
// print info (once) // print solver structure (once)
std::call_once(print_info, [&](){ std::call_once(print_info, [&](){
// print solver structure std::ostringstream out;
std::cout << solve << std::endl; out << solve << std::endl;
}); OpmLog::info(out.str());
});
#if AMGCL_CUDA thrust::device_vector<double> B(b, b + N);
thrust::device_vector<double> B(b, b + N); thrust::device_vector<double> X(N, 0.0);
thrust::device_vector<double> X(N, 0.0);
// actually solve // actually solve
std::tie(iters, error) = solve(B, X); std::tie(iters, error) = solve(B, X);
thrust::copy(X.begin(), X.end(), x.begin()); thrust::copy(X.begin(), X.end(), x.begin());
#else } else { // use builtin backend (CPU)
// reset x vector // create matrix object
std::fill(x.begin(), x.end(), 0.0); auto Atmp = std::tie(N, A_rows, A_cols, A_vals);
auto A = amgcl::adapter::block_matrix<dmat_type>(Atmp);
// create blocked vectors // create solver and construct preconditioner
auto b_ptr = reinterpret_cast<dvec_type*>(b); // don't reuse this unless the preconditioner can be reused
auto x_ptr = reinterpret_cast<dvec_type*>(x.data()); CPU_Solver solve(A, prm);
auto B = amgcl::make_iterator_range(b_ptr, b_ptr + N / block_size);
auto X = amgcl::make_iterator_range(x_ptr, x_ptr + N / block_size);
// actually solve // print solver structure (once)
std::tie(iters, error) = solve(B, X); std::call_once(print_info, [&](){
#endif std::ostringstream out;
out << solve << std::endl;
OpmLog::info(out.str());
});
// reset x vector
std::fill(x.begin(), x.end(), 0.0);
// create blocked vectors
auto b_ptr = reinterpret_cast<dvec_type*>(b);
auto x_ptr = reinterpret_cast<dvec_type*>(x.data());
auto B = amgcl::make_iterator_range(b_ptr, b_ptr + N / block_size);
auto X = amgcl::make_iterator_range(x_ptr, x_ptr + N / block_size);
// actually solve
std::tie(iters, error) = solve(B, X);
}
} catch (const std::exception& ex) { } catch (const std::exception& ex) {
std::cerr << "Caught exception: " << ex.what() << std::endl; std::cerr << "Caught exception: " << ex.what() << std::endl;
throw ex; throw ex;

View File

@ -43,8 +43,6 @@
#include <amgcl/relaxation/cusparse_ilu0.hpp> #include <amgcl/relaxation/cusparse_ilu0.hpp>
#endif #endif
#define AMGCL_CUDA 0
namespace bda namespace bda
{ {
@ -66,18 +64,13 @@ class amgclSolverBackend : public BdaSolver<block_size>
using Base::tolerance; using Base::tolerance;
using Base::initialized; using Base::initialized;
#if AMGCL_CUDA typedef amgcl::backend::cuda<double> CUDA_Backend;
#if !HAVE_CUDA
#error "Error amgcl is trying to use CUDA, but CUDA was not found by CMake"
#endif
typedef amgcl::backend::cuda<double> Backend;
#else
typedef amgcl::static_matrix<double, block_size, block_size> dmat_type; // matrix value type in double precision typedef amgcl::static_matrix<double, block_size, block_size> dmat_type; // matrix value type in double precision
typedef amgcl::static_matrix<double, block_size, 1> dvec_type; // the corresponding vector value type typedef amgcl::static_matrix<double, block_size, 1> dvec_type; // the corresponding vector value type
typedef amgcl::backend::builtin<dmat_type> Backend; typedef amgcl::backend::builtin<dmat_type> CPU_Backend;
#endif
typedef amgcl::make_solver<amgcl::runtime::preconditioner<Backend>, amgcl::runtime::solver::wrapper<Backend> > Solver; typedef amgcl::make_solver<amgcl::runtime::preconditioner<CUDA_Backend>, amgcl::runtime::solver::wrapper<CUDA_Backend> > CUDA_Solver;
typedef amgcl::make_solver<amgcl::runtime::preconditioner<CPU_Backend>, amgcl::runtime::solver::wrapper<CPU_Backend> > CPU_Solver;
private: private:
// store matrix in CSR format // store matrix in CSR format
@ -85,9 +78,11 @@ private:
std::vector<double> A_vals, rhs; std::vector<double> A_vals, rhs;
std::vector<double> x; std::vector<double> x;
std::once_flag print_info; std::once_flag print_info;
bool backend_type_cuda = false; // true if amgcl uses cuda, otherwise use cpu backend
// if more backend are supported (vexcl), turn into enum
boost::property_tree::ptree prm; boost::property_tree::ptree prm; // amgcl parameters
typename Backend::params bprm; // amgcl backend parameters, only used for cusparseHandle typename CUDA_Backend::params CUDA_bprm; // amgcl backend parameters, only used for cusparseHandle
/// Initialize GPU and allocate memory /// Initialize GPU and allocate memory
/// \param[in] N number of nonzeroes, divide by dim*dim to get number of blocks /// \param[in] N number of nonzeroes, divide by dim*dim to get number of blocks