Added support for the parallel solvers of dune-istl.

To support this, the solveSystem method of the LinearSolverInterface gets an
additional, optional parameter of type boost::any. It can hold any
copy-constructible object. In our case it is used to pass the information
about the parallelization into the solvers of dune-istl without the compiler
needing to know its type. Inside LinearSolverIstl::solveSystem we check
whether the object stored in the boost::any is of type ParallelIstlInformation.
If it is, we extract the information and use the parallel solvers where
available; otherwise we solve sequentially.

The new ParallelIstlInformation class is needed because
OwnerOverlapCopyCommunication is not copy-constructible. This is a design flaw
that should be, and will be, fixed upstream, but for the time being we need
ParallelIstlInformation to transfer the ParallelIndexSet and RemoteIndices
objects.
Markus Blatt 2014-03-20 21:50:09 +01:00
parent cec51280e7
commit bdc4573e2a
9 changed files with 492 additions and 28 deletions
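For illustration, the boost::any dispatch described in the commit message
reduces to the following pattern (a minimal, self-contained sketch using a
stand-in ParallelInfo struct instead of the real ParallelIstlInformation and
DUNE types):

#include <boost/any.hpp>
#include <iostream>
#include <typeinfo>

// Stand-in for ParallelIstlInformation: the only requirement boost::any
// places on its payload is that it is copy-constructible.
struct ParallelInfo { int rank; };

// Mirrors the shape of the new solveSystem signature: the defaulted
// boost::any parameter lets existing serial call sites stay unchanged.
void solveSystem(const boost::any& comm = boost::any())
{
    if (comm.type() == typeid(ParallelInfo)) {
        // A payload is present: extract it and take the parallel code path.
        const ParallelInfo& info = boost::any_cast<const ParallelInfo&>(comm);
        std::cout << "parallel path, rank " << info.rank << "\n";
    } else {
        // Empty any (the default argument): fall back to the sequential path.
        std::cout << "sequential path\n";
    }
}

int main()
{
    solveSystem();                    // empty any -> sequential
    ParallelInfo info = { 0 };
    solveSystem(boost::any(info));    // payload present -> parallel
    return 0;
}

The important property is that the type check happens at runtime, so headers
like LinearSolverInterface.hpp never have to mention any DUNE parallel types.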

View File

@@ -97,9 +97,10 @@ namespace Opm
                               const int* ja,
                               const double* sa,
                               const double* rhs,
-                              double* solution) const
+                              double* solution,
+                              const boost::any& add) const
     {
-        return solver_->solve(size, nonzeros, ia, ja, sa, rhs, solution);
+        return solver_->solve(size, nonzeros, ia, ja, sa, rhs, solution, add);
     }

     void LinearSolverFactory::setTolerance(const double tol)

View File

@@ -74,7 +74,8 @@ namespace Opm
                              const int* ja,
                              const double* sa,
                              const double* rhs,
-                             double* solution) const;
+                             double* solution,
+                             const boost::any& add=boost::any()) const;

         /// Set tolerance for the linear solver.
         /// \param[in] tol tolerance value

View File

@@ -20,6 +20,7 @@
 #ifndef OPM_LINEARSOLVERINTERFACE_HEADER_INCLUDED
 #define OPM_LINEARSOLVERINTERFACE_HEADER_INCLUDED

+#include <boost/any.hpp>

 struct CSRMatrix;

@@ -69,7 +70,8 @@ namespace Opm
                              const int* ja,
                              const double* sa,
                              const double* rhs,
-                             double* solution) const = 0;
+                             double* solution,
+                             const boost::any& add=boost::any()) const = 0;

         /// Set tolerance for the linear solver.
         /// \param[in] tol tolerance value

View File

@@ -23,6 +23,7 @@
 #endif
 #include <opm/core/linalg/LinearSolverIstl.hpp>
+#include <opm/core/linalg/ParallelIstlInformation.hpp>

 // Silence compatibility warning from DUNE headers since we don't use
 // the deprecated member anyway (in this compilation unit)

@@ -35,7 +36,9 @@
 #include <dune/istl/bcrsmatrix.hh>
 #include <dune/istl/operators.hh>
 #include <dune/istl/io.hh>
+#include <dune/istl/owneroverlapcopy.hh>
 #include <dune/istl/preconditioners.hh>
+#include <dune/istl/schwarz.hh>
 #include <dune/istl/solvers.hh>
 #include <dune/istl/paamg/amg.hh>
 #include <dune/istl/paamg/kamg.hh>

@@ -134,7 +137,8 @@ namespace Opm
                              const int* ja,
                              const double* sa,
                              const double* rhs,
-                             double* solution) const
+                             double* solution,
+                             const boost::any& comm) const
     {
         // Build Istl structures from input.
         // System matrix
@@ -173,10 +177,26 @@ namespace Opm
         if (maxit == 0) {
             maxit = 5000;
         }
-        Dune::SeqScalarProduct<Vector> sp;
-        Dune::Amg::SequentialInformation comm;
-        Operator opA(A);
-        return solveSystem(opA, x, solution, b, sp, comm, maxit);
+#if HAVE_MPI
+        if (comm.type() == typeid(ParallelISTLInformation))
+        {
+            typedef Dune::OwnerOverlapCopyCommunication<int,int> Comm;
+            const ParallelISTLInformation& info =
+                boost::any_cast<const ParallelISTLInformation&>(comm);
+            Comm istlComm(info.communicator());
+            info.copyValuesTo(istlComm.indexSet(), istlComm.remoteIndices());
+            Dune::OverlappingSchwarzOperator<Mat,Vector,Vector,Comm>
+                opA(A, istlComm);
+            Dune::OverlappingSchwarzScalarProduct<Vector,Comm> sp(istlComm);
+            return solveSystem(opA, x, solution, b, sp, istlComm, maxit);
+        }
+        else
+#endif
+        {
+            Dune::SeqScalarProduct<Vector> sp;
+            Dune::Amg::SequentialInformation comm;
+            Operator opA(A);
+            return solveSystem(opA, x, solution, b, sp, comm, maxit);
+        }
     }

 template<class O, class V, class S, class C>
@@ -237,24 +257,44 @@ template<class O, class V, class S, class C>
 namespace
 {
+    template<class P, class O, class C>
+    struct SmootherChooser
+    {
+        typedef P Type;
+    };
+
+#if HAVE_MPI
+    template<class P, class O>
+    struct SmootherChooser<P, O, Dune::OwnerOverlapCopyCommunication<int,int> >
+    {
+        typedef Dune::OwnerOverlapCopyCommunication<int,int> Comm;
+        typedef Dune::BlockPreconditioner<typename O::domain_type,
+                                          typename O::range_type,
+                                          Comm, P> Type;
+    };
+#endif
+
     template<class P, class O, class C>
     struct PreconditionerTraits
     {
-        typedef std::shared_ptr<P> PointerType;
+        typedef typename SmootherChooser<P,O,C>::Type SmootherType;
+        typedef std::shared_ptr<SmootherType> PointerType;
     };

     template<class P, class O, class C>
-    typename PreconditionerTraits<P,O>::PointerType
+    typename PreconditionerTraits<P,O,C>::PointerType
     makePreconditioner(O& opA, double relax, const C& comm, int iterations=1)
     {
-        typename Dune::Amg::SmootherTraits<P>::Arguments args;
-        typename Dune::Amg::ConstructionTraits<P>::Arguments cargs;
+        typedef typename SmootherChooser<P,O,C>::Type SmootherType;
+        typedef typename PreconditionerTraits<P,O,C>::PointerType PointerType;
+        typename Dune::Amg::SmootherTraits<SmootherType>::Arguments args;
+        typename Dune::Amg::ConstructionTraits<SmootherType>::Arguments cargs;
         cargs.setMatrix(opA.getmat());
         args.iterations=iterations;
         args.relaxationFactor=relax;
         cargs.setArgs(args);
+        cargs.setComm(comm);
-        return std::shared_ptr<P>(Dune::Amg::ConstructionTraits<P>::construct(cargs));
+        return PointerType(Dune::Amg::ConstructionTraits<SmootherType>::construct(cargs));
     }

 template<class O, class S, class C>
@@ -322,19 +362,20 @@ template<class O, class V, class S, class C>
 #endif
 #if SMOOTHER_ILU
-    typedef Dune::SeqILU0<Mat,Vector,Vector> Smoother;
+    typedef Dune::SeqILU0<Mat,Vector,Vector> SeqSmoother;
 #else
-    typedef Dune::SeqSOR<Mat,Vector,Vector> Smoother;
+    typedef Dune::SeqSOR<Mat,Vector,Vector> SeqSmoother;
 #endif
+    typedef typename SmootherChooser<SeqSmoother, O, C>::Type Smoother;
     typedef Dune::Amg::CoarsenCriterion<CriterionBase> Criterion;
-    typedef Dune::Amg::AMG<Operator,Vector,Smoother> Precond;
+    typedef Dune::Amg::AMG<O,Vector,Smoother,C> Precond;

     // Construct preconditioner.
     Criterion criterion;
-    Precond::SmootherArgs smootherArgs;
+    typename Precond::SmootherArgs smootherArgs;
     setUpCriterion(criterion, linsolver_prolongate_factor, verbosity,
                    linsolver_smooth_steps);
-    Precond precond(opA, criterion, smootherArgs);
+    Precond precond(opA, criterion, smootherArgs, comm);

     // Construct linear solver.
     Dune::CGSolver<Vector> linsolve(opA, sp, precond, tolerance, maxit, verbosity);
@@ -359,6 +400,7 @@ template<class O, class V, class S, class C>
                double linsolver_prolongate_factor, int linsolver_smooth_steps)
 {
     // Solve with AMG solver.
+    Dune::MatrixAdapter<typename O::matrix_type,Vector,Vector> sOpA(opA.getmat());
 #if FIRST_DIAGONAL
     typedef Dune::Amg::FirstDiagonal CouplingMetric;

@@ -385,10 +427,10 @@ template<class O, class V, class S, class C>
     Criterion criterion;
     setUpCriterion(criterion, linsolver_prolongate_factor, verbosity,
                    linsolver_smooth_steps);
-    Precond precond(opA, criterion, smootherArgs);
+    Precond precond(sOpA, criterion, smootherArgs);

     // Construct linear solver.
-    Dune::GeneralizedPCGSolver<Vector> linsolve(opA, sp, precond, tolerance, maxit, verbosity);
+    Dune::GeneralizedPCGSolver<Vector> linsolve(sOpA, precond, tolerance, maxit, verbosity);

     // Solve system.
     Dune::InverseOperatorResult result;

@@ -408,6 +450,8 @@ template<class O, class V, class S, class C>
                double linsolver_prolongate_factor)
 {
     // Solve with AMG solver.
+    typedef Dune::MatrixAdapter<typename O::matrix_type, Vector, Vector> Operator;
+    Operator sOpA(opA.getmat());
 #if FIRST_DIAGONAL
     typedef Dune::Amg::FirstDiagonal CouplingMetric;

@@ -433,10 +477,10 @@ template<class O, class V, class S, class C>
     parms.setNoPreSmoothSteps(smooth_steps);
     parms.setNoPostSmoothSteps(smooth_steps);
     parms.setProlongationDampingFactor(linsolver_prolongate_factor);
-    Precond precond(opA, criterion, parms);
+    Precond precond(sOpA, criterion, parms);

     // Construct linear solver.
-    Dune::GeneralizedPCGSolver<Vector> linsolve(opA, sp, precond, tolerance, maxit, verbosity);
+    Dune::GeneralizedPCGSolver<Vector> linsolve(sOpA, precond, tolerance, maxit, verbosity);

     // Solve system.
     Dune::InverseOperatorResult result;

View File

@@ -24,12 +24,11 @@
 #include <opm/core/linalg/LinearSolverInterface.hpp>
 #include <opm/core/utility/parameters/ParameterGroup.hpp>
 #include <string>
+#include <boost/any.hpp>

 namespace Opm
 {

     /// Concrete class encapsulating some dune-istl linear solvers.
     class LinearSolverIstl : public LinearSolverInterface
     {

@@ -75,7 +74,8 @@ namespace Opm
                              const int* ja,
                              const double* sa,
                              const double* rhs,
-                             double* solution) const;
+                             double* solution,
+                             const boost::any& comm=boost::any()) const;

         /// Set tolerance for the residual in dune istl linear solver.
         /// \param[in] tol tolerance value

View File

@@ -46,7 +46,8 @@ namespace Opm
                              const int* ja,
                              const double* sa,
                              const double* rhs,
-                             double* solution) const
+                             double* solution,
+                             const boost::any&) const
     {
         CSRMatrix A = {
             (size_t)size,

View File

@@ -55,7 +55,8 @@ namespace Opm
                              const int* ja,
                              const double* sa,
                              const double* rhs,
-                             double* solution) const;
+                             double* solution,
+                             const boost::any& add=boost::any()) const;

         /// Set tolerance for the linear solver.
         /// \param[in] tol tolerance value

View File

@@ -0,0 +1,160 @@
/*
Copyright 2014 Dr. Markus Blatt - HPC-Simulation-Software & Services
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef OPM_PARALLELISTLINFORMATION_HEADER_INCLUDED
#define OPM_PARALLELISTLINFORMATION_HEADER_INCLUDED
#if HAVE_MPI && HAVE_DUNE_ISTL
#include "mpi.h"
#include <dune/istl/owneroverlapcopy.hh>
#include <dune/common/parallel/interface.hh>
#include <dune/common/parallel/communicator.hh>
#include <dune/common/enumset.hh>
#include <algorithm>
namespace Opm
{
/// \brief Class that encapsulates the parallelization information needed by the
/// ISTL solvers.
class ParallelISTLInformation
{
public:
/// \brief The type of the parallel index set used.
typedef Dune::OwnerOverlapCopyCommunication<int, int>::ParallelIndexSet ParallelIndexSet;
/// \brief The type of the remote indices information used.
typedef Dune::OwnerOverlapCopyCommunication<int, int>::RemoteIndices RemoteIndices;
/// \brief Constructs an empty parallel information object using MPI_COMM_WORLD
ParallelISTLInformation()
: indexSet_(new ParallelIndexSet),
remoteIndices_(new RemoteIndices(*indexSet_, *indexSet_, MPI_COMM_WORLD)),
communicator_(MPI_COMM_WORLD)
{}
/// \brief Constructs an empty parallel information object using a communicator.
/// \param communicator The communicator to use.
ParallelISTLInformation(MPI_Comm communicator)
: indexSet_(new ParallelIndexSet),
remoteIndices_(new RemoteIndices(*indexSet_, *indexSet_, communicator)),
communicator_(communicator)
{}
/// \brief Constructs a parallel information object from the specified information.
/// \param indexSet The parallel index set to use.
/// \param remoteIndices The remote indices information to use.
/// \param communicator The communicator to use.
ParallelISTLInformation(const std::shared_ptr<ParallelIndexSet>& indexSet,
const std::shared_ptr<RemoteIndices>& remoteIndices,
MPI_Comm communicator)
: indexSet_(indexSet), remoteIndices_(remoteIndices), communicator_(communicator)
{}
/// \brief Copy constructor.
///
/// The information will be shared by the two objects.
ParallelISTLInformation(const ParallelISTLInformation& other)
: indexSet_(other.indexSet_), remoteIndices_(other.remoteIndices_),
communicator_(other.communicator_)
{}
/// \brief Get a pointer to the underlying index set.
std::shared_ptr<ParallelIndexSet> indexSet() const
{
return indexSet_;
}
/// \brief Get a pointer to the remote indices information.
std::shared_ptr<RemoteIndices> remoteIndices() const
{
return remoteIndices_;
}
/// \brief Get the MPI communicator that we use.
MPI_Comm communicator() const
{
return communicator_;
}
/// \brief Copy the information stored to the specified objects.
/// \param[out] indexSet The object to store the index set in.
/// \param[out] remoteIndices The object to store the remote indices information in.
void copyValuesTo(ParallelIndexSet& indexSet, RemoteIndices& remoteIndices) const
{
indexSet.beginResize();
IndexSetInserter<ParallelIndexSet> inserter(indexSet);
std::for_each(indexSet_->begin(), indexSet_->end(), inserter);
indexSet.endResize();
remoteIndices.rebuild<false>();
}
/// \brief Communicate the dofs owned by us to the other processes.
///
/// Afterwards all associated dofs will contain the same data.
template<class T>
void copyOwnerToAll (const T& source, T& dest) const
{
typedef Dune::Combine<Dune::EnumItem<Dune::OwnerOverlapCopyAttributeSet::AttributeSet,
                                     Dune::OwnerOverlapCopyAttributeSet::owner>,
                      Dune::EnumItem<Dune::OwnerOverlapCopyAttributeSet::AttributeSet,
                                     Dune::OwnerOverlapCopyAttributeSet::overlap>,
                      Dune::OwnerOverlapCopyAttributeSet::AttributeSet> OwnerOverlapSet;
typedef Dune::Combine<OwnerOverlapSet,
                      Dune::EnumItem<Dune::OwnerOverlapCopyAttributeSet::AttributeSet,
                                     Dune::OwnerOverlapCopyAttributeSet::copy>,
                      Dune::OwnerOverlapCopyAttributeSet::AttributeSet> AllSet;
OwnerOverlapSet sourceFlags;
AllSet destFlags;
Dune::Interface interface(communicator_);
if(!remoteIndices_->isSynced())
remoteIndices_->rebuild<false>();
interface.build(*remoteIndices_,sourceFlags,destFlags);
Dune::BufferedCommunicator communicator;
communicator.template build<T>(interface);
communicator.template forward<CopyGatherScatter<T> >(source,dest);
communicator.free();
}
private:
/** \brief Gather/scatter callback for communication. */
template<typename T>
struct CopyGatherScatter
{
typedef typename Dune::CommPolicy<T>::IndexedType V;
static V gather(const T& a, std::size_t i)
{
return a[i];
}
static void scatter(T& a, V v, std::size_t i)
{
a[i] = v;
}
};
template<class T>
class IndexSetInserter
{
public:
typedef T ParallelIndexSet;
IndexSetInserter(ParallelIndexSet& indexSet)
: indexSet_(&indexSet)
{}
void operator()(const typename ParallelIndexSet::IndexPair& pair)
{
indexSet_->add(pair.global(), pair.local());
}
private:
ParallelIndexSet* indexSet_;
};
std::shared_ptr<ParallelIndexSet> indexSet_;
std::shared_ptr<RemoteIndices> remoteIndices_;
MPI_Comm communicator_;
};
} // end namespace Opm
#endif
#endif
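Before the test file below, here is a minimal usage sketch of
ParallelISTLInformation (assuming an MPI-enabled build with dune-istl, i.e.
HAVE_MPI and HAVE_DUNE_ISTL set; the index layout is a toy example, and the
full pattern is exercised by the test that follows):

#include <mpi.h>
#include <opm/core/linalg/ParallelIstlInformation.hpp>
#include <boost/any.hpp>
#include <vector>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    {
        typedef Dune::OwnerOverlapCopyAttributeSet GridAttributes;
        typedef Dune::ParallelLocalIndex<GridAttributes::AttributeSet> LocalIndex;

        Opm::ParallelISTLInformation info(MPI_COMM_WORLD);
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        // Toy index set: each rank owns a single unknown whose global id is
        // its rank; with no overlap entries the communication below is a no-op.
        info.indexSet()->beginResize();
        info.indexSet()->add(rank, LocalIndex(0, GridAttributes::owner, true));
        info.indexSet()->endResize();

        std::vector<double> v(1, rank + 1.0);
        info.copyOwnerToAll(v, v);

        // Copying is cheap (the index data is shared via shared_ptr), so the
        // object can be stored in a boost::any and passed to solve().
        boost::any anyComm(info);
        (void) anyComm;
    }
    MPI_Finalize();
    return 0;
}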

View File

@@ -0,0 +1,254 @@
/*
Copyright 2014 Dr. Markus Blatt - HPC-Simulation-Software & Services
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#if HAVE_DYNAMIC_BOOST_TEST
#define BOOST_TEST_DYN_LINK
#endif
#define NVERBOSE // to suppress our messages when throwing
#define BOOST_TEST_MODULE OPM-ParallelIterativeSolverTest
#include <boost/test/unit_test.hpp>
// MPI header
#if HAVE_MPI
#include <mpi.h>
#include <dune/common/parallel/indexset.hh>
#include <dune/common/parallel/communicator.hh>
#include <dune/common/parallel/remoteindices.hh>
#include <dune/common/mpicollectivecommunication.hh>
#include <dune/common/collectivecommunication.hh>
#include <dune/istl/owneroverlapcopy.hh>
#include <opm/core/linalg/ParallelIstlInformation.hpp>
#else
#error "This file needs to be compiled with MPI support!"
#endif
#include <opm/core/linalg/LinearSolverFactory.hpp>
#include <opm/core/utility/parameters/ParameterGroup.hpp>
#include <memory>
#include <cstdlib>
#include <string>
struct MPIFixture {
MPIFixture()
{
int m_argc = boost::unit_test::framework::master_test_suite().argc;
char** m_argv = boost::unit_test::framework::master_test_suite().argv;
MPI_Init(&m_argc, &m_argv);
}
~MPIFixture()
{
MPI_Finalize();
}
};
BOOST_GLOBAL_FIXTURE(MPIFixture);
struct MyMatrix
{
MyMatrix(std::size_t rows, std::size_t nnz)
: data(nnz, 0.0), rowStart(rows+1, -1),
colIndex(nnz, -1)
{}
MyMatrix()
: data(), rowStart(), colIndex()
{}
std::vector<double> data;
std::vector<int> rowStart;
std::vector<int> colIndex;
};
typedef int LocalId;
typedef int GlobalId;
typedef Dune::OwnerOverlapCopyCommunication<GlobalId,LocalId> Communication;
typedef Dune::OwnerOverlapCopyAttributeSet GridAttributes;
typedef GridAttributes::AttributeSet GridFlag;
typedef Dune::ParallelLocalIndex<GridFlag> LocalIndex;
/// \brief Sets up a parallel Laplacian.
///
/// The process stores the unknowns with indices in the range [start, end).
/// As we use an overlapping domain decomposition, the process owns only the
/// indices in the range [istart, iend). If each process used just the indices
/// in this range, they would form a partitioning of the whole index set.
/// \tparam I The type of the parallel index set (for convenience)
/// \param indexset The parallel index set for marking owner and copy region.
/// \param N The global number of unknowns of the system.
/// \param start The first index stored on this process
/// \param end One past the last index stored on this process
/// \param istart The first index that the process owns.
/// \param iend One past the last index the process owns.
template<class I>
std::shared_ptr<MyMatrix> create1DLaplacian(I& indexset, int N, int start, int end,
int istart, int iend)
{
indexset.beginResize();
MyMatrix* mm=new MyMatrix(end-start, (end-start)*3);
int nnz=0;
mm->rowStart[0]=0;
assert(start==0||start<istart);
assert(end==N||iend<end);
for(int row=start, localRow=0; row<end; row++, localRow++)
{
if(row<istart || row>=iend)
{
// We are in the overlap region of the grid
// therefore we setup the system such that
// right hand side will equal the left hand side
// of the linear system.
mm->colIndex[nnz]=localRow;
mm->data[nnz++]=1.0;
indexset.add(row, LocalIndex(localRow, GridAttributes::copy, true));
mm->rowStart[localRow+1]=nnz;
continue;
}
double dval=0;
if(row>0)
{
mm->colIndex[nnz]=localRow-1;
mm->data[nnz++]=-1;
dval+=1;
}
mm->colIndex[nnz]=localRow;
mm->data[nnz++]=2;//dval+(row<N-1);
if(row<N-1)
{
mm->colIndex[nnz]=localRow+1;
mm->data[nnz++]=-1;
dval+=1;
}
mm->rowStart[localRow+1]=nnz;
indexset.add(row, LocalIndex(localRow, GridAttributes::owner, true));
}
mm->data.resize(nnz);
mm->colIndex.resize(nnz);
indexset.endResize();
return std::shared_ptr<MyMatrix>(mm);
}
template<class O>
void createRandomVectors(O& pinfo, int NN, std::vector<double>& x, std::vector<double>& b,
const MyMatrix& mat)
{
x.resize(NN);
for(auto entry=x.begin(), end =x.end(); entry!=end; ++entry)
*entry=((double) (rand()%100))/10.0;
pinfo.copyOwnerToAll(x,x);
b.resize(NN);
// Construct the right hand side as b=A*x
std::fill(b.begin(), b.end(), 0.0);
for(std::size_t row=0; row<mat.rowStart.size()-1; ++row)
{
for(int i=mat.rowStart[row], end=mat.rowStart[row+1]; i!=end; ++i)
{
b[row]+= mat.data[i]*x[mat.colIndex[i]];
}
}
pinfo.copyOwnerToAll(b,b);
}
void run_test(const Opm::parameter::ParameterGroup& param)
{
int N=100;
int procs, rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &procs);
int n = N/procs; // number of unknowns per process
int bigger = N%procs; // number of processes with n+1 unknowns
int start, end, istart, iend;
// Compute the locally stored region (owner region, extended below by the overlap)
if(rank<bigger) {
start = rank*(n+1);
end = start+(n+1);
}else{
start = bigger*(n+1) + (rank-bigger) * n;
end = start+n;
}
// Compute owner region
if(rank<bigger) {
istart = rank*(n+1);
iend = istart+(n+1);
}else{
istart = bigger*(n+1) + (rank-bigger) * n;
iend = istart+n;
}
// Compute overlap region
if(istart>0)
start = istart - 1;
else
start = istart;
if(iend<N)
end = iend + 1;
else
end = iend;
Opm::ParallelISTLInformation comm(MPI_COMM_WORLD);
auto mat = create1DLaplacian(*comm.indexSet(), N, start, end, istart, iend);
std::vector<double> x(end-start), b(end-start);
createRandomVectors(comm, end-start, x, b, *mat);
std::vector<double> exact(x);
std::fill(x.begin(), x.end(), 0.0);
Opm::LinearSolverFactory ls(param);
boost::any anyComm(comm);
ls.solve(N, mat->data.size(), &(mat->rowStart[0]),
&(mat->colIndex[0]), &(mat->data[0]), &(b[0]),
&(x[0]), anyComm);
}
#ifdef HAVE_DUNE_ISTL
BOOST_AUTO_TEST_CASE(CGAMGTest)
{
Opm::parameter::ParameterGroup param;
param.insertParameter(std::string("linsolver"), std::string("istl"));
param.insertParameter(std::string("linsolver_type"), std::string("1"));
param.insertParameter(std::string("linsolver_max_iterations"), std::string("200"));
run_test(param);
}
BOOST_AUTO_TEST_CASE(CGILUTest)
{
Opm::parameter::ParameterGroup param;
param.insertParameter(std::string("linsolver"), std::string("istl"));
param.insertParameter(std::string("linsolver_type"), std::string("0"));
param.insertParameter(std::string("linsolver_max_iterations"), std::string("200"));
run_test(param);
}
BOOST_AUTO_TEST_CASE(BiCGILUTest)
{
Opm::parameter::ParameterGroup param;
param.insertParameter(std::string("linsolver"), std::string("istl"));
param.insertParameter(std::string("linsolver_type"), std::string("2"));
param.insertParameter(std::string("linsolver_max_iterations"), std::string("200"));
run_test(param);
}
#endif