Support for multiple linear solvers.

Franz Georg Fuchs 2022-11-09 16:54:07 +01:00 committed by Atgeirr Flø Rasmussen
parent 79b417beb5
commit 6ee90b3f8f
3 changed files with 181 additions and 51 deletions


@@ -594,21 +594,86 @@ namespace Opm {
auto& ebosJac = ebosSimulator_.model().linearizer().jacobian().istlMatrix();
auto& ebosResid = ebosSimulator_.model().linearizer().residual();
// set initial guess
x = 0.0;
auto& ebosSolver = ebosSimulator_.model().newtonMethod().linearSolver();
Dune::Timer perfTimer;
perfTimer.start();
ebosSolver.prepare(ebosJac, ebosResid);
linear_solve_setup_time_ = perfTimer.stop();
ebosSolver.setResidual(ebosResid);
// actually, the error needs to be calculated after setResidual in order to
// account for parallelization properly. since the residual of ECFV
// discretizations does not need to be synchronized across processes to be
// consistent, this is not relevant for OPM-flow...
ebosSolver.solve(x);
std::cerr<<"Hello there!!!"<<std::to_string(ebosSolver.getSolveCount())<<std::endl;
if (ebosSolver.getSolveCount()%100==0) {
if ( terminal_output_ ) {
OpmLog::debug("Running speed test");
}
Dune::Timer perfTimer;
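// Save the linear system before benchmarking; prepare()/solve() may modify
// it in place, so each candidate solver must start from identical copies.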
auto ebosJacCopy = ebosJac;
auto ebosResidCopy = ebosResid;
// solve with linear solver 0
x = 0.0;
BVector x0(x);
ebosSolver.setActiveSolver(0);
perfTimer.start();
ebosSolver.prepare(ebosJac, ebosResid);
auto linear_solve_setup_time0 = perfTimer.stop();
perfTimer.reset();
ebosSolver.setResidual(ebosResid);
perfTimer.start();
ebosSolver.solve(x0);
auto time0 = perfTimer.stop();
perfTimer.reset();
// Reset the matrix and RHS to their original values.
ebosJac = ebosJacCopy;
ebosResid = ebosResidCopy;
// solve with linear solver 1
x = 0.0;
BVector x1(x);
ebosSolver.setActiveSolver(1);
perfTimer.start();
ebosSolver.prepare(ebosJac, ebosResid);
auto linear_solve_setup_time1 = perfTimer.stop();
perfTimer.reset();
ebosSolver.setResidual(ebosResid);
perfTimer.start();
ebosSolver.solve(x1);
auto time1 = perfTimer.stop();
perfTimer.reset();
if ( terminal_output_ ) {
OpmLog::debug("Solver time 0: " + std::to_string(time0) + "\n");
OpmLog::debug("Solver time 1: " + std::to_string(time1) + "\n");
}
// Let rank 0 decide and broadcast the choice so that all ranks use the same solver.
int fastest_solver = time0 < time1 ? 0 : 1;
grid_.comm().broadcast(&fastest_solver, 1, 0);
if ( terminal_output_ ) {
OpmLog::debug("Using solver " + std::to_string(fastest_solver));
}
if (fastest_solver == 0) {
linear_solve_setup_time_ = linear_solve_setup_time0;
x = x0;
} else {
linear_solve_setup_time_ = linear_solve_setup_time1;
x = x1;
}
ebosSolver.setActiveSolver(fastest_solver);
} else {
// set initial guess
x = 0.0;
Dune::Timer perfTimer;
perfTimer.start();
ebosSolver.prepare(ebosJac, ebosResid);
linear_solve_setup_time_ = perfTimer.stop();
ebosSolver.setResidual(ebosResid);
// actually, the error needs to be calculated after setResidual in order to
// account for parallelization properly. since the residual of ECFV
// discretizations does not need to be synchronized across processes to be
// consistent, this is not relevant for OPM-flow...
ebosSolver.solve(x);
}
}
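The speed test above hard-wires two candidates; the same pattern extends to any number of solvers. Below is a minimal, self-contained sketch of that pattern, using hypothetical Solver/Matrix/Vector placeholders rather than the actual OPM types:

#include <chrono>
#include <cstddef>
#include <limits>
#include <vector>

// Time each candidate on copies of the same linear system and return the
// index of the fastest; its solution is left in x. In an MPI run the caller
// should broadcast the returned index so that all ranks agree.
template <class Solver, class Matrix, class Vector>
std::size_t pickFastestSolver(std::vector<Solver>& solvers,
                              const Matrix& A, const Vector& b, Vector& x)
{
    std::size_t best = 0;
    double bestTime = std::numeric_limits<double>::max();
    for (std::size_t i = 0; i < solvers.size(); ++i) {
        Matrix Acopy = A;  // prepare()/solve() may modify the system,
        Vector bcopy = b;  // so every candidate starts from fresh copies
        Vector xi = x;
        const auto t0 = std::chrono::steady_clock::now();
        solvers[i].prepare(Acopy, bcopy);
        solvers[i].setResidual(bcopy);
        solvers[i].solve(xi);
        const std::chrono::duration<double> dt = std::chrono::steady_clock::now() - t0;
        if (dt.count() < bestTime) {
            bestTime = dt.count();
            best = i;
            x = xi;  // keep the solution computed by the current winner
        }
    }
    return best;
}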


@@ -187,7 +187,7 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
calls_( 0 ),
converged_(false),
matrix_(nullptr),
parameters_(parameters),
parameters_{parameters},
forceSerial_(forceSerial)
{
initialize();
@@ -199,19 +199,46 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
: simulator_(simulator),
iterations_( 0 ),
calls_( 0 ),
solveCount_(0),
converged_(false),
matrix_(nullptr)
{
parameters_.template init<TypeTag>(simulator_.vanguard().eclState().getSimulationConfig().useCPR());
parameters_.resize(1);
parameters_[0].template init<TypeTag>(simulator_.vanguard().eclState().getSimulationConfig().useCPR());
initialize();
}
void initialize(bool have_gpu = false)
{
OPM_TIMEBLOCK(IstlSolverEbos);
prm_ = setupPropertyTree(parameters_,
EWOMS_PARAM_IS_SET(TypeTag, int, LinearSolverMaxIter),
EWOMS_PARAM_IS_SET(TypeTag, double, LinearSolverReduction));
// prm_ = setupPropertyTree(parameters_,
// EWOMS_PARAM_IS_SET(TypeTag, int, LinearSolverMaxIter),
// EWOMS_PARAM_IS_SET(TypeTag, double, LinearSolverReduction));
// ------------ The following is hard-coded for testing purposes!
prm_.clear();
parameters_.clear();
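// Build two fixed configurations: solver 0 uses a CPRW setup, solver 1
// plain ILU0. prm_[i] holds the property tree built from parameters_[i].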
{
FlowLinearSolverParameters para;
para.init<TypeTag>(false);
para.linsolver_ = "cprw";
parameters_.push_back(para);
prm_.push_back(setupPropertyTree(parameters_[0],
EWOMS_PARAM_IS_SET(TypeTag, int, LinearSolverMaxIter),
EWOMS_PARAM_IS_SET(TypeTag, double, LinearSolverReduction)
));
}
{
FlowLinearSolverParameters para;
para.init<TypeTag>(false);
para.linsolver_ = "ilu0";
parameters_.push_back(para);
prm_.push_back(setupPropertyTree(parameters_[1],
EWOMS_PARAM_IS_SET(TypeTag, int, LinearSolverMaxIter),
EWOMS_PARAM_IS_SET(TypeTag, double, LinearSolverReduction)
));
}
flexibleSolver_.resize(prm_.size());
// ------------
const bool on_io_rank = (simulator_.gridView().comm().rank() == 0);
#if HAVE_MPI
@@ -234,13 +261,19 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
OPM_THROW_NOLOG(std::runtime_error, msg);
}
flexibleSolver_.interiorCellNum_ = detail::numMatrixRowsToUseInSolver(simulator_.vanguard().grid(), true);
const int interiorCellNum_ = detail::numMatrixRowsToUseInSolver(simulator_.vanguard().grid(), true);
for (auto& f : flexibleSolver_) {
f.interiorCellNum_ = interiorCellNum_;
}
// Print parameters to PRT/DBG logs.
if (on_io_rank && parameters_.linear_solver_print_json_definition_) {
if (on_io_rank && parameters_[activeSolverNum_].linear_solver_print_json_definition_) {
std::ostringstream os;
os << "Property tree for linear solver:\n";
prm_.write_json(os, true);
os << "Property tree for linear solvers:\n";
for (std::size_t i = 0; i < prm_.size(); ++i) {
prm_[i].write_json(os, true);
}
OpmLog::note(os.str());
}
if(have_gpu){
@@ -256,6 +289,23 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
{
}
void setActiveSolver(const int num)
{
if (num < 0 || static_cast<std::size_t>(num) >= prm_.size()) {
OPM_THROW(std::logic_error, "Solver number " + std::to_string(num) + " not available.");
}
activeSolverNum_ = num;
auto cc = Dune::MPIHelper::getCollectiveCommunication();
if (cc.rank() == 0) {
OpmLog::note("Active solver = "+std::to_string(activeSolverNum_)+"\n");
}
}
int numAvailableSolvers() const
{
// Return the number of solver configurations, not the active index.
return static_cast<int>(prm_.size());
}
void prepare(const SparseMatrixAdapter& M, Vector& b)
{
prepare(M.istlMatrix(), b);
@@ -290,7 +340,8 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
}
rhs_ = &b;
if (isParallel() && prm_.get<std::string>("preconditioner.type") != "ParOverILU0") {
// TODO: check all solvers, not just one.
if (isParallel() && prm_[activeSolverNum_].template get<std::string>("preconditioner.type") != "ParOverILU0") {
detail::makeOverlapRowsInvalid(getMatrix(), overlapRows_);
}
prepareFlexibleSolver();
@@ -312,12 +363,21 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
// matrix_ = &M.istlMatrix(); // Must be handled in prepare() instead.
}
int getSolveCount() const {
return solveCount_;
}
void resetSolveCount() {
solveCount_ = 0;
}
bool solve(Vector& x)
{
OPM_TIMEBLOCK(istlSolverEbosSolve);
++solveCount_;
calls_ += 1;
// Write linear system if asked for.
const int verbosity = prm_.get<int>("verbosity", 0);
const int verbosity = prm_[activeSolverNum_].get("verbosity", 0);
const bool write_matrix = verbosity > 10;
if (write_matrix) {
Helper::writeSystem(simulator_, //simulator is only used to get names
@@ -330,8 +390,8 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
Dune::InverseOperatorResult result;
{
OPM_TIMEBLOCK(flexibleSolverApply);
assert(flexibleSolver_.solver_);
flexibleSolver_.solver_->apply(x, *rhs_, result);
assert(flexibleSolver_[activeSolverNum_].solver_);
flexibleSolver_[activeSolverNum_].solver_->apply(x, *rhs_, result);
}
// Check convergence, iterations etc.
@@ -366,7 +426,7 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
iterations_ = result.iterations;
converged_ = result.converged;
if(!converged_){
if(result.reduction < parameters_.relaxed_linear_solver_reduction_){
if(result.reduction < parameters_[activeSolverNum_].relaxed_linear_solver_reduction_){
std::stringstream ss;
ss<< "Full linear solver tolerance not achived. The reduction is:" << result.reduction
<< " after " << result.iterations << " iterations ";
@@ -375,7 +435,7 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
}
}
// Check for failure of linear solver.
if (!parameters_.ignoreConvergenceFailure_ && !converged_) {
if (!parameters_[activeSolverNum_].ignoreConvergenceFailure_ && !converged_) {
const std::string msg("Convergence failure for linear solver.");
OPM_THROW_NOLOG(NumericalProblem, msg);
}
@@ -402,21 +462,21 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
if (!useWellConn_) {
auto wellOp = std::make_unique<WellModelOperator>(simulator_.problem().wellModel());
flexibleSolver_.wellOperator_ = std::move(wellOp);
flexibleSolver_[activeSolverNum_].wellOperator_ = std::move(wellOp);
}
OPM_TIMEBLOCK(flexibleSolverCreate);
flexibleSolver_.create(getMatrix(),
isParallel(),
prm_,
pressureIndex,
trueFunc,
forceSerial_,
*comm_);
flexibleSolver_[activeSolverNum_].create(getMatrix(),
isParallel(),
prm_[activeSolverNum_],
pressureIndex,
trueFunc,
forceSerial_,
*comm_);
}
else
{
OPM_TIMEBLOCK(flexibleSolverUpdate);
flexibleSolver_.pre_->update();
flexibleSolver_[activeSolverNum_].pre_->update();
}
}
@@ -427,36 +487,39 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
{
// Decide if we should recreate the solver or just do
// a minimal preconditioner update.
if (!flexibleSolver_.solver_) {
if (flexibleSolver_.empty()) {
return true;
}
if (this->parameters_.cpr_reuse_setup_ == 0) {
if (!flexibleSolver_[activeSolverNum_].solver_) {
return true;
}
if (this->parameters_[activeSolverNum_].cpr_reuse_setup_ == 0) {
// Always recreate solver.
return true;
}
if (this->parameters_.cpr_reuse_setup_ == 1) {
if (this->parameters_[activeSolverNum_].cpr_reuse_setup_ == 1) {
// Recreate solver on the first iteration of every timestep.
const int newton_iteration = this->simulator_.model().newtonMethod().numIterations();
return newton_iteration == 0;
}
if (this->parameters_.cpr_reuse_setup_ == 2) {
if (this->parameters_[activeSolverNum_].cpr_reuse_setup_ == 2) {
// Recreate solver if the last solve used more than 10 iterations.
return this->iterations() > 10;
}
if (this->parameters_.cpr_reuse_setup_ == 3) {
if (this->parameters_[activeSolverNum_].cpr_reuse_setup_ == 3) {
// Never recreate the solver.
return false;
}
if (this->parameters_.cpr_reuse_setup_ == 4) {
if (this->parameters_[activeSolverNum_].cpr_reuse_setup_ == 4) {
// Recreate solver every 'step' solve calls.
const int step = this->parameters_.cpr_reuse_interval_;
const int step = this->parameters_[activeSolverNum_].cpr_reuse_interval_;
const bool create = ((calls_ % step) == 0);
return create;
}
// If here, we have an invalid parameter.
const bool on_io_rank = (simulator_.gridView().comm().rank() == 0);
std::string msg = "Invalid value: " + std::to_string(this->parameters_.cpr_reuse_setup_)
std::string msg = "Invalid value: " + std::to_string(this->parameters_[activeSolverNum_].cpr_reuse_setup_)
+ " for --cpr-reuse-setup parameter, run with --help to see allowed values.";
if (on_io_rank) {
OpmLog::error(msg);
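For reference, the reuse policy encoded above maps the --cpr-reuse-setup values as follows; the enum is only an illustrative summary (the names are hypothetical, not part of the OPM API):

enum class CprReuseSetup {
    AlwaysRecreate      = 0, // recreate the solver on every solve call
    RecreatePerTimestep = 1, // recreate on the first Newton iteration of each timestep
    RecreateIfSlow      = 2, // recreate if the last solve took more than 10 iterations
    NeverRecreate       = 3, // always reuse the existing setup
    RecreateEveryNCalls = 4  // recreate every cpr_reuse_interval_ solve calls
};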
@@ -496,6 +559,7 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
const Simulator& simulator_;
mutable int iterations_;
mutable int calls_;
mutable int solveCount_;
mutable bool converged_;
std::any parallelInformation_;
@@ -503,15 +567,16 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
Matrix* matrix_;
Vector *rhs_;
detail::FlexibleSolverInfo<Matrix,Vector,CommunicationType> flexibleSolver_;
int activeSolverNum_ = 0;
std::vector<detail::FlexibleSolverInfo<Matrix,Vector,CommunicationType>> flexibleSolver_;
std::vector<int> overlapRows_;
std::vector<int> interiorRows_;
bool useWellConn_;
FlowLinearSolverParameters parameters_;
std::vector<FlowLinearSolverParameters> parameters_;
bool forceSerial_ = false;
PropertyTree prm_;
std::vector<PropertyTree> prm_;
std::shared_ptr< CommunicationType > comm_;
}; // end ISTLSolver
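Taken together, the new members turn the class into a small runtime-selectable solver collection. A hedged sketch of caller-side use (setup elided; solver, jacobian, residual and x are placeholder names, not from the diff):

// Try every available configuration on the current linear system.
for (int i = 0; i < solver.numAvailableSolvers(); ++i) {
    solver.setActiveSolver(i);    // selects parameters_[i] / prm_[i]
    solver.prepare(jacobian, residual);
    solver.setResidual(residual);
    solver.solve(x);              // increments the internal solve count
}
const int solves = solver.getSolveCount();  // total solves since the last reset
solver.resetSolveCount();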


@@ -30,8 +30,8 @@ namespace Opm
struct FlowLinearSolverParameters;
PropertyTree setupPropertyTree(FlowLinearSolverParameters p,
bool LinearSolverMaxIterSet,
bool CprMaxEllIterSet);
bool linearSolverMaxIterSet,
bool linearSolverReductionSet);
PropertyTree setupCPRW(const std::string& conf, const FlowLinearSolverParameters& p);
PropertyTree setupCPR(const std::string& conf, const FlowLinearSolverParameters& p);
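With the renamed flags, a per-solver setup call mirrors what initialize() does above. Roughly (a sketch reusing only names that appear in the diff):

FlowLinearSolverParameters params;
params.init<TypeTag>(false);
params.linsolver_ = "cprw";  // or "ilu0", as in the hard-coded test setup
const PropertyTree prm =
    setupPropertyTree(params,
                      EWOMS_PARAM_IS_SET(TypeTag, int, LinearSolverMaxIter),
                      EWOMS_PARAM_IS_SET(TypeTag, double, LinearSolverReduction));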