checking the results from the direct solver invDX

Since I have not figured out a way to detect/catch the singularity of the
matrix with UMFPack, currently we only check the validity of the
results to make sure inf or nan values do not enter other parts of the simulation.

Otherwise they can easily result in assertion failures and cause the run to
be terminated. After all, it is most likely a numerical issue
generated during the non-linear iteration process.
Kai Bao 2017-10-14 08:50:44 +02:00
parent d407e516ce
commit 7054317d98
3 changed files with 44 additions and 24 deletions
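The validity check described in the message boils down to a simple pattern: run the direct solve, then scan every entry of the result for inf or nan and throw before such values can reach the rest of the simulation. The stand-alone sketch below illustrates just that pattern; it uses a flat std::vector and made-up names (checkSolution, the main() driver) rather than Dune's blocked vectors and Dune::UMFPack, so it is an illustration of the check, not the committed invDXDirect itself.

#include <cmath>
#include <iostream>
#include <stdexcept>
#include <vector>

// Reject a solver result containing inf or nan entries, so such values cannot
// propagate into other parts of the simulation. With UMFPACK a singular matrix
// typically shows up exactly this way in the solution vector.
std::vector<double> checkSolution(std::vector<double> y)
{
    for (const double value : y) {
        if (std::isinf(value) || std::isnan(value)) {
            throw std::runtime_error("nan or inf value found in direct solver result, "
                                     "matrix is likely singular");
        }
    }
    return y;
}

int main()
{
    // A made-up "solution" containing a nan, as a singular matrix would produce.
    const std::vector<double> bad = {1.0, std::nan(""), 3.0};
    try {
        checkSolution(bad);
    } catch (const std::exception& e) {
        std::cout << "rejected: " << e.what() << '\n';
    }
    return 0;
}

In the committed invDXDirect the same loop runs over the blocked vector entries y[i_block][i_elem] and throws Opm::NumericalProblem via OPM_THROW instead of std::runtime_error.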


@@ -190,11 +190,6 @@ namespace Opm {
if (param_.solve_welleq_initially_ && iterationIdx == 0) {
// solve the well equations as a pre-processing step
report = solveWellEq(ebosSimulator, dt, well_state);
if (report.converged) {
std::cout << " solveWellEq converged ! " << std::endl;
} else {
std::cout << " solveWellEq does not get converged ! " << std::endl;
}
}
assembleWellEq(ebosSimulator, dt, well_state, false);


@@ -23,7 +23,7 @@
#define OPM_MSWELLHELPERS_HEADER_INCLUDED
#include <opm/common/ErrorMacros.hpp>
// #include <dune/istl/solvers.hh>
#include <dune/istl/solvers.hh>
#include <dune/istl/umfpack.hh>
#include <cmath>
@@ -32,7 +32,40 @@ namespace Opm {
namespace mswellhelpers
{
// obtain y = D^-1 * x
// obtain y = D^-1 * x with a direct solver
template <typename MatrixType, typename VectorType>
VectorType
invDXDirect(const MatrixType& D, VectorType x)
{
VectorType y(x.size());
y = 0.;
Dune::UMFPack<MatrixType> linsolver(D, 0);
// Object storing some statistics about the solving process
Dune::InverseOperatorResult res;
// Solve
linsolver.apply(y, x, res);
// Checking if there is any inf or nan in y
// this remains the workaround until we find a way to catch the singularity of the matrix
for (int i_block = 0; i_block < y.size(); ++i_block) {
for (int i_elem = 0; i_elem < y[i_block].size(); ++i_elem) {
if (std::isinf(y[i_block][i_elem]) || std::isnan(y[i_block][i_elem]) ) {
OPM_THROW(Opm::NumericalProblem, "nan or inf value found in invDXDirect due to singular matrix");
}
}
}
return y;
}
// obtain y = D^-1 * x with a BiCGSTAB iterative solver
template <typename MatrixType, typename VectorType>
VectorType
invDX(const MatrixType& D, VectorType x)
@@ -46,7 +79,7 @@ namespace mswellhelpers
VectorType y(x.size());
y = 0.;
/* Dune::MatrixAdapter<MatrixType, VectorType, VectorType> linearOperator(D);
Dune::MatrixAdapter<MatrixType, VectorType, VectorType> linearOperator(D);
// Sequential incomplete LU decomposition as the preconditioner
Dune::SeqILU0<MatrixType, VectorType, VectorType> preconditioner(D, 1.0);
@@ -57,14 +90,10 @@ namespace mswellhelpers
// Preconditioned BICGSTAB solver
Dune::BiCGSTABSolver<VectorType> linsolver(linearOperator,
preconditioner,
// 1.e-8, // desired residual reduction factor
// 1.e-6, // desired residual reduction factor
1.e-4, // desired residual reduction factor
150, // maximum number of iterations
1.e-8, // desired residual reduction factor
250, // maximum number of iterations
0); // verbosity of the solver */
Dune::UMFPack<MatrixType> linsolver(D, 0);
// Object storing some statistics about the solving process
Dune::InverseOperatorResult res;
@@ -144,7 +173,7 @@ namespace mswellhelpers
const ValueType& density, const ValueType& w, const ValueType& mu)
{
const double f = calculateFrictionFactor(area, diameter, w.value(), roughness, mu.value());
// TODO: a factor of 2 needs to be here based on the dimensional analysis
// \Note: a factor of 2 needs to be here based on the dimensional analysis
return 2. * f * l * w * w / (area * area * diameter * density);
}
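One way to read the factor of 2 flagged in the comment above, under the assumption that w is the mass flow rate, rho the density, A (area) the cross-sectional area, D (diameter) the pipe diameter and f a Fanning-type friction factor (none of these symbol meanings are stated in the diff itself):

% Hedged dimensional sketch; the symbol meanings are assumptions, not taken from the diff.
\Delta p_f \;=\; \frac{2\, f\, l\, w^2}{A^2 D \rho}
          \;=\; 2 f\, \frac{l}{D}\, \rho v^2 ,
\qquad \text{using } w = \rho v A ,

which is the usual Fanning friction-factor form of the frictional pressure drop, so the explicit 2 in the return statement is consistent with that convention.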


@@ -468,8 +468,6 @@ namespace Opm
}
}
std::cout << " maximum_residual " << maximum_residual[0] << " " << maximum_residual[1] << " " << maximum_residual[2] << " " << maximum_residual[3] << std::endl;
if ( !(report.nan_residual_found || report.too_large_residual_found) ) { // no abnormal residual value found
// check convergence for flux residuals
for ( int comp_idx = 0; comp_idx < numComponents(); ++comp_idx)
@@ -499,7 +497,7 @@ namespace Opm
duneB_.mv(x, Bx);
// invDBx = duneD^-1 * Bx_
const BVectorWell invDBx = mswellhelpers::invDX(duneD_, Bx);
const BVectorWell invDBx = mswellhelpers::invDXDirect(duneD_, Bx);
// Ax = Ax - duneC_^T * invDBx
duneC_.mmtv(invDBx,Ax);
@@ -515,7 +513,7 @@ namespace Opm
apply(BVector& r) const
{
// invDrw_ = duneD^-1 * resWell_
const BVectorWell invDrw = mswellhelpers::invDX(duneD_, resWell_);
const BVectorWell invDrw = mswellhelpers::invDXDirect(duneD_, resWell_);
// r = r - duneC_^T * invDrw
duneC_.mmtv(invDrw, r);
}
@@ -637,7 +635,7 @@ namespace Opm
// resWell = resWell - B * x
duneB_.mmv(x, resWell);
// xw = D^-1 * resWell
xw = mswellhelpers::invDX(duneD_, resWell);
xw = mswellhelpers::invDXDirect(duneD_, resWell);
}
@@ -652,7 +650,7 @@ namespace Opm
{
// We assemble the well equations, then we check the convergence,
// which is why we do not put the assembleWellEq here.
const BVectorWell dx_well = mswellhelpers::invDX(duneD_, resWell_);
const BVectorWell dx_well = mswellhelpers::invDXDirect(duneD_, resWell_);
updateWellState(dx_well, param, false, well_state);
}
@@ -1791,11 +1789,10 @@ namespace Opm
const int max_iter_number = param.max_inner_iter_ms_wells_;
int it = 0;
for (; it < max_iter_number; ++it) {
std::cout << " iterateWellEquations it " << it << std::endl;
assembleWellEqWithoutIteration(ebosSimulator, dt, well_state, true);
const BVectorWell dx_well = mswellhelpers::invDX(duneD_, resWell_);
const BVectorWell dx_well = mswellhelpers::invDXDirect(duneD_, resWell_);
// TODO: use these small values for now, not intending to reach convergence
// at this stage; but should we?
@@ -1807,7 +1804,6 @@ namespace Opm
const ConvergenceReport report = getWellConvergence(ebosSimulator, B, param);
if (report.converged) {
std::cout << " converged in iterateWellEquations " << std::endl;
break;
}