added timeblocks in linear solver

Author: hnil
Date: 2023-02-15 15:07:58 +01:00
Parent: 2037e862b4
Commit: 79d7698dd8
3 changed files with 15 additions and 0 deletions
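The change adds an OPM_TIMEBLOCK(<label>) statement at the top of the hot linear-solver routines (the preconditioner wrapper's pre/apply/post/update, the ILU0 decomposition, and the CPRW coarse-level transfer), so each call is timed for the duration of its scope, and nested blocks such as iluDecompositionMakeMatrix inside iluDecomposition show up as sub-timings. As a rough mental model only, the sketch below shows a RAII scope timer that a macro of this kind could expand to; the ScopedTimeBlock class and the macro body are illustrative assumptions, not the actual definition from OPM's timing macros.

// Illustrative sketch only: a RAII scope timer of the kind OPM_TIMEBLOCK is
// assumed to expand to. ScopedTimeBlock and the macro body are hypothetical,
// not taken from opm-common.
#include <chrono>
#include <iostream>
#include <string>
#include <utility>

class ScopedTimeBlock
{
public:
    explicit ScopedTimeBlock(std::string name)
        : name_(std::move(name))
        , start_(std::chrono::steady_clock::now())
    {}

    // Report elapsed wall-clock time when the enclosing scope ends.
    ~ScopedTimeBlock()
    {
        const auto elapsed = std::chrono::steady_clock::now() - start_;
        const auto us = std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count();
        std::cout << name_ << ": " << us << " us\n";
    }

private:
    std::string name_;
    std::chrono::steady_clock::time_point start_;
};

// The label is both stringified and used to build a unique variable name,
// so OPM_TIMEBLOCK(apply) times the rest of the current scope under "apply".
#define OPM_TIMEBLOCK(label) ScopedTimeBlock timeBlock_##label(#label)

int main()
{
    OPM_TIMEBLOCK(update);                // outer block, like ILU0's update()
    {
        OPM_TIMEBLOCK(iluDecomposition);  // nested block, reported separately
    }
}

Because the timer is a local object, the block measures exactly the lifetime of the enclosing scope, which is why each hunk below places the macro as the first statement of the function body.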

File 1 of 3

@@ -44,16 +44,19 @@ public:
     virtual void pre(X& x, Y& b) override
     {
+        OPM_TIMEBLOCK(pre);
         block_precond_.pre(x, b);
     }
     virtual void apply(X& v, const Y& d) override
     {
+        OPM_TIMEBLOCK(apply);
         block_precond_.apply(v, d);
     }
     virtual void post(X& x) override
     {
+        OPM_TIMEBLOCK(post);
         block_precond_.post(x);
     }
@@ -65,6 +68,7 @@ public:
     // The update() function does nothing for a wrapped preconditioner.
     virtual void update() override
     {
+        OPM_TIMEBLOCK(update);
         orig_precond_.update();
     }

File 2 of 3

@@ -95,6 +95,7 @@ void ghost_last_bilu0_decomposition (M& A, size_t interiorSize)
 template<class M, class CRS, class InvVector>
 void convertToCRS(const M& A, CRS& lower, CRS& upper, InvVector& inv)
 {
+    OPM_TIMEBLOCK(convertToCRS);
     // No need to do anything for 0 rows. Return to prevent indexing
     // a zero-sized array.
     if ( A.N() == 0 )
@@ -285,6 +286,7 @@ template<class Matrix, class Domain, class Range, class ParallelInfoT>
 void ParallelOverlappingILU0<Matrix,Domain,Range,ParallelInfoT>::
 apply (Domain& v, const Range& d)
 {
+    OPM_TIMEBLOCK(apply);
     Range& md = reorderD(d);
     Domain& mv = reorderV(v);
@@ -354,6 +356,7 @@ template<class Matrix, class Domain, class Range, class ParallelInfoT>
 void ParallelOverlappingILU0<Matrix,Domain,Range,ParallelInfoT>::
 update()
 {
+    OPM_TIMEBLOCK(update);
     // (For older DUNE versions the communicator might be
     // invalid if redistribution in AMG happened on the coarsest level.
     // Therefore we check for nonzero size.)
@@ -403,11 +406,13 @@ update()
     try
     {
+        OPM_TIMEBLOCK(iluDecomposition);
         if (iluIteration_ == 0) {
             // create ILU-0 decomposition
             if (ordering_.empty())
             {
                 if (ILU_) {
+                    OPM_TIMEBLOCK(iluDecompositionMakeMatrix);
                     // The ILU_ matrix is already a copy with the same
                     // sparse structure as A_, but the values of A_ may
                     // have changed, so we must copy all elements.

File 3 of 3

@@ -31,6 +31,7 @@ namespace Opm
                                        std::shared_ptr<Communication>& commRW,
                                        const int nw)
     {
+        OPM_TIMEBLOCK(extendCommunicatorWithWells);
         // used for extending the coarse communicator pattern
         using IndexSet = typename Communication::ParallelIndexSet;
         using LocalIndex = typename IndexSet::LocalIndex;
@@ -104,6 +105,7 @@ namespace Opm
     virtual void createCoarseLevelSystem(const FineOperator& fineOperator) override
     {
+        OPM_TIMEBLOCK(createCoarseLevelSystem);
         using CoarseMatrix = typename CoarseOperator::matrix_type;
         const auto& fineLevelMatrix = fineOperator.getmat();
         const auto& nw = fineOperator.getNumberOfExtraEquations();
@@ -172,6 +174,7 @@ namespace Opm
     virtual void calculateCoarseEntries(const FineOperator& fineOperator) override
     {
+        OPM_TIMEBLOCK(calculateCoarseEntries);
         const auto& fineMatrix = fineOperator.getmat();
         *coarseLevelMatrix_ = 0;
         auto rowCoarse = coarseLevelMatrix_->begin();
@@ -196,6 +199,7 @@ namespace Opm
             }
         }
         if (prm_.get<bool>("add_wells")) {
+            OPM_TIMEBLOCK(cprwAddWellEquation);
             assert(transpose == false); // not implemented
             bool use_well_weights = prm_.get<bool>("use_well_weights");
             fineOperator.addWellPressureEquations(*coarseLevelMatrix_, weights_, use_well_weights);
@@ -209,6 +213,7 @@ namespace Opm
     virtual void moveToCoarseLevel(const typename ParentType::FineRangeType& fine) override
     {
+        OPM_TIMEBLOCK(moveToCoarseLevel);
         // NB: we iterate over fine assuming the well dofs are at the end.
         // Set coarse vector to zero
         this->rhs_ = 0;
@@ -233,6 +238,7 @@ namespace Opm
     virtual void moveToFineLevel(typename ParentType::FineDomainType& fine) override
     {
+        OPM_TIMEBLOCK(moveToFineLevel);
         // NB: we iterate over fine assuming the well dofs are at the end.
         auto end = fine.end(), begin = fine.begin();