Merge pull request #4495 from atgeirr/add_timing_block

Add timing block
Atgeirr Flø Rasmussen 2023-02-28 15:00:17 +01:00 committed by GitHub
commit b5678bf5cb
9 changed files with 73 additions and 11 deletions
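
The commit instruments hot paths across the simulator (problem callbacks, output writing, the linear solver and the preconditioners) with OPM_TIMEBLOCK and OPM_TIMEBLOCK_LOCAL scope timers. The macro definitions themselves are not part of this diff; purely as an illustrative sketch (ScopedTimeBlock, SKETCH_TIMEBLOCK and solveStep below are hypothetical names, not the OPM implementation), a timing block of this kind can be realized as an RAII helper whose destructor reports the elapsed time when the enclosing scope ends:

// Minimal, hypothetical sketch of a scope-based timing block. The real
// OPM_TIMEBLOCK / OPM_TIMEBLOCK_LOCAL macros are defined elsewhere in OPM and
// are not shown in this diff; all names below are illustrative only.
#include <chrono>
#include <iostream>
#include <string>
#include <utility>

class ScopedTimeBlock
{
public:
    explicit ScopedTimeBlock(std::string name)
        : name_(std::move(name))
        , start_(std::chrono::steady_clock::now())
    {}

    // The destructor fires when the enclosing scope is left, so the report
    // covers everything between the macro and the closing brace.
    ~ScopedTimeBlock()
    {
        const auto stop = std::chrono::steady_clock::now();
        const auto us =
            std::chrono::duration_cast<std::chrono::microseconds>(stop - start_).count();
        std::cout << name_ << ": " << us << " us\n";
    }

private:
    std::string name_;
    std::chrono::steady_clock::time_point start_;
};

// Expands a bare label into a timer object living until the end of the scope.
#define SKETCH_TIMEBLOCK(label) ScopedTimeBlock sketchTimeBlock_##label(#label)

void solveStep()
{
    SKETCH_TIMEBLOCK(solveStep);    // timed until solveStep() returns
    {
        SKETCH_TIMEBLOCK(assembly); // extra braces limit timing to this block
        // ... assemble the linear system ...
    }
    // ... solve and update ...
}

int main()
{
    solveStep();
}

The extra-braces pattern is what several hunks below use, for example around invalidateAndUpdateIntensiveQuantities and getWellConvergence, so that only a single call is covered by the timer. The _LOCAL variant is presumably intended for code that runs once per element or degree of freedom; that distinction is not visible in this diff.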

View File

@ -29,7 +29,7 @@
#define EWOMS_ECL_GENERIC_TRACER_MODEL_HH
#include <opm/grid/common/CartesianIndexMapper.hpp>
#include <opm/common/ErrorMacros.hpp>
#include <opm/models/blackoil/blackoilmodel.hh>
#include <opm/simulators/linalg/matrixblock.hh>

View File

@ -216,6 +216,7 @@ public:
*/
void processElement(const ElementContext& elemCtx)
{
OPM_TIMEBLOCK_LOCAL(processElement);
if (!std::is_same<Discretization, EcfvDiscretization<TypeTag>>::value)
return;
@ -857,6 +858,7 @@ public:
template <class ActiveIndex, class CartesianIndex>
void processFluxes(const ElementContext& elemCtx, ActiveIndex&& activeIndex, CartesianIndex&& cartesianIndex)
{
OPM_TIMEBLOCK_LOCAL(processFluxes);
const auto identifyCell = [&activeIndex, &cartesianIndex](const Element& elem) -> EclInterRegFlowMap::Cell {
const auto cellIndex = activeIndex(elem);
@ -983,7 +985,7 @@ private:
void updateFluidInPlace_(const ElementContext& elemCtx, unsigned dofIdx)
{
OPM_TIMEBLOCK_LOCAL(updateFluidInPlace);
const auto& intQuants = elemCtx.intensiveQuantities(dofIdx, /*timeIdx=*/0);
const auto& fs = intQuants.fluidState();
unsigned globalDofIdx = elemCtx.globalSpaceIndex(dofIdx, /*timeIdx=*/0);

View File

@ -975,6 +975,7 @@ public:
*/
void beginEpisode()
{
OPM_TIMEBLOCK(beginEpisode);
// Proceed to the next report step
auto& simulator = this->simulator();
int episodeIdx = simulator.episodeIndex();
@ -1038,6 +1039,7 @@ public:
*/
void beginTimeStep()
{
OPM_TIMEBLOCK(beginTimeStep);
int episodeIdx = this->episodeIndex();
this->beginTimeStep_(enableExperiments,
@ -1059,9 +1061,11 @@ public:
// the derivatives may have changed
bool invalidateIntensiveQuantities = invalidateFromMaxWaterSat || invalidateFromMinPressure || invalidateFromHyst || invalidateFromMaxOilSat;
if (invalidateIntensiveQuantities)
if (invalidateIntensiveQuantities){
OPM_TIMEBLOCK(beginTimeStepInvalidateIntensiveQuantities);
this->model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0);
}
if constexpr (getPropValue<TypeTag, Properties::EnablePolymer>())
updateMaxPolymerAdsorption_();
@ -1077,6 +1081,7 @@ public:
*/
void beginIteration()
{
OPM_TIMEBLOCK(beginIteration);
wellModel_.beginIteration();
if (enableAquifers_)
aquiferModel_.beginIteration();
@ -1087,6 +1092,7 @@ public:
*/
void endIteration()
{
OPM_TIMEBLOCK(endIteration);
wellModel_.endIteration();
if (enableAquifers_)
aquiferModel_.endIteration();
@ -1097,6 +1103,7 @@ public:
*/
void endTimeStep()
{
OPM_TIMEBLOCK(endTimeStep);
#ifndef NDEBUG
if constexpr (getPropValue<TypeTag, Properties::EnableDebuggingChecks>()) {
// in debug mode, we don't care about performance, so we check if the model does
@ -1169,6 +1176,7 @@ public:
*/
void endEpisode()
{
OPM_TIMEBLOCK(endEpisode);
auto& simulator = this->simulator();
auto& schedule = simulator.vanguard().schedule();
@ -1193,16 +1201,19 @@ public:
*/
void writeOutput(bool verbose = true)
{
OPM_TIMEBLOCK(problemWriteOutput);
// use the generic code to prepare the output fields and to
// write the desired VTK files.
ParentType::writeOutput(verbose);
bool isSubStep = !EWOMS_GET_PARAM(TypeTag, bool, EnableWriteAllSolutions) && !this->simulator().episodeWillBeOver();
if (enableEclOutput_)
if (enableEclOutput_){
eclWriter_->writeOutput(isSubStep);
}
}
void finalizeOutput() {
OPM_TIMEBLOCK(finalizeOutput);
// this will write all pending output to disk
// to avoid corruption of output files
eclWriter_.reset();
@ -1463,6 +1474,7 @@ public:
FluidState &fluidState,
unsigned globalSpaceIdx) const
{
OPM_TIMEBLOCK_LOCAL(updateRelperms);
{
// calculate relative permeabilities. note that we store the result into the
// mobility_ class attribute. the division by the phase viscosity happens later.
@ -1560,6 +1572,7 @@ public:
unsigned spaceIdx,
unsigned timeIdx) const
{
OPM_TIMEBLOCK(eclProblemBoundary);
if (!context.intersection(spaceIdx).boundary())
return;
@ -1769,6 +1782,7 @@ public:
unsigned globalDofIdx,
unsigned timeIdx) const
{
OPM_TIMEBLOCK(eclProblemSource);
rate = 0.0;
wellModel_.computeTotalRatesForDof(rate, globalDofIdx);
@ -1850,6 +1864,7 @@ public:
const InitialFluidState boundaryFluidState(unsigned globalDofIdx, const int directionId) const
{
OPM_TIMEBLOCK_LOCAL(boundaryFluidState);
FaceDir::DirEnum dir = FaceDir::FromIntersectionIndex(directionId);
const auto& dirichlet = dirichlet_(dir)[globalDofIdx];
if(std::get<0>(dirichlet) == BCComponent::NONE)
@ -1939,6 +1954,7 @@ public:
*/
Scalar nextTimeStepSize() const
{
OPM_TIMEBLOCK(nextTimeStepSize);
// allow external code to do the timestepping
if (this->nextTimeStepSize_ > 0.0)
return this->nextTimeStepSize_;
@ -1965,7 +1981,7 @@ public:
template <class LhsEval>
LhsEval rockCompPoroMultiplier(const IntensiveQuantities& intQuants, unsigned elementIdx) const
{
OPM_TIMEBLOCK_LOCAL(rockCompPoroMultiplier);
if (this->rockCompPoroMult_.empty() && this->rockCompPoroMultWc_.empty())
return 1.0;
@ -2005,6 +2021,7 @@ public:
template <class LhsEval>
LhsEval rockCompTransMultiplier(const IntensiveQuantities& intQuants, unsigned elementIdx) const
{
OPM_TIMEBLOCK_LOCAL(rockCompTransMultiplier);
if (this->rockCompTransMult_.empty() && this->rockCompTransMultWc_.empty())
return 1.0;
@ -2037,6 +2054,7 @@ public:
std::pair<bool, RateVector> boundaryCondition(const unsigned int globalSpaceIdx, const int directionId)
{
OPM_TIMEBLOCK_LOCAL(boundaryCondition);
if (!nonTrivialBoundaryConditions_) {
return { false, RateVector(0.0) };
}
@ -2068,6 +2086,7 @@ private:
void updateProperty_(const std::string& failureMsg,
UpdateFunc func)
{
OPM_TIMEBLOCK(updateProperty);
ElementContext elemCtx(this->simulator());
const auto& vanguard = this->simulator().vanguard();
OPM_BEGIN_PARALLEL_TRY_CATCH();
@ -2085,6 +2104,7 @@ private:
// update the parameters needed for DRSDT and DRVDT
void updateCompositionChangeLimits_()
{
OPM_TIMEBLOCK(updateCompositionChangeLimits);
// update the "last Rs" values for all elements, including the ones in the ghost
// and overlap regions
int episodeIdx = this->episodeIndex();
@ -2155,6 +2175,7 @@ private:
bool updateMaxOilSaturation_()
{
OPM_TIMEBLOCK(updateMaxOilSaturation);
int episodeIdx = this->episodeIndex();
// we use VAPPARS
@ -2175,6 +2196,7 @@ private:
bool updateMaxWaterSaturation_()
{
OPM_TIMEBLOCK(updateMaxWaterSaturation);
// water compaction is activated in ROCKCOMP
if (this->maxWaterSaturation_.empty())
return false;
@ -2193,6 +2215,7 @@ private:
bool updateMinPressure_()
{
OPM_TIMEBLOCK(updateMinPressure);
// IRREVERS option is used in ROCKCOMP
if (this->minOilPressure_.empty())
return false;
@ -2210,6 +2233,7 @@ private:
void readMaterialParameters_()
{
OPM_TIMEBLOCK(readMaterialParameters);
const auto& simulator = this->simulator();
const auto& vanguard = simulator.vanguard();
const auto& eclState = vanguard.eclState();

View File

@ -305,6 +305,7 @@ public:
void writeOutput(bool isSubStep)
{
OPM_TIMEBLOCK(writeOutput);
const int reportStepNum = simulator_.episodeIndex() + 1;
this->prepareLocalCellData(isSubStep, reportStepNum);
this->eclOutputModule_->outputErrorLog(simulator_.gridView().comm());
@ -506,6 +507,7 @@ private:
void prepareLocalCellData(const bool isSubStep,
const int reportStepNum)
{
OPM_TIMEBLOCK(prepareLocalCellData);
const auto& gridView = simulator_.vanguard().gridView();
const int numElements = gridView.size(/*codim=*/0);
const bool log = this->collectToIORank_.isIORank();
@ -526,6 +528,7 @@ private:
void captureLocalFluxData()
{
OPM_TIMEBLOCK(captureLocalData);
const auto& gridView = this->simulator_.vanguard().gridView();
const auto timeIdx = 0u;

View File

@ -631,6 +631,7 @@ namespace Opm {
/// Apply an update to the primary variables.
void updateSolution(const BVector& dx)
{
OPM_TIMEBLOCK(updateSolution);
auto& ebosNewtonMethod = ebosSimulator_.model().newtonMethod();
SolutionVector& solution = ebosSimulator_.model().solution(/*timeIdx=*/0);
@ -642,7 +643,10 @@ namespace Opm {
// residual
// if the solution is updated, the intensive quantities need to be recalculated
ebosSimulator_.model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0);
{
OPM_TIMEBLOCK(invalidateAndUpdateIntensiveQuantities);
ebosSimulator_.model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0);
}
}
/// Return true if output to cout is wanted.
@ -659,6 +663,7 @@ namespace Opm {
std::vector< Scalar >& maxCoeff,
std::vector< Scalar >& B_avg)
{
OPM_TIMEBLOCK(convergenceReduction);
// Compute total pore volume (use only owned entries)
double pvSum = pvSumLocal;
double numAquiferPvSum = numAquiferPvSumLocal;
@ -718,6 +723,7 @@ namespace Opm {
std::vector<Scalar>& maxCoeff,
std::vector<Scalar>& B_avg)
{
OPM_TIMEBLOCK(localConvergenceData);
double pvSumLocal = 0.0;
double numAquiferPvSumLocal = 0.0;
const auto& ebosModel = ebosSimulator_.model();
@ -848,6 +854,7 @@ namespace Opm {
/// of a numerical aquifer.
double computeCnvErrorPv(const std::vector<Scalar>& B_avg, double dt)
{
OPM_TIMEBLOCK(computeCnvErrorPv);
double errorPV{};
const auto& ebosModel = ebosSimulator_.model();
const auto& ebosProblem = ebosSimulator_.problem();
@ -895,6 +902,7 @@ namespace Opm {
std::vector<Scalar>& B_avg,
std::vector<Scalar>& residual_norms)
{
OPM_TIMEBLOCK(getReservoirConvergence);
typedef std::vector< Scalar > Vector;
const int numComp = numEq;
@ -1004,13 +1012,16 @@ namespace Opm {
const int iteration,
std::vector<double>& residual_norms)
{
OPM_TIMEBLOCK(getConvergence);
// Get convergence reports for reservoir and wells.
std::vector<Scalar> B_avg(numEq, 0.0);
auto report = getReservoirConvergence(timer.simulationTimeElapsed(),
timer.currentStepLength(),
iteration, B_avg, residual_norms);
report += wellModel().getWellConvergence(B_avg, /*checkWellGroupControls*/report.converged());
{
OPM_TIMEBLOCK(getWellConvergence);
report += wellModel().getWellConvergence(B_avg, /*checkWellGroupControls*/report.converged());
}
return report;
}
@ -1033,6 +1044,7 @@ namespace Opm {
std::vector<std::vector<double> >
computeFluidInPlace(const std::vector<int>& /*fipnum*/) const
{
OPM_TIMEBLOCK(computeFluidInPlace);
//assert(true)
//return an empty vector
std::vector<std::vector<double> > regionValues(0, std::vector<double>(0,0.0));

View File

@ -237,6 +237,7 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
converged_(false),
matrix_()
{
OPM_TIMEBLOCK(IstlSolverEbos);
const bool on_io_rank = (simulator.gridView().comm().rank() == 0);
#if HAVE_MPI
comm_.reset( new CommunicationType( simulator_.vanguard().grid().comm() ) );
@ -311,6 +312,7 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
void prepare(const SparseMatrixAdapter& M, Vector& b)
{
OPM_TIMEBLOCK(istlSolverEbosPrepare);
static bool firstcall = true;
#if HAVE_MPI
if (firstcall) {
@ -370,6 +372,7 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
bool solve(Vector& x)
{
OPM_TIMEBLOCK(istlSolverEbosSolve);
calls_ += 1;
// Write linear system if asked for.
const int verbosity = prm_.get<int>("verbosity", 0);
@ -396,6 +399,7 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
x, result))
#endif
{
OPM_TIMEBLOCK(flexibleSolverApply);
assert(flexibleSolver_.solver_);
flexibleSolver_.solver_->apply(x, *rhs_, result);
}
@ -448,7 +452,7 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
void prepareFlexibleSolver()
{
OPM_TIMEBLOCK(flexibleSolverPrepare);
if (shouldCreateSolver()) {
std::function<Vector()> trueFunc =
[this]
@ -460,7 +464,7 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
auto wellOp = std::make_unique<WellModelOperator>(simulator_.problem().wellModel());
flexibleSolver_.wellOperator_ = std::move(wellOp);
}
OPM_TIMEBLOCK(flexibleSolverCreate);
flexibleSolver_.create(getMatrix(),
isParallel(),
prm_,
@ -470,6 +474,7 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
}
else
{
OPM_TIMEBLOCK(flexibleSolverUpdate);
flexibleSolver_.pre_->update();
}
}
@ -527,6 +532,7 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
// conservation equations, ignoring all other terms.
Vector getTrueImpesWeights(int pressureVarIndex) const
{
OPM_TIMEBLOCK(getTrueImpesWeights);
Vector weights(rhs_->size());
ElementContext elemCtx(simulator_);
Amg::getTrueImpesWeights(pressureVarIndex, weights,

View File

@ -44,16 +44,19 @@ public:
virtual void pre(X& x, Y& b) override
{
OPM_TIMEBLOCK(pre);
block_precond_.pre(x, b);
}
virtual void apply(X& v, const Y& d) override
{
OPM_TIMEBLOCK(apply);
block_precond_.apply(v, d);
}
virtual void post(X& x) override
{
OPM_TIMEBLOCK(post);
block_precond_.post(x);
}
@ -65,6 +68,7 @@ public:
// The update() function does nothing for a wrapped preconditioner.
virtual void update() override
{
OPM_TIMEBLOCK(update);
orig_precond_.update();
}

View File

@ -95,6 +95,7 @@ void ghost_last_bilu0_decomposition (M& A, size_t interiorSize)
template<class M, class CRS, class InvVector>
void convertToCRS(const M& A, CRS& lower, CRS& upper, InvVector& inv)
{
OPM_TIMEBLOCK(convertToCRS);
// No need to do anything for 0 rows. Return to prevent indexing a
// zero-sized array.
if ( A.N() == 0 )
@ -285,6 +286,7 @@ template<class Matrix, class Domain, class Range, class ParallelInfoT>
void ParallelOverlappingILU0<Matrix,Domain,Range,ParallelInfoT>::
apply (Domain& v, const Range& d)
{
OPM_TIMEBLOCK(apply);
Range& md = reorderD(d);
Domain& mv = reorderV(v);
@ -354,6 +356,7 @@ template<class Matrix, class Domain, class Range, class ParallelInfoT>
void ParallelOverlappingILU0<Matrix,Domain,Range,ParallelInfoT>::
update()
{
OPM_TIMEBLOCK(update);
// (For older DUNE versions the communicator might be
// invalid if redistribution in AMG happened on the coarsest level.
// Therefore we check for nonzero size
@ -403,11 +406,13 @@ update()
try
{
OPM_TIMEBLOCK(iluDecomposition);
if (iluIteration_ == 0) {
// create ILU-0 decomposition
if (ordering_.empty())
{
if (ILU_) {
OPM_TIMEBLOCK(iluDecompositionMakeMatrix);
// The ILU_ matrix is already a copy with the same
// sparse structure as A_, but the values of A_ may
// have changed, so we must copy all elements.

View File

@ -31,6 +31,7 @@ namespace Opm
std::shared_ptr<Communication>& commRW,
const int nw)
{
OPM_TIMEBLOCK(extendCommunicatorWithWells);
// used for extending the coarse communicator pattern
using IndexSet = typename Communication::ParallelIndexSet;
using LocalIndex = typename IndexSet::LocalIndex;
@ -104,6 +105,7 @@ namespace Opm
virtual void createCoarseLevelSystem(const FineOperator& fineOperator) override
{
OPM_TIMEBLOCK(createCoarseLevelSystem);
using CoarseMatrix = typename CoarseOperator::matrix_type;
const auto& fineLevelMatrix = fineOperator.getmat();
const auto& nw = fineOperator.getNumberOfExtraEquations();
@ -172,6 +174,7 @@ namespace Opm
virtual void calculateCoarseEntries(const FineOperator& fineOperator) override
{
OPM_TIMEBLOCK(calculateCoarseEntries);
const auto& fineMatrix = fineOperator.getmat();
*coarseLevelMatrix_ = 0;
auto rowCoarse = coarseLevelMatrix_->begin();
@ -196,6 +199,7 @@ namespace Opm
}
}
if (prm_.get<bool>("add_wells")) {
OPM_TIMEBLOCK(cprwAddWellEquation);
assert(transpose == false); // not implemented
bool use_well_weights = prm_.get<bool>("use_well_weights");
fineOperator.addWellPressureEquations(*coarseLevelMatrix_, weights_, use_well_weights);
@ -209,6 +213,7 @@ namespace Opm
virtual void moveToCoarseLevel(const typename ParentType::FineRangeType& fine) override
{
OPM_TIMEBLOCK(moveToCoarseLevel);
// NB: we iterate over fine, assuming the well dofs are at the end
// Set coarse vector to zero
this->rhs_ = 0;
@ -233,6 +238,7 @@ namespace Opm
virtual void moveToFineLevel(typename ParentType::FineDomainType& fine) override
{
OPM_TIMEBLOCK(moveToFineLevel);
// NB: we iterate over fine, assuming the well dofs are at the end
auto end = fine.end(), begin = fine.begin();