Merge pull request #806 from atgeirr/hnil-cache_less

Enable turning off storage cache.
Bård Skaflestad 2023-05-26 16:19:26 +02:00 committed by GitHub
commit 876310f060
4 changed files with 62 additions and 29 deletions
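
The storage cache mentioned in the title is the per-cell cache of storage terms used by the implicit assembly; in the opm-models framework it is typically toggled through the EnableStorageCache run-time parameter (that name comes from the surrounding framework, not from this diff). The hunks below also consult a problem hook, recycleFirstIterationStorage(), which lets a problem state that the storage term from the first Newton iteration may not simply be reused. A minimal sketch of a problem variant opting out of recycling (hypothetical class; it assumes the hook is a method on the problem interface that a derived problem can shadow, as the hunks suggest):

    #include <opm/models/utils/propertysystem.hh> // for GetPropType (header path assumed)

    // Hypothetical problem variant: do not recycle the storage computed in the
    // first Newton iteration; the linearizer then recomputes the start-of-step
    // storage from the previous time step's intensive quantities instead.
    template <class TypeTag>
    class MyCacheLessProblem : public GetPropType<TypeTag, Properties::BaseProblem>
    {
        using ParentType = GetPropType<TypeTag, Properties::BaseProblem>;
    public:
        using ParentType::ParentType;

        bool recycleFirstIterationStorage() const
        { return false; }
    };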

View File

@@ -690,7 +690,7 @@ public:
if (sg < -eps && s > 0.0 && FluidSystem::enableDissolvedGas()) {
const Scalar& po = (*this)[pressureSwitchIdx];
setPrimaryVarsMeaningGas(GasMeaning::Rs);
Scalar soMax = problem.maxOilSaturation(globalDofIdx);
Scalar soMax = std::max(s, problem.maxOilSaturation(globalDofIdx));
Scalar rsMax = problem.maxGasDissolutionFactor(/*timeIdx=*/0, globalDofIdx);
Scalar rsSat = enableExtbo ? ExtboModule::rs(pvtRegionIndex(),
po,

View File

@@ -650,12 +650,12 @@ public:
// synchronize the ghost DOFs (if necessary)
asImp_().syncOverlap();
simulator_.problem().initialSolutionApplied();
// also set the solutions of the "previous" time steps to the initial solution.
for (unsigned timeIdx = 1; timeIdx < historySize; ++timeIdx)
solution(timeIdx) = solution(/*timeIdx=*/0);
simulator_.problem().initialSolutionApplied();
#ifndef NDEBUG
for (unsigned timeIdx = 0; timeIdx < historySize; ++timeIdx) {
const auto& sol = solution(timeIdx);
@@ -723,14 +723,18 @@ public:
*/
const IntensiveQuantities* cachedIntensiveQuantities(unsigned globalIdx, unsigned timeIdx) const
{
if (!enableIntensiveQuantityCache_ ||
!intensiveQuantityCacheUpToDate_[timeIdx][globalIdx])
return 0;
if (!enableIntensiveQuantityCache_ || !intensiveQuantityCacheUpToDate_[timeIdx][globalIdx]) {
return nullptr;
}
if (timeIdx > 0 && enableStorageCache_)
// with the storage cache enabled, only the intensive quantities for the most
// recent time step are cached!
return 0;
// With the storage cache enabled, usually only the
// intensive quantities for the most recent time step are
// cached. However, this may be false for some Problem
// variants, so we should check if the cache exists for
// the timeIdx in question.
if (timeIdx > 0 && enableStorageCache_ && intensiveQuantityCache_[timeIdx].empty()) {
return nullptr;
}
return &intensiveQuantityCache_[timeIdx][globalIdx];
}
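
With this change a nullptr return no longer necessarily indicates a logic error: it simply says that nothing usable is cached for that (timeIdx, globalIdx), and callers are expected to fall back to evaluating the intensive quantities from the primary variables, as the tpfalinearizer hunks further down now do via model_().intensiveQuantities(globI, timeIdx). A small sketch of that caller pattern (hypothetical helper, not part of the diff):

    // Hypothetical helper: prefer the cached object, otherwise evaluate the
    // intensive quantities of the requested time level on the fly.
    template <class Model>
    auto intensiveQuantitiesOrRecompute(const Model& model, unsigned globalIdx, unsigned timeIdx)
    {
        if (const auto* cached = model.cachedIntensiveQuantities(globalIdx, timeIdx))
            return *cached; // an up-to-date cache entry exists
        // nothing cached for this (globalIdx, timeIdx): recompute from primary variables
        return model.intensiveQuantities(globalIdx, timeIdx);
    }
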
@@ -800,7 +804,7 @@ public:
for (; !threadedElemIt.isFinished(elemIt); elemIt = threadedElemIt.increment()) {
const Element& elem = *elemIt;
elemCtx.updatePrimaryStencil(elem);
elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0);
elemCtx.updatePrimaryIntensiveQuantities(timeIdx);
}
}
}
@@ -818,10 +822,13 @@ public:
if (!storeIntensiveQuantities())
return;
if (enableStorageCache()) {
// if the storage term is cached, the intensive quantities of the previous
if (enableStorageCache() && simulator_.problem().recycleFirstIterationStorage()) {
// If the storage term is cached, the intensive quantities of the previous
// time steps do not need to be accessed, and we can thus spare ourselves
// the copying of the intensive quantity objects.
// However, if the storage term at the start of the timestep cannot be deduced
// from the primary variables, we must calculate it from the old intensive
// quantities, and need to shift them.
return;
}
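
When this early return is not taken (the storage cache is off, or the problem cannot recycle the first-iteration storage), the previous intensive quantities are still needed, so the cached objects are shifted one time level back before the new time step starts. Conceptually the shift amounts to the following (sketch only; the loop bound numGridDof is an assumed name, the cache members match those used elsewhere in this file):

    // Sketch: entries cached for the current time level (timeIdx 0) become the
    // cached entries for the previous time level (timeIdx 1).
    for (unsigned dofIdx = 0; dofIdx < numGridDof; ++dofIdx) {
        intensiveQuantityCache_[/*timeIdx=*/1][dofIdx] =
            intensiveQuantityCache_[/*timeIdx=*/0][dofIdx];
        intensiveQuantityCacheUpToDate_[/*timeIdx=*/1][dofIdx] =
            intensiveQuantityCacheUpToDate_[/*timeIdx=*/0][dofIdx];
    }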

View File

@@ -32,6 +32,7 @@
#include "linearizationtype.hh"
#include <opm/common/Exceptions.hpp>
#include <opm/common/TimingMacros.hpp>
#include <opm/grid/utility/SparseTable.hpp>
#include <opm/models/parallel/gridcommhandles.hh>
@@ -192,6 +193,7 @@ public:
*/
void linearizeDomain()
{
OPM_TIMEBLOCK(linearizeDomain);
// we defer the initialization of the Jacobian matrix until here because the
// auxiliary modules usually assume the problem, model and grid to be fully
// initialized...
@@ -232,6 +234,7 @@ public:
*/
void linearizeAuxiliaryEquations()
{
OPM_TIMEBLOCK(linearizeAuxiliaryEquations);
// flush possible local caches into matrix structure
jacobian_->commit();
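
The OPM_TIMEBLOCK and OPM_TIMEBLOCK_LOCAL annotations added in this file (and in the tpfalinearizer hunks below) come from the newly included opm/common/TimingMacros.hpp; depending on the build they may expand to nothing or to a profiler hook. The idea is a scoped timer tied to the lifetime of the enclosing block; a self-contained sketch of that pattern (illustrative only, not the actual OPM macros):

    #include <chrono>
    #include <iostream>
    #include <string>

    // Illustrative RAII timer: starts when the block is entered and reports the
    // elapsed time when the block is left.
    class ScopedTimer
    {
    public:
        explicit ScopedTimer(std::string name)
            : name_(std::move(name))
            , start_(std::chrono::steady_clock::now())
        {}

        ~ScopedTimer()
        {
            const auto elapsed = std::chrono::steady_clock::now() - start_;
            std::cout << name_ << ": "
                      << std::chrono::duration<double>(elapsed).count() << " s\n";
        }

    private:
        std::string name_;
        std::chrono::steady_clock::time_point start_;
    };

    // A macro in the spirit of OPM_TIMEBLOCK (hypothetical, for illustration):
    #define MY_TIMEBLOCK(name) ScopedTimer myTimeBlock_##name(#name)
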
@@ -446,6 +449,7 @@ private:
// linearize the whole system
void linearize_()
{
OPM_TIMEBLOCK(linearize_);
resetSystem_();
// before the first iteration of each time step, we need to update the

View File

@@ -532,6 +532,15 @@ public:
private:
void linearize_()
{
// This check should be removed once the underlying issue is addressed,
// for example by storing the previous time steps' values for
// rsmax (for DRSDT) and similar quantities.
if (!problem_().recycleFirstIterationStorage()) {
if (!model_().storeIntensiveQuantities() && !model_().enableStorageCache()) {
OPM_THROW(std::runtime_error, "Must have cached either IQs or storage when we cannot recycle.");
}
}
OPM_TIMEBLOCK(linearize);
resetSystem_();
unsigned numCells = model_().numTotalDof();
@@ -547,11 +556,7 @@ private:
MatrixBlock bMat(0.0);
ADVectorBlock adres(0.0);
ADVectorBlock darcyFlux(0.0);
const IntensiveQuantities* intQuantsInP = model_().cachedIntensiveQuantities(globI, /*timeIdx*/ 0);
if (intQuantsInP == nullptr) {
throw std::logic_error("Missing updated intensive quantities for cell " + std::to_string(globI));
}
const IntensiveQuantities& intQuantsIn = *intQuantsInP;
const IntensiveQuantities& intQuantsIn = model_().intensiveQuantities(globI, /*timeIdx*/ 0);
// Flux term.
{
@@ -565,11 +570,7 @@ private:
bMat = 0.0;
adres = 0.0;
darcyFlux = 0.0;
const IntensiveQuantities* intQuantsExP = model_().cachedIntensiveQuantities(globJ, /*timeIdx*/ 0);
if (intQuantsExP == nullptr) {
throw std::logic_error("Missing updated intensive quantities for cell " + std::to_string(globJ) + " when assembling fluxes for cell " + std::to_string(globI));
}
const IntensiveQuantities& intQuantsEx = *intQuantsExP;
const IntensiveQuantities& intQuantsEx = model_().intensiveQuantities(globJ, /*timeIdx*/ 0);
LocalResidual::computeFlux(
adres, darcyFlux, problem_(), globI, globJ, intQuantsIn, intQuantsEx,
nbInfo.trans, nbInfo.faceArea, nbInfo.faceDirection);
@@ -605,15 +606,36 @@ private:
LocalResidual::computeStorage(adres, intQuantsIn);
}
setResAndJacobi(res, bMat, adres);
// TODO: check recycleFirst etc.
// first we use it as storage cache
if (model_().newtonMethod().numIterations() == 0) {
model_().updateCachedStorage(globI, /*timeIdx=*/1, res);
// Either use cached storage term, or compute it on the fly.
if (model_().enableStorageCache()) {
// The cached storage for timeIdx 0 (current time) is not
// used, but after the storage cache is shifted at the end of the
// time step, it will become the cached storage for timeIdx 1.
model_().updateCachedStorage(globI, /*timeIdx=*/0, res);
if (model_().newtonMethod().numIterations() == 0) {
// Need to update the storage cache.
if (problem_().recycleFirstIterationStorage()) {
// Assumes nothing has changed in the system that
// affects the masses calculated from the primary variables.
model_().updateCachedStorage(globI, /*timeIdx=*/1, res);
} else {
Dune::FieldVector<Scalar, numEq> tmp;
IntensiveQuantities intQuantOld = model_().intensiveQuantities(globI, 1);
LocalResidual::computeStorage(tmp, intQuantOld);
model_().updateCachedStorage(globI, /*timeIdx=*/1, tmp);
}
}
res -= model_().cachedStorage(globI, 1);
} else {
OPM_TIMEBLOCK_LOCAL(computeStorage0);
Dune::FieldVector<Scalar, numEq> tmp;
IntensiveQuantities intQuantOld = model_().intensiveQuantities(globI, 1);
LocalResidual::computeStorage(tmp, intQuantOld);
// assume the volume does not change
res -= tmp;
}
res -= model_().cachedStorage(globI, 1);
res *= storefac;
bMat *= storefac;
// residual_[globI] -= model_().cachedStorage(globI, 1); //*storefac;
residual_[globI] += res;
//SparseAdapter syntax: jacobian_->addToBlock(globI, globI, bMat);
*diagMatAddress_[globI] += bMat;
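
For orientation: together with the scaling by storefac (assumed to be the cell volume divided by the time step length, set up earlier in this function outside the hunk), the storage branch above assembles the backward-Euler accumulation term, i.e. residual += (storage at current iterate - storage at previous time step) * volume / dt. A scalar toy illustration of that arithmetic (not OPM code; all numbers are made up):

    #include <iostream>

    int main()
    {
        const double volume = 100.0;     // cell volume, hypothetical units
        const double dt = 86400.0;       // time step length in seconds, hypothetical
        const double storageOld = 0.25;  // conserved quantity per volume at the previous time step
        const double storageNew = 0.27;  // conserved quantity per volume at the current Newton iterate
        const double storefac = volume / dt;
        // accumulation contribution to the residual, analogous to
        // (res - cachedStorage) * storefac in the hunk above
        std::cout << (storageNew - storageOld) * storefac << "\n";
        return 0;
    }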