Mirror of https://github.com/OPM/opm-simulators.git (synced 2025-02-25 18:55:30 -06:00)
Merge pull request #806 from atgeirr/hnil-cache_less

Enable turning off storage cache.

Commit 876310f060
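
For orientation before the hunks below: a minimal sketch of the accumulation-term bookkeeping that this PR makes configurable. The flag and function names (recycleFirstIterationStorage, enableStorageCache, computeStorage) follow the diffs, but the types and the pore-volume scaling are simplified stand-ins rather than the actual OPM interfaces.

    #include <array>
    #include <cstddef>

    // Simplified stand-ins for the real OPM types; only the control flow matters here.
    using Storage = std::array<double, 3>; // one entry per conservation equation

    struct CellState {
        Storage storageFromCurrentPrimaryVars; // ~ computeStorage(iq at timeIdx 0)
        Storage storageFromOldIntensiveQuants; // ~ computeStorage(iq at timeIdx 1)
        Storage cachedOldStorage;              // the storage-cache slot for timeIdx 1
    };

    // Accumulation part of the residual for one cell in one Newton iteration.
    // If the storage at the start of the time step can be recycled from the current
    // primary variables (cheap path), reuse it; otherwise recompute it from the old
    // intensive quantities -- the fallback this PR enables.
    Storage accumulationResidual(CellState& c,
                                 bool enableStorageCache,
                                 bool recycleFirstIterationStorage,
                                 int newtonIteration,
                                 double poreVolumeOverDt)
    {
        Storage res = c.storageFromCurrentPrimaryVars;

        if (enableStorageCache) {
            if (newtonIteration == 0) {
                // Fill the cache once per time step.
                c.cachedOldStorage = recycleFirstIterationStorage
                    ? c.storageFromCurrentPrimaryVars  // nothing mass-relevant changed
                    : c.storageFromOldIntensiveQuants; // safe: recompute from old state
            }
            for (std::size_t eq = 0; eq < res.size(); ++eq)
                res[eq] -= c.cachedOldStorage[eq];
        }
        else {
            // No storage cache: always recompute the old storage term.
            for (std::size_t eq = 0; eq < res.size(); ++eq)
                res[eq] -= c.storageFromOldIntensiveQuants[eq];
        }

        for (double& r : res)
            r *= poreVolumeOverDt; // pore volume divided by the time step length

        return res;
    }

When recycling is impossible (the hunks below mention rsmax/DRSDT as an example), the linearizer takes the else branches and recomputes the old storage term from the previous time step's intensive quantities.
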
@@ -690,7 +690,7 @@ public:
         if (sg < -eps && s > 0.0 && FluidSystem::enableDissolvedGas()) {
             const Scalar& po = (*this)[pressureSwitchIdx];
             setPrimaryVarsMeaningGas(GasMeaning::Rs);
-            Scalar soMax = problem.maxOilSaturation(globalDofIdx);
+            Scalar soMax = std::max(s, problem.maxOilSaturation(globalDofIdx));
             Scalar rsMax = problem.maxGasDissolutionFactor(/*timeIdx=*/0, globalDofIdx);
             Scalar rsSat = enableExtbo ? ExtboModule::rs(pvtRegionIndex(),
                                                          po,
@@ -650,12 +650,12 @@ public:
         // synchronize the ghost DOFs (if necessary)
         asImp_().syncOverlap();

-        simulator_.problem().initialSolutionApplied();
-
         // also set the solutions of the "previous" time steps to the initial solution.
         for (unsigned timeIdx = 1; timeIdx < historySize; ++timeIdx)
             solution(timeIdx) = solution(/*timeIdx=*/0);

+        simulator_.problem().initialSolutionApplied();
+
 #ifndef NDEBUG
         for (unsigned timeIdx = 0; timeIdx < historySize; ++timeIdx) {
             const auto& sol = solution(timeIdx);
@@ -723,14 +723,18 @@ public:
      */
     const IntensiveQuantities* cachedIntensiveQuantities(unsigned globalIdx, unsigned timeIdx) const
     {
-        if (!enableIntensiveQuantityCache_ ||
-            !intensiveQuantityCacheUpToDate_[timeIdx][globalIdx])
-            return 0;
+        if (!enableIntensiveQuantityCache_ || !intensiveQuantityCacheUpToDate_[timeIdx][globalIdx]) {
+            return nullptr;
+        }

-        if (timeIdx > 0 && enableStorageCache_)
-            // with the storage cache enabled, only the intensive quantities for the most
-            // recent time step are cached!
-            return 0;
+        // With the storage cache enabled, usually only the
+        // intensive quantities for the most recent time step are
+        // cached. However, this may be false for some Problem
+        // variants, so we should check if the cache exists for
+        // the timeIdx in question.
+        if (timeIdx > 0 && enableStorageCache_ && intensiveQuantityCache_[timeIdx].empty()) {
+            return nullptr;
+        }

         return &intensiveQuantityCache_[timeIdx][globalIdx];
     }
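
A hypothetical caller of the revised accessor, to spell out the contract that a nullptr return means "not cached for this index, evaluate instead". The stand-in types below are placeholders; in the TpfaLinearizer hunks further down, the code simply calls intensiveQuantities() directly.

    // Placeholder types so the snippet is self-contained; the real code uses the
    // discretization's IntensiveQuantities and model interfaces.
    struct IntensiveQuantities { double pressure = 0.0; };

    struct ModelSketch {
        // Returns nullptr when caching is off, the entry is stale, or (with the storage
        // cache enabled) no cache exists for the requested time index -- as in the hunk above.
        const IntensiveQuantities* cachedIntensiveQuantities(unsigned globalIdx, unsigned timeIdx) const;
        // Full evaluation from the primary variables; always available.
        IntensiveQuantities intensiveQuantities(unsigned globalIdx, unsigned timeIdx) const;
    };

    IntensiveQuantities intensiveQuantitiesOrRecompute(const ModelSketch& model,
                                                       unsigned globalIdx,
                                                       unsigned timeIdx)
    {
        if (const IntensiveQuantities* cached = model.cachedIntensiveQuantities(globalIdx, timeIdx))
            return *cached;                                   // cache hit
        return model.intensiveQuantities(globalIdx, timeIdx); // fall back to a full evaluation
    }
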
@@ -800,7 +804,7 @@ public:
             for (; !threadedElemIt.isFinished(elemIt); elemIt = threadedElemIt.increment()) {
                 const Element& elem = *elemIt;
                 elemCtx.updatePrimaryStencil(elem);
-                elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0);
+                elemCtx.updatePrimaryIntensiveQuantities(timeIdx);
             }
         }
     }
@@ -818,10 +822,13 @@ public:
         if (!storeIntensiveQuantities())
             return;

-        if (enableStorageCache()) {
-            // if the storage term is cached, the intensive quantities of the previous
+        if (enableStorageCache() && simulator_.problem().recycleFirstIterationStorage()) {
+            // If the storage term is cached, the intensive quantities of the previous
             // time steps do not need to be accessed, and we can thus spare ourselves to
             // copy the objects for the intensive quantities.
+            // However, if the storage term at the start of the timestep cannot be deduced
+            // from the primary variables, we must calculate it from the old intensive
+            // quantities, and need to shift them.
             return;
         }

@@ -32,6 +32,7 @@
 #include "linearizationtype.hh"

 #include <opm/common/Exceptions.hpp>
+#include <opm/common/TimingMacros.hpp>
 #include <opm/grid/utility/SparseTable.hpp>

 #include <opm/models/parallel/gridcommhandles.hh>
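
The newly included TimingMacros.hpp provides OPM_TIMEBLOCK and OPM_TIMEBLOCK_LOCAL, which the following hunks add at the top of the linearization routines. Conceptually these are scoped timers; the sketch below only illustrates that idea and is not the opm-common implementation, which can compile to a no-op unless detailed timing support is enabled.

    #include <chrono>
    #include <iostream>
    #include <string>
    #include <utility>

    // Illustrative scoped timer in the spirit of OPM_TIMEBLOCK: it measures the time
    // from construction to destruction, i.e. the lifetime of the enclosing scope.
    class ScopedTimerSketch {
    public:
        explicit ScopedTimerSketch(std::string label)
            : label_(std::move(label))
            , start_(std::chrono::steady_clock::now())
        {}

        ~ScopedTimerSketch()
        {
            const auto elapsed = std::chrono::steady_clock::now() - start_;
            std::cout << label_ << ": "
                      << std::chrono::duration<double>(elapsed).count() << " s\n";
        }

    private:
        std::string label_;
        std::chrono::steady_clock::time_point start_;
    };

    // Usage analogous to the diffs below: TIMEBLOCK_SKETCH(linearizeDomain);
    #define TIMEBLOCK_SKETCH(name) ScopedTimerSketch timeblockGuard_##name(#name)
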
@@ -192,6 +193,7 @@ public:
      */
     void linearizeDomain()
     {
+        OPM_TIMEBLOCK(linearizeDomain);
         // we defer the initialization of the Jacobian matrix until here because the
         // auxiliary modules usually assume the problem, model and grid to be fully
         // initialized...
@@ -232,6 +234,7 @@ public:
      */
     void linearizeAuxiliaryEquations()
     {
+        OPM_TIMEBLOCK(linearizeAuxiliaryEquations);
         // flush possible local caches into matrix structure
         jacobian_->commit();

@@ -446,6 +449,7 @@ private:
     // linearize the whole system
     void linearize_()
     {
+        OPM_TIMEBLOCK(linearize_);
         resetSystem_();

         // before the first iteration of each time step, we need to update the
@@ -532,6 +532,15 @@ public:
 private:
     void linearize_()
     {
+        // This check should be removed once this is addressed by
+        // for example storing the previous timesteps' values for
+        // rsmax (for DRSDT) and similar.
+        if (!problem_().recycleFirstIterationStorage()) {
+            if (!model_().storeIntensiveQuantities() && !model_().enableStorageCache()) {
+                OPM_THROW(std::runtime_error, "Must have cached either IQs or storage when we cannot recycle.");
+            }
+        }
+
         OPM_TIMEBLOCK(linearize);
         resetSystem_();
         unsigned numCells = model_().numTotalDof();
@@ -547,11 +556,7 @@ private:
             MatrixBlock bMat(0.0);
             ADVectorBlock adres(0.0);
             ADVectorBlock darcyFlux(0.0);
-            const IntensiveQuantities* intQuantsInP = model_().cachedIntensiveQuantities(globI, /*timeIdx*/ 0);
-            if (intQuantsInP == nullptr) {
-                throw std::logic_error("Missing updated intensive quantities for cell " + std::to_string(globI));
-            }
-            const IntensiveQuantities& intQuantsIn = *intQuantsInP;
+            const IntensiveQuantities& intQuantsIn = model_().intensiveQuantities(globI, /*timeIdx*/ 0);

             // Flux term.
             {
@@ -565,11 +570,7 @@ private:
                 bMat = 0.0;
                 adres = 0.0;
                 darcyFlux = 0.0;
-                const IntensiveQuantities* intQuantsExP = model_().cachedIntensiveQuantities(globJ, /*timeIdx*/ 0);
-                if (intQuantsExP == nullptr) {
-                    throw std::logic_error("Missing updated intensive quantities for cell " + std::to_string(globJ) + " when assembling fluxes for cell " + std::to_string(globI));
-                }
-                const IntensiveQuantities& intQuantsEx = *intQuantsExP;
+                const IntensiveQuantities& intQuantsEx = model_().intensiveQuantities(globJ, /*timeIdx*/ 0);
                 LocalResidual::computeFlux(
                     adres, darcyFlux, problem_(), globI, globJ, intQuantsIn, intQuantsEx,
                     nbInfo.trans, nbInfo.faceArea, nbInfo.faceDirection);
@@ -605,15 +606,36 @@ private:
                 LocalResidual::computeStorage(adres, intQuantsIn);
             }
             setResAndJacobi(res, bMat, adres);
-            // TODO: check recycleFirst etc.
-            // first we use it as storage cache
-            if (model_().newtonMethod().numIterations() == 0) {
-                model_().updateCachedStorage(globI, /*timeIdx=*/1, res);
+            // Either use cached storage term, or compute it on the fly.
+            if (model_().enableStorageCache()) {
+                // The cached storage for timeIdx 0 (current time) is not
+                // used, but after storage cache is shifted at the end of the
+                // timestep, it will become cached storage for timeIdx 1.
+                model_().updateCachedStorage(globI, /*timeIdx=*/0, res);
+                if (model_().newtonMethod().numIterations() == 0) {
+                    // Need to update the storage cache.
+                    if (problem_().recycleFirstIterationStorage()) {
+                        // Assumes nothing have changed in the system which
+                        // affects masses calculated from primary variables.
+                        model_().updateCachedStorage(globI, /*timeIdx=*/1, res);
+                    } else {
+                        Dune::FieldVector<Scalar, numEq> tmp;
+                        IntensiveQuantities intQuantOld = model_().intensiveQuantities(globI, 1);
+                        LocalResidual::computeStorage(tmp, intQuantOld);
+                        model_().updateCachedStorage(globI, /*timeIdx=*/1, tmp);
+                    }
+                }
+                res -= model_().cachedStorage(globI, 1);
+            } else {
+                OPM_TIMEBLOCK_LOCAL(computeStorage0);
+                Dune::FieldVector<Scalar, numEq> tmp;
+                IntensiveQuantities intQuantOld = model_().intensiveQuantities(globI, 1);
+                LocalResidual::computeStorage(tmp, intQuantOld);
+                // assume volume do not change
+                res -= tmp;
             }
-            res -= model_().cachedStorage(globI, 1);
             res *= storefac;
             bMat *= storefac;
-            // residual_[globI] -= model_().cachedStorage(globI, 1); //*storefac;
             residual_[globI] += res;
             //SparseAdapter syntax: jacobian_->addToBlock(globI, globI, bMat);
             *diagMatAddress_[globI] += bMat;
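
To make the timeIdx bookkeeping in the last hunk concrete: the cached storage written at timeIdx 0 is not read during the current step, but when the step is accepted the cache is shifted so that this value becomes the timeIdx 1 entry of the next step (as the comments in the diff state). A minimal sketch of that lifecycle, with illustrative names rather than the actual model interface:

    #include <array>
    #include <cstddef>
    #include <vector>

    // Illustrative two-slot storage cache: slot 0 holds the storage evaluated at the
    // current iterate, slot 1 the storage at the beginning of the time step.
    struct StorageCacheSketch {
        using Storage = std::array<double, 3>;
        std::array<std::vector<Storage>, 2> slots; // [timeIdx][globalDofIdx]

        explicit StorageCacheSketch(std::size_t numDof)
        {
            slots[0].resize(numDof);
            slots[1].resize(numDof);
        }

        // Called from the linearizer (cf. updateCachedStorage in the diff above).
        void update(unsigned globalDofIdx, unsigned timeIdx, const Storage& value)
        {
            slots[timeIdx][globalDofIdx] = value;
        }

        // Called when the time step is accepted: the "current" entries become the
        // "previous" ones, so the next step starts from a consistent old-storage cache.
        void shiftAtEndOfTimeStep()
        {
            slots[1] = slots[0];
        }
    };
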