Merge pull request #5724 from totto82/changeNUPCOL

Changes in how the simulator handles NUPCOL
This commit is contained in:
Tor Harald Sandve 2024-12-02 10:29:09 +01:00 committed by GitHub
commit 42e17219b6
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
7 changed files with 91 additions and 17 deletions

View File

@ -102,6 +102,8 @@ BlackoilModelParameters<Scalar>::BlackoilModelParameters()
convergence_monitoring_ = Parameters::Get<Parameters::ConvergenceMonitoring>(); convergence_monitoring_ = Parameters::Get<Parameters::ConvergenceMonitoring>();
convergence_monitoring_cutoff_ = Parameters::Get<Parameters::ConvergenceMonitoringCutOff>(); convergence_monitoring_cutoff_ = Parameters::Get<Parameters::ConvergenceMonitoringCutOff>();
convergence_monitoring_decay_factor_ = Parameters::Get<Parameters::ConvergenceMonitoringDecayFactor<Scalar>>(); convergence_monitoring_decay_factor_ = Parameters::Get<Parameters::ConvergenceMonitoringDecayFactor<Scalar>>();
nupcol_group_rate_tolerance_ = Parameters::Get<Parameters::NupcolGroupRateTolerance<Scalar>>();
} }
template<class Scalar> template<class Scalar>
@ -251,6 +253,9 @@ void BlackoilModelParameters<Scalar>::registerParameters()
Parameters::Register<Parameters::ConvergenceMonitoringDecayFactor<Scalar>> Parameters::Register<Parameters::ConvergenceMonitoringDecayFactor<Scalar>>
("Decay factor for convergence monitoring"); ("Decay factor for convergence monitoring");
Parameters::Register<Parameters::NupcolGroupRateTolerance<Scalar>>
("Tolerance for acceptable changes in VREP/REIN group rates");
Parameters::Hide<Parameters::DebugEmitCellPartition>(); Parameters::Hide<Parameters::DebugEmitCellPartition>();
// if openMP is available, determine the number threads per process automatically. // if openMP is available, determine the number threads per process automatically.

View File

@ -152,6 +152,9 @@ struct ConvergenceMonitoringCutOff { static constexpr int value = 6; };
template<class Scalar> template<class Scalar>
struct ConvergenceMonitoringDecayFactor { static constexpr Scalar value = 0.75; }; struct ConvergenceMonitoringDecayFactor { static constexpr Scalar value = 0.75; };
template<class Scalar>
struct NupcolGroupRateTolerance { static constexpr Scalar value = 0.001; };
} // namespace Opm::Parameters } // namespace Opm::Parameters
namespace Opm { namespace Opm {
@ -315,6 +318,10 @@ public:
/// Decay factor used in convergence monitoring /// Decay factor used in convergence monitoring
Scalar convergence_monitoring_decay_factor_; Scalar convergence_monitoring_decay_factor_;
// Relative tolerance of group rates (VREP, REIN)
// If violated the nupcol wellstate is updated
Scalar nupcol_group_rate_tolerance_;
/// Construct from user parameters or defaults. /// Construct from user parameters or defaults.
BlackoilModelParameters(); BlackoilModelParameters();

View File

@ -1185,7 +1185,9 @@ groupAndNetworkData(const int reportStepIdx) const
template<class Scalar> template<class Scalar>
void BlackoilWellModelGeneric<Scalar>:: void BlackoilWellModelGeneric<Scalar>::
updateAndCommunicateGroupData(const int reportStepIdx, updateAndCommunicateGroupData(const int reportStepIdx,
const int iterationIdx) const int iterationIdx,
const Scalar tol_nupcol,
DeferredLogger& deferred_logger)
{ {
const Group& fieldGroup = schedule().getGroup("FIELD", reportStepIdx); const Group& fieldGroup = schedule().getGroup("FIELD", reportStepIdx);
const int nupcol = schedule()[reportStepIdx].nupcol(); const int nupcol = schedule()[reportStepIdx].nupcol();
@ -1199,8 +1201,53 @@ updateAndCommunicateGroupData(const int reportStepIdx,
// before we copy to well_state_nupcol_. // before we copy to well_state_nupcol_.
this->wellState().updateGlobalIsGrup(comm_); this->wellState().updateGlobalIsGrup(comm_);
if (iterationIdx < nupcol) { if (iterationIdx <= nupcol) {
this->updateNupcolWGState(); this->updateNupcolWGState();
} else {
for (const auto& gr_name : schedule().groupNames(reportStepIdx)) {
const Phase all[] = { Phase::WATER, Phase::OIL, Phase::GAS };
for (Phase phase : all) {
if (this->groupState().has_injection_control(gr_name, phase)) {
if (this->groupState().injection_control(gr_name, phase) == Group::InjectionCMode::VREP ||
this->groupState().injection_control(gr_name, phase) == Group::InjectionCMode::REIN) {
const bool is_vrep = this->groupState().injection_control(gr_name, phase) == Group::InjectionCMode::VREP;
const Group& group = schedule().getGroup(gr_name, reportStepIdx);
const int np = this->wellState().numPhases();
Scalar gr_rate_nupcol = 0.0;
for (int phaseIdx = 0; phaseIdx < np; ++phaseIdx) {
gr_rate_nupcol += WellGroupHelpers<Scalar>::sumWellPhaseRates(is_vrep,
group,
schedule(),
this->nupcolWellState(),
reportStepIdx,
phaseIdx,
/*isInjector*/ false);
}
Scalar gr_rate = 0.0;
for (int phaseIdx = 0; phaseIdx < np; ++phaseIdx) {
gr_rate += WellGroupHelpers<Scalar>::sumWellPhaseRates(is_vrep,
group,
schedule(),
this->wellState(),
reportStepIdx,
phaseIdx,
/*isInjector*/ false);
}
Scalar small_rate = 1e-12; // m3/s
Scalar denominator = (0.5*gr_rate_nupcol + 0.5*gr_rate);
Scalar rel_change = denominator > small_rate ? std::abs( (gr_rate_nupcol - gr_rate) / denominator) : 0.0;
if ( rel_change > tol_nupcol) {
this->updateNupcolWGState();
const std::string control_str = is_vrep? "VREP" : "REIN";
const std::string msg = fmt::format("Group production relative change {} larger than tolerance {} "
"at iteration {}. Update {} for Group {} even if iteration is larger than {} given by NUPCOL." ,
rel_change, tol_nupcol, iterationIdx, control_str, gr_name, nupcol);
deferred_logger.debug(msg);
}
}
}
}
}
} }

View File

@ -378,7 +378,9 @@ protected:
const int reportStepIdx); const int reportStepIdx);
void updateAndCommunicateGroupData(const int reportStepIdx, void updateAndCommunicateGroupData(const int reportStepIdx,
const int iterationIdx); const int iterationIdx,
const Scalar tol_nupcol,
DeferredLogger& deferred_logger);
void inferLocalShutWells(); void inferLocalShutWells();

View File

@ -472,7 +472,9 @@ namespace Opm {
const int reportStepIdx = simulator_.episodeIndex(); const int reportStepIdx = simulator_.episodeIndex();
this->updateAndCommunicateGroupData(reportStepIdx, this->updateAndCommunicateGroupData(reportStepIdx,
simulator_.model().newtonMethod().numIterations()); simulator_.model().newtonMethod().numIterations(),
param_.nupcol_group_rate_tolerance_,
local_deferredLogger);
this->wellState().updateWellsDefaultALQ(this->schedule(), reportStepIdx, this->summaryState()); this->wellState().updateWellsDefaultALQ(this->schedule(), reportStepIdx, this->summaryState());
this->wellState().gliftTimeStepInit(); this->wellState().gliftTimeStepInit();
@ -2179,7 +2181,7 @@ namespace Opm {
const int iterationIdx = simulator_.model().newtonMethod().numIterations(); const int iterationIdx = simulator_.model().newtonMethod().numIterations();
const auto& comm = simulator_.vanguard().grid().comm(); const auto& comm = simulator_.vanguard().grid().comm();
this->updateAndCommunicateGroupData(episodeIdx, iterationIdx); this->updateAndCommunicateGroupData(episodeIdx, iterationIdx, param_.nupcol_group_rate_tolerance_, deferred_logger);
// network related // network related
bool more_network_update = false; bool more_network_update = false;
@ -2439,7 +2441,7 @@ namespace Opm {
const int iterationIdx, const int iterationIdx,
DeferredLogger& deferred_logger) DeferredLogger& deferred_logger)
{ {
this->updateAndCommunicateGroupData(reportStepIdx, iterationIdx); this->updateAndCommunicateGroupData(reportStepIdx, iterationIdx, param_.nupcol_group_rate_tolerance_, deferred_logger);
// updateWellStateWithTarget might throw for multisegment wells hence we // updateWellStateWithTarget might throw for multisegment wells hence we
// have a parallel try catch here to thrown on all processes. // have a parallel try catch here to thrown on all processes.
@ -2455,7 +2457,7 @@ namespace Opm {
} }
OPM_END_PARALLEL_TRY_CATCH("BlackoilWellModel::updateAndCommunicate failed: ", OPM_END_PARALLEL_TRY_CATCH("BlackoilWellModel::updateAndCommunicate failed: ",
simulator_.gridView().comm()) simulator_.gridView().comm())
this->updateAndCommunicateGroupData(reportStepIdx, iterationIdx); this->updateAndCommunicateGroupData(reportStepIdx, iterationIdx, param_.nupcol_group_rate_tolerance_, deferred_logger);
} }
template<typename TypeTag> template<typename TypeTag>

View File

@ -75,14 +75,19 @@ namespace {
return {oilRate, gasRate, waterRate}; return {oilRate, gasRate, waterRate};
} }
} // namespace Anonymous
namespace Opm {
template<class Scalar> template<class Scalar>
Scalar sumWellPhaseRates(bool res_rates, Scalar WellGroupHelpers<Scalar>::
const Opm::Group& group, sumWellPhaseRates(bool res_rates,
const Opm::Schedule& schedule, const Opm::Group& group,
const Opm::WellState<Scalar>& wellState, const Opm::Schedule& schedule,
const int reportStepIdx, const Opm::WellState<Scalar>& wellState,
const int phasePos, const int reportStepIdx,
const bool injector) const int phasePos,
const bool injector)
{ {
Scalar rate = 0.0; Scalar rate = 0.0;
@ -128,9 +133,6 @@ namespace {
} }
return rate; return rate;
} }
} // namespace Anonymous
namespace Opm {
template<class Scalar> template<class Scalar>
void WellGroupHelpers<Scalar>:: void WellGroupHelpers<Scalar>::

View File

@ -47,6 +47,15 @@ template<class Scalar>
class WellGroupHelpers class WellGroupHelpers
{ {
public: public:
static Scalar sumWellPhaseRates(bool res_rates,
const Opm::Group& group,
const Opm::Schedule& schedule,
const Opm::WellState<Scalar>& wellState,
const int reportStepIdx,
const int phasePos,
const bool injector);
static void setCmodeGroup(const Group& group, static void setCmodeGroup(const Group& group,
const Schedule& schedule, const Schedule& schedule,
const SummaryState& summaryState, const SummaryState& summaryState,