Merge pull request #5680 from lisajulia/feature/ms-wells

Feature/ms wells - part 1: Initial assembly of B, C, D and the residual
Kai Bao
2024-11-21 22:54:48 +01:00
committed by GitHub
20 changed files with 419 additions and 135 deletions

View File

@@ -272,7 +272,11 @@ namespace Opm {
Dune::Timer perfTimer;
perfTimer.start();
// update the solution variables in the model
if ( timer.lastStepFailed() ) {
int lastStepFailed = timer.lastStepFailed();
if (grid_.comm().size() > 1 && lastStepFailed != grid_.comm().min(lastStepFailed)) {
OPM_THROW(std::runtime_error, fmt::format("Misalignment of the parallel simulation run in prepareStep - the previous step failed on rank {} but succeeded on other ranks.", grid_.comm().rank()));
}
if ( lastStepFailed ) {
simulator_.model().updateFailed();
} else {
simulator_.model().advanceTimeLevel();
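
The guard above follows a common pattern for detecting rank divergence: every rank feeds its local status into a min reduction and compares the result with its own value. A minimal sketch of the pattern, assuming a Dune-style communication object that provides rank(), size() and min():

#include <stdexcept>
#include <string>

// Sketch: detect ranks whose local status disagrees with the global minimum.
// Only ranks with a status above the minimum reach the throw, i.e. the ranks
// that diverged from at least one other rank.
template <class Communication>
void checkStatusConsistency(const Communication& comm, int localStatus)
{
    if (comm.size() > 1 && localStatus != comm.min(localStatus)) {
        throw std::runtime_error("Rank " + std::to_string(comm.rank()) +
                                 " diverged from the other ranks.");
    }
}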

View File

@@ -96,6 +96,10 @@ assembleControlEq(const WellState<Scalar>& well_state,
const bool stopped_or_zero_target,
DeferredLogger& deferred_logger) const
{
/*
This function assembles the control equation, similar to the StandardWell case.
It does *not* need communication.
*/
static constexpr int Gas = BlackoilPhases::Vapour;
static constexpr int Oil = BlackoilPhases::Liquid;
static constexpr int Water = BlackoilPhases::Aqua;
@@ -212,6 +216,12 @@ assembleAccelerationTerm(const int seg_target,
// acceleration term should be
// * velocity head for seg_target if seg = seg_target
// * negative velocity head for seg if seg != seg_target
/*
This method is called in MultisegmentWellEval::assembleAccelerationPressureLoss.
It does *not* need communication.
*/
MultisegmentWellEquationAccess<Scalar,numWellEq,Indices::numEq> eqns(eqns1);
eqns.residual()[seg_target][SPres] -= accelerationTerm.value();
eqns.D()[seg_target][seg][SPres][SPres] -= accelerationTerm.derivative(SPres + Indices::numEq);
@@ -231,6 +241,11 @@ assembleHydroPressureLoss(const int seg,
const EvalWell& hydro_pressure_drop_seg,
Equations& eqns1) const
{
/*
This method is called in MultisegmentWellEval::assembleAccelerationAndHydroPressureLosses.
It does *not* need communication.
*/
MultisegmentWellEquationAccess<Scalar,numWellEq,Indices::numEq> eqns(eqns1);
eqns.residual()[seg][SPres] -= hydro_pressure_drop_seg.value();
for (int pv_idx = 0; pv_idx < numWellEq; ++pv_idx) {
@@ -246,6 +261,9 @@ assemblePressureEqExtraDerivatives(const int seg,
const EvalWell& extra_derivatives,
Equations& eqns1) const
{
/*
This method does *not* need communication.
*/
MultisegmentWellEquationAccess<Scalar,numWellEq,Indices::numEq> eqns(eqns1);
// disregard residual
// Frac - derivatives are zero (they belong to upwind^2)
@@ -265,6 +283,9 @@ assemblePressureEq(const int seg,
bool wfrac,
bool gfrac) const
{
/*
This method does *not* need communication.
*/
MultisegmentWellEquationAccess<Scalar,numWellEq,Indices::numEq> eqns(eqns1);
eqns.residual()[seg][SPres] += pressure_equation.value();
eqns.D()[seg][seg][SPres][SPres] += pressure_equation.derivative(SPres + Indices::numEq);
@@ -289,6 +310,13 @@ assembleTrivialEq(const int seg,
const Scalar value,
Equations& eqns1) const
{
/*
This method is called from MultisegmentWellEval::assembleICDPressureEq,
which is called from MultisegmentWellEval::assemblePressureEq.
This is the counterpart to assembleControlEq: assembleControlEq is responsible for the top segment,
while assembleICDPressureEq is responsible for the remaining segments.
This method does *not* need communication.
*/
MultisegmentWellEquationAccess<Scalar,numWellEq,Indices::numEq> eqns(eqns1);
eqns.residual()[seg][SPres] = value;
eqns.D()[seg][seg][SPres][WQTotal] = 1.;
@@ -301,6 +329,10 @@ assembleAccumulationTerm(const int seg,
const EvalWell& accumulation_term,
Equations& eqns1) const
{
/*
This method is called from MultisegmentWell::assembleWellEqWithoutIteration.
It only assembles on the diagonal of D and it does *not* need communication.
*/
MultisegmentWellEquationAccess<Scalar,numWellEq,Indices::numEq> eqns(eqns1);
eqns.residual()[seg][comp_idx] += accumulation_term.value();
for (int pv_idx = 0; pv_idx < numWellEq; ++pv_idx) {
@@ -316,6 +348,10 @@ assembleOutflowTerm(const int seg,
const EvalWell& segment_rate,
Equations& eqns1) const
{
/*
This method is called from MultisegmentWell::assembleWellEqWithoutIteration.
It does *not* need communication.
*/
MultisegmentWellEquationAccess<Scalar,numWellEq,Indices::numEq> eqns(eqns1);
eqns.residual()[seg][comp_idx] -= segment_rate.value();
eqns.D()[seg][seg][comp_idx][WQTotal] -= segment_rate.derivative(WQTotal + Indices::numEq);
@@ -337,6 +373,10 @@ assembleInflowTerm(const int seg,
const EvalWell& inlet_rate,
Equations& eqns1) const
{
/*
This method is called from MultisegmentWell::assembleWellEqWithoutIteration.
It does *not* need communication.
*/
MultisegmentWellEquationAccess<Scalar,numWellEq,Indices::numEq> eqns(eqns1);
eqns.residual()[seg][comp_idx] += inlet_rate.value();
eqns.D()[seg][inlet][comp_idx][WQTotal] += inlet_rate.derivative(WQTotal + Indices::numEq);
@@ -352,11 +392,17 @@ assembleInflowTerm(const int seg,
template<class FluidSystem, class Indices>
void MultisegmentWellAssemble<FluidSystem,Indices>::
assemblePerforationEq(const int seg,
const int cell_idx,
const int local_perf_index,
const int comp_idx,
const EvalWell& cq_s_effective,
Equations& eqns1) const
{
/*
This method is called from MultisegmentWell::assembleWellEqWithoutIteration.
It *does* need communication: this method only assembles the parts of the matrix that this process is responsible for.
After calling this function, the diagonal of the matrix D and the residual need to be combined across processes
by calling MultisegmentWellEquations::sumDistributed.
*/
MultisegmentWellEquationAccess<Scalar,numWellEq,Indices::numEq> eqns(eqns1);
// subtract sum of phase fluxes in the well equations.
eqns.residual()[seg][comp_idx] += cq_s_effective.value();
@@ -364,7 +410,7 @@ assemblePerforationEq(const int seg,
// assemble the jacobians
for (int pv_idx = 0; pv_idx < numWellEq; ++pv_idx) {
// also need to consider the efficiency factor when manipulating the jacobians.
eqns.C()[seg][cell_idx][pv_idx][comp_idx] -= cq_s_effective.derivative(pv_idx + Indices::numEq); // input in transformed matrix
eqns.C()[seg][local_perf_index][pv_idx][comp_idx] -= cq_s_effective.derivative(pv_idx + Indices::numEq); // input in transformed matrix
// the index name for the D should be eq_idx / pv_idx
eqns.D()[seg][seg][comp_idx][pv_idx] += cq_s_effective.derivative(pv_idx + Indices::numEq);
@@ -372,7 +418,7 @@ assemblePerforationEq(const int seg,
for (int pv_idx = 0; pv_idx < Indices::numEq; ++pv_idx) {
// also need to consider the efficiency factor when manipulating the jacobians.
eqns.B()[seg][cell_idx][comp_idx][pv_idx] += cq_s_effective.derivative(pv_idx);
eqns.B()[seg][local_perf_index][comp_idx][pv_idx] += cq_s_effective.derivative(pv_idx);
}
}
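
The two-phase pattern used here — assemble only the locally owned perforations, then combine the partial sums once per well — can be sketched with stand-in types (ToyWellInfo below is hypothetical; globalToLocal mirrors the ParallelWellInfo method introduced in this PR):

#include <map>
#include <vector>

// Toy model: each rank assembles only the perforations it owns
// (globalToLocal returns -1 otherwise); the partial sums are later
// combined by a collective reduction such as sumDistributed().
struct ToyWellInfo {
    std::map<int, int> global_to_local; // filled from this rank's index set

    int globalToLocal(int global) const {
        const auto it = global_to_local.find(global);
        return it == global_to_local.end() ? -1 : it->second;
    }
};

void assembleSegment(const ToyWellInfo& pw_info,
                     const std::vector<int>& perfs,    // global perforation indices
                     const std::vector<double>& rates, // local, one entry per owned perf
                     double& diag)                     // this rank's share of D[seg][seg]
{
    for (const int perf : perfs) {
        const int local = pw_info.globalToLocal(perf);
        if (local < 0)
            continue; // owned by another rank; accounted for after the reduction
        diag += rates[local];
    }
}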

View File

@@ -139,7 +139,7 @@ public:
//! \brief Assemble equation for a perforation.
void assemblePerforationEq(const int seg,
const int cell_idx,
const int local_perf_index,
const int comp_idx,
const EvalWell& cq_s_effective,
Equations& eqns) const;

View File

@@ -37,6 +37,7 @@
#include <opm/simulators/linalg/matrixblock.hh>
#include <opm/simulators/linalg/SmallDenseMatrixUtils.hpp>
#include <opm/simulators/wells/WellHelpers.hpp>
#include <opm/simulators/wells/MSWellHelpers.hpp>
#include <opm/simulators/wells/MultisegmentWellGeneric.hpp>
#include <opm/simulators/wells/WellInterfaceGeneric.hpp>
@@ -48,8 +49,9 @@ namespace Opm {
template<class Scalar, int numWellEq, int numEq>
MultisegmentWellEquations<Scalar,numWellEq,numEq>::
MultisegmentWellEquations(const MultisegmentWellGeneric<Scalar>& well)
MultisegmentWellEquations(const MultisegmentWellGeneric<Scalar>& well, const ParallelWellInfo<Scalar>& pw_info)
: well_(well)
, pw_info_(pw_info)
{
}
@@ -107,7 +109,10 @@ init(const int numPerfs,
end = duneC_.createend(); row != end; ++row) {
// the number of the row corresponds to the segment number now.
for (const int& perf : perforations[row.index()]) {
row.insert(perf);
const int local_perf_index = pw_info_.globalToLocal(perf);
if (local_perf_index < 0) // then the perforation is not on this process
continue;
row.insert(local_perf_index);
}
}
@@ -116,7 +121,10 @@ init(const int numPerfs,
end = duneB_.createend(); row != end; ++row) {
// the number of the row corresponds to the segment number now.
for (const int& perf : perforations[row.index()]) {
row.insert(perf);
const int local_perf_index = pw_info_.globalToLocal(perf);
if (local_perf_index < 0) // then the perforation is not on this process
continue;
row.insert(local_perf_index);
}
}
@@ -402,6 +410,16 @@ extractCPRPressureMatrix(PressureMatrix& jacobian,
}
}
template<class Scalar, int numWellEq, int numEq>
void MultisegmentWellEquations<Scalar,numWellEq,numEq>::
sumDistributed(Parallel::Communication comm)
{
// accumulate resWell_ and duneD_ in parallel to get the effects of all perforations (which might be distributed)
// we need to do this for all segments, in the residual and on the diagonal of D
for (int seg = 0; seg < well_.numberOfSegments(); ++seg)
Opm::wellhelpers::sumDistributedWellEntries(duneD_[seg][seg], resWell_[seg], comm);
}
#define INSTANTIATE(T, numWellEq, numEq) \
template class MultisegmentWellEquations<T,numWellEq,numEq>; \
template void MultisegmentWellEquations<T,numWellEq,numEq>:: \

View File

@@ -22,6 +22,8 @@
#ifndef OPM_MULTISEGMENTWELL_EQUATIONS_HEADER_INCLUDED
#define OPM_MULTISEGMENTWELL_EQUATIONS_HEADER_INCLUDED
#include <opm/simulators/utils/ParallelCommunication.hpp>
#include <opm/simulators/wells/ParallelWellInfo.hpp>
#include <dune/common/fmatrix.hh>
#include <dune/common/fvector.hh>
#include <dune/istl/bcrsmatrix.hh>
@@ -67,7 +69,7 @@ public:
using OffDiagMatrixBlockWellType = Dune::FieldMatrix<Scalar,numWellEq,numEq>;
using OffDiagMatWell = Dune::BCRSMatrix<OffDiagMatrixBlockWellType>;
MultisegmentWellEquations(const MultisegmentWellGeneric<Scalar>& well);
MultisegmentWellEquations(const MultisegmentWellGeneric<Scalar>& well, const ParallelWellInfo<Scalar>& pw_info);
//! \brief Setup sparsity pattern for the matrices.
//! \param numPerfs Number of perforations
@@ -120,6 +122,9 @@ public:
const int seg_pressure_var_ind,
const WellState<Scalar>& well_state) const;
//! \brief Sum in contributions from other processes.
void sumDistributed(Parallel::Communication comm);
//! \brief Returns a const reference to the residual.
const BVectorWell& residual() const
{
@@ -146,6 +151,8 @@ public:
// Store the global index of well perforated cells
std::vector<int> cells_;
const ParallelWellInfo<Scalar>& pw_info_;
};
}

View File

@@ -53,12 +53,13 @@ namespace Opm
template<typename FluidSystem, typename Indices>
MultisegmentWellEval<FluidSystem,Indices>::
MultisegmentWellEval(WellInterfaceIndices<FluidSystem,Indices>& baseif)
MultisegmentWellEval(WellInterfaceIndices<FluidSystem,Indices>& baseif, const ParallelWellInfo<Scalar>& pw_info)
: MultisegmentWellGeneric<Scalar>(baseif)
, pw_info_(pw_info)
, baseif_(baseif)
, linSys_(*this)
, linSys_(*this, pw_info)
, primary_variables_(baseif)
, segments_(this->numberOfSegments(), baseif)
, segments_(this->numberOfSegments(), pw_info.communication().sum(baseif.numPerfs()), baseif)
, cell_perforation_depth_diffs_(baseif_.numPerfs(), 0.0)
, cell_perforation_pressure_diffs_(baseif_.numPerfs(), 0.0)
{

View File

@@ -26,6 +26,7 @@
#include <opm/simulators/wells/MultisegmentWellGeneric.hpp>
#include <opm/simulators/wells/MultisegmentWellPrimaryVariables.hpp>
#include <opm/simulators/wells/MultisegmentWellSegments.hpp>
#include <opm/simulators/wells/ParallelWellInfo.hpp>
#include <opm/material/densead/Evaluation.hpp>
@@ -67,9 +68,10 @@ public:
//! \brief Returns a const reference to equation system.
const Equations& linSys() const
{ return linSys_; }
const ParallelWellInfo<Scalar>& pw_info_;
protected:
MultisegmentWellEval(WellInterfaceIndices<FluidSystem,Indices>& baseif);
MultisegmentWellEval(WellInterfaceIndices<FluidSystem,Indices>& baseif, const ParallelWellInfo<Scalar>& pw_info);
void initMatrixAndVectors();

View File

@@ -57,6 +57,19 @@ scaleSegmentRatesWithWellRates(const std::vector<std::vector<int>>& segment_inle
auto& ws = well_state.well(baseif_.indexOfWell());
auto& segments = ws.segments;
auto& segment_rates = segments.rates;
Scalar sumTw = 0;
bool calculateSumTw = std::any_of(segment_rates.begin(), segment_rates.end(), [](const auto& unscaled_top_seg_rate) {
return std::abs(unscaled_top_seg_rate) <= 1e-12;
});
if (calculateSumTw) {
// Due to various reasons, the well/top segment rate can be zero for this phase.
// We cannot scale this rate directly. The following approach is used to initialize the segment rates.
for (int perf = 0; perf < baseif_.numPerfs(); ++perf) {
sumTw += baseif_.wellIndex()[perf];
}
// We need to communicate here so that sumTw includes the contributions of the perforations on all processes
sumTw = ws.parallel_info.get().communication().sum(sumTw);
}
for (int phase = 0; phase < baseif_.numPhases(); ++phase) {
const Scalar unscaled_top_seg_rate = segment_rates[phase];
const Scalar well_phase_rate = ws.surface_rates[phase];
@@ -64,14 +77,7 @@ scaleSegmentRatesWithWellRates(const std::vector<std::vector<int>>& segment_inle
for (int seg = 0; seg < numberOfSegments(); ++seg) {
segment_rates[baseif_.numPhases() * seg + phase] *= well_phase_rate / unscaled_top_seg_rate;
}
} else {
// Due to various reasons, the well/top segment rate can be zero for this phase.
// We can not scale this rate directly. The following approach is used to initialize the segment rates.
Scalar sumTw = 0;
for (int perf = 0; perf < baseif_.numPerfs(); ++perf) {
sumTw += baseif_.wellIndex()[perf];
}
} else { // In this case, we calculated sumTw
// only handling this specific phase
constexpr Scalar num_single_phase = 1;
std::vector<Scalar> perforation_rates(num_single_phase * baseif_.numPerfs(), 0.0);
@@ -81,7 +87,8 @@ scaleSegmentRatesWithWellRates(const std::vector<std::vector<int>>& segment_inle
}
std::vector<Scalar> rates;
WellState<Scalar>::calculateSegmentRates(segment_inlets,
WellState<Scalar>::calculateSegmentRates(ws.parallel_info,
segment_inlets,
segment_perforations,
perforation_rates,
num_single_phase, 0, rates);
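
In this fallback, sumTw sums the connection factors (wellIndex) over every process so that the well rate can be split across perforations. A self-contained sketch of such a proportional split, with commSum standing in for ws.parallel_info.get().communication().sum():

#include <cstddef>
#include <functional>
#include <vector>

// Sketch: split a target phase rate across the local perforations in
// proportion to their Tw values; sumTw is reduced over all ranks first.
std::vector<double> splitRateByTw(const std::vector<double>& perf_tw, // local Tw values
                                  const double well_phase_rate,
                                  const std::function<double(double)>& commSum)
{
    double sumTw = 0.0;
    for (const double tw : perf_tw)
        sumTw += tw;
    sumTw = commSum(sumTw); // include the perforations of all other ranks

    std::vector<double> perf_rates(perf_tw.size(), 0.0);
    for (std::size_t p = 0; p < perf_tw.size(); ++p)
        perf_rates[p] = well_phase_rate * perf_tw[p] / sumTw;
    return perf_rates;
}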

View File

@@ -60,9 +60,10 @@ namespace Opm
template<class FluidSystem, class Indices>
MultisegmentWellSegments<FluidSystem,Indices>::
MultisegmentWellSegments(const int numSegments,
const int num_perfs_whole_mswell,
WellInterfaceGeneric<Scalar>& well)
: perforations_(numSegments)
, perforation_depth_diffs_(well.numPerfs(), 0.0)
, perforation_depth_diffs_(num_perfs_whole_mswell, 0.0)
, inlets_(well.wellEcl().getSegments().size())
, depth_diffs_(numSegments, 0.0)
, densities_(numSegments, 0.0)
@@ -85,7 +86,8 @@ MultisegmentWellSegments(const int numSegments,
// the current implementation is a temporary solution for now, it should be corrected from the parser
// side
int i_perf_wells = 0;
well.perfDepth().resize(well_.numPerfs(), 0.);
// The perfDepth vector will contain the depths of all perforations across all processes of this well!
well.perfDepth().resize(num_perfs_whole_mswell, 0.0);
const auto& segment_set = well_.wellEcl().getSegments();
for (std::size_t perf = 0; perf < completion_set.size(); ++perf) {
const Connection& connection = completion_set.get(perf);

View File

@@ -49,6 +49,7 @@ class MultisegmentWellSegments
public:
MultisegmentWellSegments(const int numSegments,
const int num_perfs_whole_mswell,
WellInterfaceGeneric<Scalar>& well);
void computeFluidProperties(const EvalWell& temperature,
@@ -148,6 +149,9 @@ private:
// depth difference between the segment and the perforation
// or in another way, the depth difference between the perforation and
// the segment the perforation belongs to
// This vector contains the depth differences for *all* perforations across all processes
// that this well lies on; its size is well.wellEcl().getConnections().size(),
// and it is indexed with *global* perforation indices!
std::vector<Scalar> perforation_depth_diffs_;
// the inlet segments for each segment. It is for convenience and efficiency reason

View File

@@ -67,7 +67,7 @@ namespace Opm
const int index_of_well,
const std::vector<PerforationData<Scalar>>& perf_data)
: Base(well, pw_info, time_step, param, rate_converter, pvtRegionIdx, num_components, num_phases, index_of_well, perf_data)
, MSWEval(static_cast<WellInterfaceIndices<FluidSystem,Indices>&>(*this))
, MSWEval(static_cast<WellInterfaceIndices<FluidSystem,Indices>&>(*this), pw_info)
, regularize_(false)
, segment_fluid_initial_(this->numberOfSegments(), std::vector<Scalar>(this->num_components_, 0.0))
{
@@ -136,9 +136,11 @@ namespace Opm
this->initMatrixAndVectors();
// calculate the depth difference between the perforations and the perforated grid block
for (int perf = 0; perf < this->number_of_perforations_; ++perf) {
const int cell_idx = this->well_cells_[perf];
this->cell_perforation_depth_diffs_[perf] = depth_arg[cell_idx] - this->perf_depth_[perf];
for (int local_perf_index = 0; local_perf_index < this->number_of_perforations_; ++local_perf_index) {
// This index runs over the number_of_perforations_ of *this* process, hence it is *local*.
const int cell_idx = this->well_cells_[local_perf_index];
// Here we need to access the perf_depth_ at the global perforation index though!
this->cell_perforation_depth_diffs_[local_perf_index] = depth_arg[cell_idx] - this->perf_depth_[this->pw_info_.localToGlobal(local_perf_index)];
}
}
@@ -358,14 +360,17 @@ namespace Opm
const auto& segment_pressure = segments_copy.pressure;
for (int seg = 0; seg < nseg; ++seg) {
for (const int perf : this->segments_.perforations()[seg]) {
const int cell_idx = this->well_cells_[perf];
const int local_perf_index = this->pw_info_.globalToLocal(perf);
if (local_perf_index < 0) // then the perforation is not on this process
continue;
const int cell_idx = this->well_cells_[local_perf_index];
const auto& intQuants = simulator.model().intensiveQuantities(cell_idx, /*timeIdx=*/ 0);
// flux for each perforation
std::vector<Scalar> mob(this->num_components_, 0.);
getMobility(simulator, perf, mob, deferred_logger);
getMobility(simulator, local_perf_index, mob, deferred_logger);
const Scalar trans_mult = simulator.problem().template wellTransMultiplier<Scalar>(intQuants, cell_idx);
const auto& wellstate_nupcol = simulator.problem().wellModel().nupcolWellState().well(this->index_of_well_);
const std::vector<Scalar> Tw = this->wellIndex(perf, intQuants, trans_mult, wellstate_nupcol);
const std::vector<Scalar> Tw = this->wellIndex(local_perf_index, intQuants, trans_mult, wellstate_nupcol);
const Scalar seg_pressure = segment_pressure[seg];
std::vector<Scalar> cq_s(this->num_components_, 0.);
Scalar perf_press = 0.0;
@@ -780,6 +785,10 @@ namespace Opm
};
std::vector<Scalar> mob(this->num_components_, 0.0);
// The subsetPerfID loops over 0 .. this->perf_data_->size().
// *(this->perf_data_) contains info about the local process only,
// hence subsetPerfID is a local perf id and we can call getMobility
// directly with that.
getMobility(simulator, static_cast<int>(subsetPerfID), mob, deferred_logger);
const auto& fs = fluidState(subsetPerfID);
@@ -799,6 +808,12 @@ namespace Opm
connPI += np;
}
// Sum with communication in case of distributed well.
const auto& comm = this->parallel_well_info_.communication();
if (comm.size() > 1) {
comm.sum(wellPI, np);
}
assert (static_cast<int>(subsetPerfID) == this->number_of_perforations_ &&
"Internal logic error in processing connections for PI/II");
}
@@ -878,11 +893,15 @@ namespace Opm
PerforationRates<Scalar>& perf_rates,
DeferredLogger& deferred_logger) const
{
const int local_perf_index = this->pw_info_.globalToLocal(perf);
if (local_perf_index < 0) // then the perforation is not on this process
return;
// pressure difference between the segment and the perforation
const Value perf_seg_press_diff = this->gravity() * segment_density *
this->segments_.perforation_depth_diff(perf);
// pressure difference between the perforation and the grid cell
const Scalar cell_perf_press_diff = this->cell_perforation_pressure_diffs_[perf];
const Scalar cell_perf_press_diff = this->cell_perforation_pressure_diffs_[local_perf_index];
// perforation pressure is the wellbore pressure corrected to perforation depth
// (positive sign due to convention in segments_.perforation_depth_diff() )
@@ -1114,7 +1133,7 @@ namespace Opm
void
MultisegmentWell<TypeTag>::
getMobility(const Simulator& simulator,
const int perf,
const int local_perf_index,
std::vector<Value>& mob,
DeferredLogger& deferred_logger) const
{
@@ -1128,10 +1147,10 @@ namespace Opm
}
};
WellInterface<TypeTag>::getMobility(simulator, perf, mob, obtain, deferred_logger);
WellInterface<TypeTag>::getMobility(simulator, local_perf_index, mob, obtain, deferred_logger);
if (this->isInjector() && this->well_ecl_.getInjMultMode() != Well::InjMultMode::NONE) {
const auto perf_ecl_index = this->perforationData()[perf].ecl_index;
const auto perf_ecl_index = this->perforationData()[local_perf_index].ecl_index;
const Connection& con = this->well_ecl_.getConnections()[perf_ecl_index];
const int seg = this->segmentNumberToIndex(con.segment());
// from the reference results, it looks like MSW uses segment pressure instead of BHP here
@@ -1139,9 +1158,9 @@ namespace Opm
// we can change this depending on what we want
const Scalar segment_pres = this->primary_variables_.getSegmentPressure(seg).value();
const Scalar perf_seg_press_diff = this->gravity() * this->segments_.density(seg).value()
* this->segments_.perforation_depth_diff(perf);
* this->segments_.perforation_depth_diff(local_perf_index);
const Scalar perf_press = segment_pres + perf_seg_press_diff;
const Scalar multiplier = this->getInjMult(perf, segment_pres, perf_press, deferred_logger);
const Scalar multiplier = this->getInjMult(local_perf_index, segment_pres, perf_press, deferred_logger);
for (std::size_t i = 0; i < mob.size(); ++i) {
mob[i] *= multiplier;
}
@@ -1244,18 +1263,21 @@ namespace Opm
seg_dp);
seg_dp[seg] = dp;
for (const int perf : this->segments_.perforations()[seg]) {
const int local_perf_index = this->pw_info_.globalToLocal(perf);
if (local_perf_index < 0) // then the perforation is not on this process
continue;
std::vector<Scalar> mob(this->num_components_, 0.0);
// TODO: maybe we should store the mobility somewhere, so that we only need to calculate it once per iteration
getMobility(simulator, perf, mob, deferred_logger);
getMobility(simulator, local_perf_index, mob, deferred_logger);
const int cell_idx = this->well_cells_[perf];
const int cell_idx = this->well_cells_[local_perf_index];
const auto& int_quantities = simulator.model().intensiveQuantities(cell_idx, /*timeIdx=*/ 0);
const auto& fs = int_quantities.fluidState();
// pressure difference between the segment and the perforation
const Scalar perf_seg_press_diff = this->segments_.getPressureDiffSegPerf(seg, perf);
// pressure difference between the perforation and the grid cell
const Scalar cell_perf_press_diff = this->cell_perforation_pressure_diffs_[perf];
const Scalar cell_perf_press_diff = this->cell_perforation_pressure_diffs_[local_perf_index];
const Scalar pressure_cell = this->getPerfCellPressure(fs).value();
// calculating the b for the connection
@@ -1281,7 +1303,7 @@ namespace Opm
// the well index associated with the connection
const Scalar trans_mult = simulator.problem().template wellTransMultiplier<Scalar>(int_quantities, cell_idx);
const auto& wellstate_nupcol = simulator.problem().wellModel().nupcolWellState().well(this->index_of_well_);
const std::vector<Scalar> tw_perf = this->wellIndex(perf, int_quantities, trans_mult, wellstate_nupcol);
const std::vector<Scalar> tw_perf = this->wellIndex(local_perf_index, int_quantities, trans_mult, wellstate_nupcol);
std::vector<Scalar> ipr_a_perf(this->ipr_a_.size());
std::vector<Scalar> ipr_b_perf(this->ipr_b_.size());
for (int comp_idx = 0; comp_idx < this->num_components_; ++comp_idx) {
@@ -1316,6 +1338,8 @@ namespace Opm
}
}
}
this->parallel_well_info_.communication().sum(this->ipr_a_.data(), this->ipr_a_.size());
this->parallel_well_info_.communication().sum(this->ipr_b_.data(), this->ipr_b_.size());
}
template<typename TypeTag>
@@ -1678,6 +1702,9 @@ namespace Opm
const auto report = getWellConvergence(simulator, well_state, Base::B_avg_, deferred_logger, relax_convergence);
converged = report.converged();
if (this->parallel_well_info_.communication().size() > 1 && converged != this->parallel_well_info_.communication().min(converged)) {
OPM_THROW(std::runtime_error, fmt::format("Misalignment of the parallel simulation run in iterateWellEqWithSwitching - the well calculation succeeded on rank {} but failed on other ranks.", this->parallel_well_info_.communication().rank()));
}
if (converged) {
// if equations are sufficiently linear they might converge in less than min_its_after_switch
// in this case, make sure all constraints are satisfied before returning
@@ -1817,6 +1844,59 @@ namespace Opm
const int nseg = this->numberOfSegments();
for (int seg = 0; seg < nseg; ++seg) {
// calculating the perforation rate for each perforation that belongs to this segment
const EvalWell seg_pressure = this->primary_variables_.getSegmentPressure(seg);
auto& perf_data = ws.perf_data;
auto& perf_rates = perf_data.phase_rates;
auto& perf_press_state = perf_data.pressure;
for (const int perf : this->segments_.perforations()[seg]) {
const int local_perf_index = this->pw_info_.globalToLocal(perf);
if (local_perf_index < 0) // then the perforation is not on this process
continue;
const int cell_idx = this->well_cells_[local_perf_index];
const auto& int_quants = simulator.model().intensiveQuantities(cell_idx, /*timeIdx=*/ 0);
std::vector<EvalWell> mob(this->num_components_, 0.0);
getMobility(simulator, local_perf_index, mob, deferred_logger);
const Scalar trans_mult = simulator.problem().template wellTransMultiplier<Scalar>(int_quants, cell_idx);
const auto& wellstate_nupcol = simulator.problem().wellModel().nupcolWellState().well(this->index_of_well_);
const std::vector<Scalar> Tw = this->wellIndex(local_perf_index, int_quants, trans_mult, wellstate_nupcol);
std::vector<EvalWell> cq_s(this->num_components_, 0.0);
EvalWell perf_press;
PerforationRates<Scalar> perfRates;
computePerfRate(int_quants, mob, Tw, seg, perf, seg_pressure,
allow_cf, cq_s, perf_press, perfRates, deferred_logger);
// updating the solution gas rate and solution oil rate
if (this->isProducer()) {
ws.phase_mixing_rates[ws.dissolved_gas] += perfRates.dis_gas;
ws.phase_mixing_rates[ws.vaporized_oil] += perfRates.vap_oil;
perf_data.phase_mixing_rates[local_perf_index][ws.dissolved_gas] = perfRates.dis_gas;
perf_data.phase_mixing_rates[local_perf_index][ws.vaporized_oil] = perfRates.vap_oil;
}
// store the perf pressure and rates
for (int comp_idx = 0; comp_idx < this->num_components_; ++comp_idx) {
perf_rates[local_perf_index*this->number_of_phases_ + this->modelCompIdxToFlowCompIdx(comp_idx)] = cq_s[comp_idx].value();
}
perf_press_state[local_perf_index] = perf_press.value();
for (int comp_idx = 0; comp_idx < this->num_components_; ++comp_idx) {
// the cq_s entering mass balance equations need to consider the efficiency factors.
const EvalWell cq_s_effective = cq_s[comp_idx] * this->well_efficiency_factor_;
this->connectionRates_[local_perf_index][comp_idx] = Base::restrictEval(cq_s_effective);
MultisegmentWellAssemble(*this).
assemblePerforationEq(seg, local_perf_index, comp_idx, cq_s_effective, this->linSys_);
}
}
}
if (this->parallel_well_info_.communication().size() > 1) {
// accumulate resWell_ and duneD_ in parallel to get the effects of all perforations (which might be distributed)
this->linSys_.sumDistributed(this->parallel_well_info_.communication());
}
for (int seg = 0; seg < nseg; ++seg) {
// calculating the accumulation term
// TODO: without considering the efficiency factor for now
@@ -1865,50 +1945,6 @@ namespace Opm
}
}
// calculating the perforation rate for each perforation that belongs to this segment
const EvalWell seg_pressure = this->primary_variables_.getSegmentPressure(seg);
auto& perf_data = ws.perf_data;
auto& perf_rates = perf_data.phase_rates;
auto& perf_press_state = perf_data.pressure;
for (const int perf : this->segments_.perforations()[seg]) {
const int cell_idx = this->well_cells_[perf];
const auto& int_quants = simulator.model().intensiveQuantities(cell_idx, /*timeIdx=*/ 0);
std::vector<EvalWell> mob(this->num_components_, 0.0);
getMobility(simulator, perf, mob, deferred_logger);
const Scalar trans_mult = simulator.problem().template wellTransMultiplier<Scalar>(int_quants, cell_idx);
const auto& wellstate_nupcol = simulator.problem().wellModel().nupcolWellState().well(this->index_of_well_);
const std::vector<Scalar> Tw = this->wellIndex(perf, int_quants, trans_mult, wellstate_nupcol);
std::vector<EvalWell> cq_s(this->num_components_, 0.0);
EvalWell perf_press;
PerforationRates<Scalar> perfRates;
computePerfRate(int_quants, mob, Tw, seg, perf, seg_pressure,
allow_cf, cq_s, perf_press, perfRates, deferred_logger);
// updating the solution gas rate and solution oil rate
if (this->isProducer()) {
ws.phase_mixing_rates[ws.dissolved_gas] += perfRates.dis_gas;
ws.phase_mixing_rates[ws.vaporized_oil] += perfRates.vap_oil;
perf_data.phase_mixing_rates[perf][ws.dissolved_gas] = perfRates.dis_gas;
perf_data.phase_mixing_rates[perf][ws.vaporized_oil] = perfRates.vap_oil;
}
// store the perf pressure and rates
for (int comp_idx = 0; comp_idx < this->num_components_; ++comp_idx) {
perf_rates[perf*this->number_of_phases_ + this->modelCompIdxToFlowCompIdx(comp_idx)] = cq_s[comp_idx].value();
}
perf_press_state[perf] = perf_press.value();
for (int comp_idx = 0; comp_idx < this->num_components_; ++comp_idx) {
// the cq_s entering mass balance equations need to consider the efficiency factors.
const EvalWell cq_s_effective = cq_s[comp_idx] * this->well_efficiency_factor_;
this->connectionRates_[perf][comp_idx] = Base::restrictEval(cq_s_effective);
MultisegmentWellAssemble(*this).
assemblePerforationEq(seg, perf, comp_idx, cq_s_effective, this->linSys_);
}
}
// the fourth equation, the pressure drop equation
if (seg == 0) { // top segment, pressure equation is the control equation
const auto& summaryState = simulator.vanguard().summaryState();
@@ -1933,6 +1969,7 @@ namespace Opm
}
}
this->parallel_well_info_.communication().sum(this->ipr_a_.data(), this->ipr_a_.size());
this->linSys_.createSolver();
}
@@ -1959,15 +1996,18 @@ namespace Opm
for (int seg = 0; seg < nseg; ++seg) {
const EvalWell segment_pressure = this->primary_variables_.getSegmentPressure(seg);
for (const int perf : this->segments_.perforations()[seg]) {
const int local_perf_index = this->pw_info_.globalToLocal(perf);
if (local_perf_index < 0) // then the perforation is not on this process
continue;
const int cell_idx = this->well_cells_[perf];
const int cell_idx = this->well_cells_[local_perf_index];
const auto& intQuants = simulator.model().intensiveQuantities(cell_idx, /*timeIdx=*/ 0);
const auto& fs = intQuants.fluidState();
// pressure difference between the segment and the perforation
const EvalWell perf_seg_press_diff = this->segments_.getPressureDiffSegPerf(seg, perf);
// pressure difference between the perforation and the grid cell
const Scalar cell_perf_press_diff = this->cell_perforation_pressure_diffs_[perf];
const Scalar cell_perf_press_diff = this->cell_perforation_pressure_diffs_[local_perf_index];
const Scalar pressure_cell = this->getPerfCellPressure(fs).value();
const Scalar perf_press = pressure_cell - cell_perf_press_diff;
@@ -1985,6 +2025,12 @@ namespace Opm
}
}
}
const auto& comm = this->parallel_well_info_.communication();
if (comm.size() > 1)
{
all_drawdown_wrong_direction =
(comm.min(all_drawdown_wrong_direction ? 1 : 0) == 1);
}
return all_drawdown_wrong_direction;
}
@@ -2166,7 +2212,11 @@ namespace Opm
const int nseg = this->numberOfSegments();
for (int seg = 0; seg < nseg; ++seg) {
for (const int perf : this->segments_.perforations()[seg]) {
const int cell_idx = this->well_cells_[perf];
const int local_perf_index = this->pw_info_.globalToLocal(perf);
if (local_perf_index < 0) // then the perforation is not on this process
continue;
const int cell_idx = this->well_cells_[local_perf_index];
const auto& int_quants = simulator.model().intensiveQuantities(cell_idx, /*timeIdx=*/ 0);
const auto& fs = int_quants.fluidState();
Scalar pressure_cell = this->getPerfCellPressure(fs).value();
@@ -2194,13 +2244,17 @@ namespace Opm
// calculating the perforation rate for each perforation that belongs to this segment
const Scalar seg_pressure = getValue(this->primary_variables_.getSegmentPressure(seg));
for (const int perf : this->segments_.perforations()[seg]) {
const int cell_idx = this->well_cells_[perf];
const int local_perf_index = this->pw_info_.globalToLocal(perf);
if (local_perf_index < 0) // then the perforation is not on this process
continue;
const int cell_idx = this->well_cells_[local_perf_index];
const auto& int_quants = simulator.model().intensiveQuantities(cell_idx, /*timeIdx=*/ 0);
std::vector<Scalar> mob(this->num_components_, 0.0);
getMobility(simulator, perf, mob, deferred_logger);
getMobility(simulator, local_perf_index, mob, deferred_logger);
const Scalar trans_mult = simulator.problem().template wellTransMultiplier<Scalar>(int_quants, cell_idx);
const auto& wellstate_nupcol = simulator.problem().wellModel().nupcolWellState().well(this->index_of_well_);
const std::vector<Scalar> Tw = this->wellIndex(perf, int_quants, trans_mult, wellstate_nupcol);
const std::vector<Scalar> Tw = this->wellIndex(local_perf_index, int_quants, trans_mult, wellstate_nupcol);
std::vector<Scalar> cq_s(this->num_components_, 0.0);
Scalar perf_press = 0.0;
PerforationRates<Scalar> perf_rates;
@@ -2211,6 +2265,11 @@ namespace Opm
}
}
}
const auto& comm = this->parallel_well_info_.communication();
if (comm.size() > 1)
{
comm.sum(well_q_s.data(), well_q_s.size());
}
return well_q_s;
}
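
Several of the reductions above encode collective boolean logic with integers: min over {0,1} is a logical AND across ranks (as for all_drawdown_wrong_direction), while max would be an OR. The idiom in isolation, assuming a Dune-style communicator:

// Sketch: collective logical AND via an integer min reduction, as used
// for all_drawdown_wrong_direction above.
template <class Communication>
bool allRanksTrue(const Communication& comm, const bool local)
{
    return comm.min(local ? 1 : 0) == 1;
}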

View File

@@ -23,6 +23,7 @@
#include <opm/input/eclipse/Schedule/Well/Well.hpp>
#include <opm/input/eclipse/Schedule/Well/WellConnections.hpp>
#include <fmt/format.h>
#include <cassert>
#include <iterator>
#include <numeric>
@@ -119,6 +120,50 @@ GlobalPerfContainerFactory(const IndexSet& local_indices,
}
}
template<class Scalar>
void GlobalPerfContainerFactory<Scalar>::buildLocalToGlobalMap() const {
local_to_global_map_.clear();
for (const auto& index : local_indices_) {
local_to_global_map_[index.local()] = index.global();
}
l2g_map_built_ = true;
}
template<class Scalar>
int GlobalPerfContainerFactory<Scalar>::localToGlobal(std::size_t localIndex) const {
if (local_indices_.size() == 0)
return localIndex;
if (!l2g_map_built_)
buildLocalToGlobalMap();
auto it = local_to_global_map_.find(localIndex);
if (it == local_to_global_map_.end())
OPM_THROW(std::logic_error, fmt::format("There is no global index for the localIndex {}.", localIndex));
return it->second;
}
template<class Scalar>
void GlobalPerfContainerFactory<Scalar>::buildGlobalToLocalMap() const {
global_to_local_map_.clear();
for (const auto& index : local_indices_) {
global_to_local_map_[index.global()] = index.local().local();
}
g2l_map_built_ = true;
}
template<class Scalar>
int GlobalPerfContainerFactory<Scalar>::globalToLocal(const int globalIndex) const {
if (local_indices_.size() == 0)
return globalIndex;
if (!g2l_map_built_) {
buildGlobalToLocalMap();
}
auto it = global_to_local_map_.find(globalIndex);
if (it == global_to_local_map_.end())
return -1; // Global index not found
return it->second;
}
template<class Scalar>
std::vector<Scalar> GlobalPerfContainerFactory<Scalar>::
createGlobal(const std::vector<Scalar>& local_perf_container,
@@ -477,6 +522,22 @@ ParallelWellInfo<Scalar>::ParallelWellInfo(const std::pair<std::string, bool>& w
isOwner_ = (comm_->rank() == 0);
}
template<class Scalar>
int ParallelWellInfo<Scalar>::localToGlobal(std::size_t localIndex) const {
if(globalPerfCont_)
return globalPerfCont_->localToGlobal(localIndex);
else // If globalPerfCont_ is not set up, then this is a sequential run and local and global indices are the same.
return localIndex;
}
template<class Scalar>
int ParallelWellInfo<Scalar>::globalToLocal(const int globalIndex) const {
if(globalPerfCont_)
return globalPerfCont_->globalToLocal(globalIndex);
else // If globalPerfCont_ is not set up, then this is a sequential run and local and global indices are the same.
return globalIndex;
}
template<class Scalar>
void ParallelWellInfo<Scalar>::communicateFirstPerforation(bool hasFirst)
{
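
The lookup tables above are built lazily: mutable members let the const accessors fill the cache on first use, so wells that never query the mapping pay nothing. The idiom in isolation (IndexMapper is a hypothetical stand-in for GlobalPerfContainerFactory):

#include <unordered_map>
#include <utility>
#include <vector>

// Sketch of the cache-on-first-use idiom: const queries, mutable cache.
class IndexMapper {
public:
    explicit IndexMapper(std::vector<std::pair<int, int>> localGlobalPairs)
        : pairs_(std::move(localGlobalPairs)) {}

    int globalToLocal(const int global) const {
        if (!built_) { // build the map only when it is first queried
            for (const auto& [local, glob] : pairs_)
                g2l_[glob] = local;
            built_ = true;
        }
        const auto it = g2l_.find(global);
        return it == g2l_.end() ? -1 : it->second; // -1: not on this process
    }

private:
    std::vector<std::pair<int, int>> pairs_;
    mutable std::unordered_map<int, int> g2l_; // mutable: filled from a const method
    mutable bool built_ = false;
};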

View File

@@ -161,8 +161,16 @@ public:
std::size_t num_components) const;
int numGlobalPerfs() const;
int globalToLocal(const int globalIndex) const;
int localToGlobal(std::size_t localIndex) const;
private:
void buildLocalToGlobalMap() const;
void buildGlobalToLocalMap() const;
mutable std::unordered_map<std::size_t, int> local_to_global_map_; // Cache for L2G mapping
mutable std::unordered_map<int, std::size_t> global_to_local_map_; // Cache for G2L mapping
mutable bool l2g_map_built_ = false;
mutable bool g2l_map_built_ = false;
const IndexSet& local_indices_;
Parallel::Communication comm_;
int num_global_perfs_;
@@ -208,6 +216,8 @@ public:
/// \brief Collectively decide which rank has first perforation.
void communicateFirstPerforation(bool hasFirst);
int globalToLocal(const int globalIndex) const;
int localToGlobal(std::size_t localIndex) const;
/// If the well does not have any open connections the member rankWithFirstPerf
/// is not initialized, and no broadcast is performed. In this case the argument

View File

@@ -133,9 +133,9 @@ Scalar computeHydrostaticCorrection(const Scalar well_ref_depth, const Scalar vf
return dp;
}
template<typename Scalar, typename Comm>
void sumDistributedWellEntries(Dune::DynamicMatrix<Scalar>& mat,
Dune::DynamicVector<Scalar>& vec,
template<typename MatrixType, typename VectorType, typename Comm>
void sumDistributedWellEntries(MatrixType& mat,
VectorType& vec,
const Comm& comm)
{
// DynamicMatrix does not use one contiguous array for storing the data
@@ -145,7 +145,7 @@ void sumDistributedWellEntries(Dune::DynamicMatrix<Scalar>& mat,
{
return;
}
std::vector<Scalar> allEntries;
std::vector<typename MatrixType::value_type> allEntries;
allEntries.reserve(mat.N()*mat.M()+vec.size());
for(const auto& row: mat)
{
@@ -154,7 +154,7 @@ void sumDistributedWellEntries(Dune::DynamicMatrix<Scalar>& mat,
allEntries.insert(allEntries.end(), vec.begin(), vec.end());
comm.sum(allEntries.data(), allEntries.size());
auto pos = allEntries.begin();
auto cols = mat.cols();
auto cols = mat.mat_cols();
for(auto&& row: mat)
{
std::copy(pos, pos + cols, &(row[0]));
@@ -229,6 +229,11 @@ using Comm = Parallel::Communication;
template void ParallelStandardWellB<T>:: \
mmv(const Vec<T,Dim>&,DynVec<T>&) const;
#define INSTANTIATE_WE(T,Dim) \
template void sumDistributedWellEntries(Dune::FieldMatrix<T,Dim,Dim>& mat, \
Dune::FieldVector<T,Dim>& vec, \
const Comm& comm);
#define INSTANTIATE_TYPE(T) \
template class ParallelStandardWellB<T>; \
template void sumDistributedWellEntries(Dune::DynamicMatrix<T>& mat, \
@@ -244,7 +249,10 @@ using Comm = Parallel::Communication;
INSTANTIATE(T,3) \
INSTANTIATE(T,4) \
INSTANTIATE(T,5) \
INSTANTIATE(T,6)
INSTANTIATE(T,6) \
INSTANTIATE_WE(T,2) \
INSTANTIATE_WE(T,3) \
INSTANTIATE_WE(T,4)
INSTANTIATE_TYPE(double)
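
Generalizing sumDistributedWellEntries from DynamicMatrix to any Dune-style matrix works because the helper never assumes contiguous storage: entries are copied row by row into one buffer, reduced with a single collective call, and copied back. A sketch under those assumptions (globalSum stands in for comm.sum):

#include <algorithm>
#include <vector>

// Sketch: pack matrix rows and a vector into one buffer, reduce once, unpack.
// 'Matrix' is assumed Dune-like: iterable rows, N()/M() sizes, value_type.
template <class Matrix, class Vector, class SumFn>
void sumEntries(Matrix& mat, Vector& vec, SumFn&& globalSum)
{
    std::vector<typename Matrix::value_type> buffer;
    buffer.reserve(mat.N() * mat.M() + vec.size());
    for (const auto& row : mat)
        buffer.insert(buffer.end(), row.begin(), row.end());
    buffer.insert(buffer.end(), vec.begin(), vec.end());

    globalSum(buffer.data(), buffer.size()); // one collective call for everything

    auto pos = buffer.begin();
    for (auto& row : mat) {
        std::copy(pos, pos + mat.M(), row.begin());
        pos += mat.M();
    }
    std::copy(pos, buffer.end(), vec.begin());
}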

View File

@@ -76,9 +76,9 @@ Scalar computeHydrostaticCorrection(const Scalar well_ref_depth,
const Scalar gravity);
/// \brief Sums entries of the diagonal Matrix for distributed wells
template<typename Scalar, typename Comm>
void sumDistributedWellEntries(Dune::DynamicMatrix<Scalar>& mat,
Dune::DynamicVector<Scalar>& vec,
template<typename MatrixType, typename VectorType, typename Comm>
void sumDistributedWellEntries(MatrixType& mat,
VectorType& vec,
const Comm& comm);

View File

@@ -88,6 +88,7 @@ WellInterfaceGeneric(const Well& well,
// We do not want to count SHUT perforations here, so
// it would be wrong to use wells.getConnections().size().
// This is the number_of_perforations_ on this process only!
number_of_perforations_ = perf_data.size();
// perforations related

View File

@@ -311,7 +311,7 @@ protected:
// well index for each perforation
std::vector<Scalar> well_index_;
// number of the perforations for this well
// number of the perforations for this well on this process
int number_of_perforations_;
// depth for each perforation

View File

@@ -1627,7 +1627,9 @@ namespace Opm
{
// Add a Forchheimer term to the gas phase CTF if the run uses
// either of the WDFAC or the WDFACCOR keywords.
if (static_cast<std::size_t>(perf) >= this->well_cells_.size()) {
OPM_THROW(std::invalid_argument,"The perforation index exceeds the size of the local containers - possibly wellIndex was called with a global instead of a local perforation index!");
}
auto wi = std::vector<Scalar>
(this->num_components_, this->well_index_[perf] * trans_mult);
@@ -1818,6 +1820,9 @@ namespace Opm
return std::array<Eval,3>{};
}
};
if (static_cast<std::size_t>(perf) >= this->well_cells_.size()) {
OPM_THROW(std::invalid_argument,"The perforation index exceeds the size of the local containers - possibly getMobility was called with a global instead of a local perforation index!");
}
const int cell_idx = this->well_cells_[perf];
assert (int(mob.size()) == this->num_components_);
const auto& intQuants = simulator.model().intensiveQuantities(cell_idx, /*timeIdx=*/0);

View File

@@ -755,14 +755,15 @@ void WellState<Scalar>::initWellStateMSWell(const std::vector<Well>& wells_ecl,
// TODO: to see if this strategy can benefit StandardWell too
// TODO: it might cause big problem for gas rate control or if there is a gas rate limit
// maybe the best way is to initialize the fractions first then get the rates
for (int perf = 0; perf < n_activeperf; perf++)
for (std::size_t perf = 0; perf < perf_data.size(); perf++)
perf_rates[perf*np + gaspos] *= 100;
}
const auto& perf_rates = perf_data.phase_rates;
std::vector<Scalar> perforation_rates(perf_rates.begin(), perf_rates.end());
calculateSegmentRates(segment_inlets, segment_perforations, perforation_rates, np, 0 /* top segment */, ws.segments.rates);
calculateSegmentRates(ws.parallel_info, segment_inlets, segment_perforations, perforation_rates, np, 0 /* top segment */, ws.segments.rates);
// for the segment pressure, the segment pressure is the same as that of the first perforation belonging to the segment
// if there is no perforation associated with this segment, it uses the pressure from the outlet segment,
// which requires that the ordering is correct
@@ -773,10 +774,15 @@ void WellState<Scalar>::initWellStateMSWell(const std::vector<Well>& wells_ecl,
auto& segment_pressure = ws.segments.pressure;
segment_pressure[0] = ws.bhp;
const auto& perf_press = perf_data.pressure;
// segment_indices contains the indices of the segments whose pressure value is only available on one process.
std::vector<int> segment_indices;
for (int seg = 1; seg < well_nseg; ++seg) {
if (!segment_perforations[seg].empty()) {
const int first_perf = segment_perforations[seg][0];
const int first_perf = ws.parallel_info.get().globalToLocal(segment_perforations[seg][0]);
if (first_perf > -1) { //-1 indicates that the global id is not on this process
segment_pressure[seg] = perf_press[first_perf];
}
segment_indices.push_back(seg);
} else {
// seg_press_.push_back(bhp); // may not be a good decision
// using the outlet segment pressure // it requires that the ordering is correct
@@ -784,6 +790,21 @@ void WellState<Scalar>::initWellStateMSWell(const std::vector<Well>& wells_ecl,
segment_pressure[seg] = segment_pressure[segment_set.segmentNumberToIndex(outlet_seg)];
}
}
if (ws.parallel_info.get().communication().size() > 1) {
// Communicate the segment_pressure values
std::vector<Scalar> values_to_combine(segment_indices.size(), 0.0);
for (size_t i = 0; i < segment_indices.size(); ++i) {
values_to_combine[i] = segment_pressure[segment_indices[i]];
}
ws.parallel_info.get().communication().sum(values_to_combine.data(), values_to_combine.size());
// Now make segment_pressure equal across all processes
for (size_t i = 0; i < segment_indices.size(); ++i) {
segment_pressure[segment_indices[i]] = values_to_combine[i];
}
}
}
}
}
@@ -818,7 +839,8 @@ void WellState<Scalar>::initWellStateMSWell(const std::vector<Well>& wells_ecl,
template<class Scalar>
void WellState<Scalar>::
calculateSegmentRates(const std::vector<std::vector<int>>& segment_inlets,
calculateSegmentRatesBeforeSum(const ParallelWellInfo<Scalar>& pw_info,
const std::vector<std::vector<int>>& segment_inlets,
const std::vector<std::vector<int>>& segment_perforations,
const std::vector<Scalar>& perforation_rates,
const int np, const int segment,
@@ -833,17 +855,34 @@ calculateSegmentRates(const std::vector<std::vector<int>>& segment_inlets,
}
// contributions from the perforations belong to this segment
for (const int& perf : segment_perforations[segment]) {
auto local_perf = pw_info.globalToLocal(perf);
// If local_perf == -1, then the perforation is not on this process.
// The perforations of the other processes are added in calculateSegmentRates.
if (local_perf > -1) {
for (int p = 0; p < np; ++p) {
segment_rates[np * segment + p] += perforation_rates[np * perf + p];
segment_rates[np * segment + p] += perforation_rates[np * local_perf + p];
}
}
}
for (const int& inlet_seg : segment_inlets[segment]) {
calculateSegmentRates(segment_inlets, segment_perforations, perforation_rates, np, inlet_seg, segment_rates);
calculateSegmentRatesBeforeSum(pw_info, segment_inlets, segment_perforations, perforation_rates, np, inlet_seg, segment_rates);
for (int p = 0; p < np; ++p) {
segment_rates[np * segment + p] += segment_rates[np * inlet_seg + p];
}
}
}
template<class Scalar>
void WellState<Scalar>::
calculateSegmentRates(const ParallelWellInfo<Scalar>& pw_info,
const std::vector<std::vector<int>>& segment_inlets,
const std::vector<std::vector<int>>& segment_perforations,
const std::vector<Scalar>& perforation_rates,
const int np, const int segment,
std::vector<Scalar>& segment_rates)
{
calculateSegmentRatesBeforeSum(pw_info, segment_inlets, segment_perforations, perforation_rates, np, segment, segment_rates);
pw_info.communication().sum(segment_rates.data(), segment_rates.size());
}
template<class Scalar>
void WellState<Scalar>::stopWell(int well_index)
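
calculateSegmentRates is now a purely local tree recursion (calculateSegmentRatesBeforeSum, accumulating each segment's own perforations plus its inlet branches) followed by a single sum over the well communicator; because the recursion is linear in the perforation rates, summing the per-rank partial results afterwards gives the same totals. The recursion reduced to its essentials (single phase, hypothetical names):

#include <vector>

// Sketch: accumulate rates up a segment tree from this rank's perforations.
// A final collective sum over segment_rates then adds the other ranks' parts.
void accumulateSegmentRates(const std::vector<std::vector<int>>& inlets, // children per segment
                            const std::vector<double>& perf_rate,        // per segment, local perfs only
                            const int segment,
                            std::vector<double>& segment_rates)
{
    segment_rates[segment] += perf_rate[segment];
    for (const int inlet : inlets[segment]) {
        accumulateSegmentRates(inlets, perf_rate, inlet, segment_rates);
        segment_rates[segment] += segment_rates[inlet]; // each branch flows into its outlet
    }
}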

View File

@@ -151,8 +151,9 @@ public:
void initWellStateMSWell(const std::vector<Well>& wells_ecl,
const WellState* prev_well_state);
static void calculateSegmentRates(const std::vector<std::vector<int>>& segment_inlets, const
std::vector<std::vector<int>>& segment_perforations,
static void calculateSegmentRates(const ParallelWellInfo<Scalar>& pw_info,
const std::vector<std::vector<int>>& segment_inlets,
const std::vector<std::vector<int>>& segment_perforations,
const std::vector<Scalar>& perforation_rates,
const int np,
const int segment,
@@ -417,6 +418,15 @@ private:
Scalar temperature_first_connection,
const std::vector<PerforationData<Scalar>>& well_perf_data,
const SummaryState& summary_state);
static void calculateSegmentRatesBeforeSum(const ParallelWellInfo<Scalar>& pw_info,
const std::vector<std::vector<int>>& segment_inlets,
const std::vector<std::vector<int>>& segment_perforations,
const std::vector<Scalar>& perforation_rates,
const int np,
const int segment,
std::vector<Scalar>& segment_rates);
};
} // namespace Opm