Merge pull request #5341 from jakobtorben/NLDD_remove_need_for_addWellContrib

Remove the need to add well contributions to the matrix for NLDD
Atgeirr Flø Rasmussen 2024-10-10 16:23:52 +02:00 committed by GitHub
commit a7efc0091d
12 changed files with 292 additions and 65 deletions
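
In short, the well operators B and C now act on perforation-local vectors, and a domain-restricted well operator is added so the NLDD local solves no longer need the well contributions assembled into the reservoir matrix. The recurring pattern in the diff is: gather the entries of a global vector at a well's perforated cells, apply the well equations on the local vector, and scatter the result back. A minimal standalone sketch of that pattern (plain std::vector used here instead of the Dune block vectors in the code):

#include <cstddef>
#include <vector>

// Gather the entries of a global vector at a well's perforated cells,
// let a local operation act on them, then scatter the result back.
// (Illustrative sketch; the real code uses Dune block vectors and well->cells().)
template <class LocalOp>
void applyOnPerforatedCells(std::vector<double>& global,
                            const std::vector<int>& cells,
                            LocalOp&& localOp)
{
    std::vector<double> local(cells.size());
    for (std::size_t i = 0; i < cells.size(); ++i)
        local[i] = global[cells[i]];            // gather
    localOp(local);                             // e.g. well->apply(local)
    for (std::size_t i = 0; i < cells.size(); ++i)
        global[cells[i]] = local[i];            // scatter
}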

View File

@ -242,9 +242,6 @@ namespace Opm {
convergence_reports_.reserve(300); // Often insufficient, but avoids frequent moves.
if (param_.nonlinear_solver_ == "nldd") {
if (!param_.matrix_add_well_contributions_) {
OPM_THROW(std::runtime_error, "The --nonlinear-solver=nldd option can only be used with --matrix-add-well-contributions=true");
}
if (terminal_output) {
OpmLog::info("Using Non-Linear Domain Decomposition solver (nldd).");
}

View File

@ -169,6 +169,7 @@ public:
loc_param.linear_solver_print_json_definition_ = false;
const bool force_serial = true;
domain_linsolvers_.emplace_back(model_.simulator(), loc_param, force_serial);
domain_linsolvers_.back().setDomainIndex(index);
}
assert(int(domains_.size()) == num_domains);

View File

@ -450,6 +450,16 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
const CommunicationType* comm() const { return comm_.get(); }
void setDomainIndex(const int index)
{
domainIndex_ = index;
}
bool isNlddLocalSolver() const
{
return parameters_[activeSolverNum_].is_nldd_local_solver_;
}
protected:
#if HAVE_MPI
using Comm = Dune::OwnerOverlapCopyCommunication<int, int>;
@ -490,8 +500,15 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
OPM_TIMEBLOCK(flexibleSolverPrepare);
if (shouldCreateSolver()) {
if (!useWellConn_) {
auto wellOp = std::make_unique<WellModelOperator>(simulator_.problem().wellModel());
flexibleSolver_[activeSolverNum_].wellOperator_ = std::move(wellOp);
if (isNlddLocalSolver()) {
auto wellOp = std::make_unique<DomainWellModelAsLinearOperator<WellModel, Vector, Vector>>(simulator_.problem().wellModel());
wellOp->setDomainIndex(domainIndex_);
flexibleSolver_[activeSolverNum_].wellOperator_ = std::move(wellOp);
}
else {
auto wellOp = std::make_unique<WellModelOperator>(simulator_.problem().wellModel());
flexibleSolver_[activeSolverNum_].wellOperator_ = std::move(wellOp);
}
}
std::function<Vector()> weightCalculator = this->getWeightsCalculator(prm_[activeSolverNum_], getMatrix(), pressureIndex);
OPM_TIMEBLOCK(flexibleSolverCreate);
@ -651,6 +668,8 @@ std::unique_ptr<Matrix> blockJacobiAdjacency(const Grid& grid,
std::vector<int> overlapRows_;
std::vector<int> interiorRows_;
int domainIndex_ = -1;
bool useWellConn_;
std::vector<FlowLinearSolverParameters> parameters_;

View File

@ -123,10 +123,46 @@ public:
return wellMod_.numLocalWellsEnd();
}
private:
protected:
const WellModel& wellMod_;
};
template <class WellModel, class X, class Y>
class DomainWellModelAsLinearOperator : public WellModelAsLinearOperator<WellModel, X, Y>
{
public:
using WBase = WellModelAsLinearOperator<WellModel, X, Y>;
using WBase::WBase; // inherit all constructors from the base class
using field_type = typename WBase::field_type;
using PressureMatrix = typename WBase::PressureMatrix;
void setDomainIndex(int index) { domainIndex_ = index; }
void apply(const X& x, Y& y) const override
{
OPM_TIMEBLOCK(apply);
this->wellMod_.applyDomain(x, y, domainIndex_);
}
void applyscaleadd(field_type alpha, const X& x, Y& y) const override
{
OPM_TIMEBLOCK(applyscaleadd);
this->wellMod_.applyScaleAddDomain(alpha, x, y, domainIndex_);
}
void addWellPressureEquations(PressureMatrix& jacobian,
const X& weights,
const bool use_well_weights) const override
{
OPM_TIMEBLOCK(addWellPressureEquations);
this->wellMod_.addWellPressureEquationsDomain(jacobian, weights, use_well_weights, domainIndex_);
}
private:
int domainIndex_ = -1;
};
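The derived class overrides the linear-operator entry points so that apply computes y = Op(x) and applyscaleadd computes y += alpha * Op(x), both restricted to the wells of one NLDD domain. A minimal standalone sketch of that contract, with a plain diagonal operator standing in for the well model (names illustrative, not from the PR):

#include <cstddef>
#include <vector>

// Sketch of the operator contract the overrides above fulfil:
//   apply:         y  = Op(x)
//   applyscaleadd: y += alpha * Op(x)
struct DiagonalOperator
{
    std::vector<double> d; // Op(x)_i = d_i * x_i

    void apply(const std::vector<double>& x, std::vector<double>& y) const
    {
        for (std::size_t i = 0; i < d.size(); ++i)
            y[i] = d[i] * x[i];
    }

    void applyscaleadd(double alpha, const std::vector<double>& x,
                       std::vector<double>& y) const
    {
        for (std::size_t i = 0; i < d.size(); ++i)
            y[i] += alpha * d[i] * x[i];
    }
};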
/*!
\brief Adapter to combine a matrix and another linear operator into
a combined linear operator.

View File

@ -33,6 +33,8 @@
#include <string>
#include <vector>
#include <opm/grid/utility/SparseTable.hpp>
#include <opm/input/eclipse/Schedule/Group/Group.hpp>
#include <opm/input/eclipse/Schedule/Group/GuideRate.hpp>
#include <opm/input/eclipse/Schedule/Schedule.hpp>
@ -297,12 +299,11 @@ template<class Scalar> class WellContributions;
return this->computeWellBlockAveragePressures();
}
// subtract Binv(D)rw from r;
void apply( BVector& r) const;
// subtract B*inv(D)*C * x from A*x
void apply(const BVector& x, BVector& Ax) const;
void applyDomain(const BVector& x, BVector& Ax, const int domainIndex) const;
#if COMPILE_BDA_BRIDGE
// accumulate the contributions of all Wells in the WellContributions object
void getWellContributions(WellContributions<Scalar>& x) const;
@ -311,6 +312,8 @@ template<class Scalar> class WellContributions;
// apply well model with scaling of alpha
void applyScaleAdd(const Scalar alpha, const BVector& x, BVector& Ax) const;
void applyScaleAddDomain(const Scalar alpha, const BVector& x, BVector& Ax, const int domainIndex) const;
// Check if well equations is converged.
ConvergenceReport getWellConvergence(const std::vector<Scalar>& B_avg, const bool checkWellGroupControls = false) const;
@ -361,6 +364,12 @@ template<class Scalar> class WellContributions;
void addWellPressureEquations(PressureMatrix& jacobian, const BVector& weights,const bool use_well_weights) const;
void addWellPressureEquationsDomain([[maybe_unused]] PressureMatrix& jacobian,
[[maybe_unused]] const BVector& weights,
[[maybe_unused]] const bool use_well_weights,
[[maybe_unused]] const int domainIndex) const;
void addWellPressureEquationsStruct(PressureMatrix& jacobian) const;
void initGliftEclWellMap(GLiftEclWells &ecl_well_map);
@ -441,6 +450,9 @@ template<class Scalar> class WellContributions;
// Keep track of the domain of each well, if using subdomains.
std::map<std::string, int> well_domain_;
// Store the local indices of the well's perforated cells in the domain, if using subdomains
SparseTable<int> well_local_cells_;
const Grid& grid() const
{ return simulator_.vanguard().grid(); }
@ -584,6 +596,15 @@ template<class Scalar> class WellContributions;
private:
BlackoilWellModel(Simulator& simulator, const PhaseUsage& pu);
// These members are used to avoid reallocation in specific functions
// (e.g., apply, applyDomain) instead of using local variables.
// Their state is not relevant between function calls, so they can
// (and must) be mutable, as the functions using them are const.
mutable BVector x_local_;
mutable BVector Ax_local_;
mutable BVector res_local_;
mutable GlobalEqVector linearize_res_local_;
};
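The comment above describes a common pattern: const member functions reuse mutable member buffers as scratch space to avoid reallocating on every call. A small self-contained sketch of that pattern (names illustrative):

#include <cstddef>
#include <vector>

// A const member function reuses a member buffer instead of allocating a
// local vector on every call; the buffer's contents carry no meaning
// between calls, so marking it mutable is safe here.
class GatherSum
{
public:
    double sumAt(const std::vector<double>& global,
                 const std::vector<int>& cells) const
    {
        scratch_.resize(cells.size());        // reuses capacity across calls
        for (std::size_t i = 0; i < cells.size(); ++i)
            scratch_[i] = global[cells[i]];
        double s = 0.0;
        for (const double v : scratch_)
            s += v;
        return s;
    }

private:
    mutable std::vector<double> scratch_;     // state irrelevant between calls
};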

View File

@ -238,7 +238,19 @@ namespace Opm {
}
// Apply as Schur complement the well residual to reservoir residuals:
// r = r - duneC_^T * invDuneD_ * resWell_
well->apply(res);
// Well equations B and C use only the perforated cells, so we need to apply them to a local residual
const auto& cells = well->cells();
linearize_res_local_.resize(cells.size());
for (size_t i = 0; i < cells.size(); ++i) {
linearize_res_local_[i] = res[cells[i]];
}
well->apply(linearize_res_local_);
for (size_t i = 0; i < cells.size(); ++i) {
res[cells[i]] = linearize_res_local_[i];
}
}
OPM_END_PARALLEL_TRY_CATCH("BlackoilWellModel::linearize failed: ",
simulator_.gridView().comm());
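For reference, the elimination behind the comment "r = r - duneC_^T * invDuneD_ * resWell_" above (and "Ax = A x - C D^-1 B x" further down), written out with the block names used in the code:

\begin{pmatrix} A & C^{\top} \\ B & D \end{pmatrix}
\begin{pmatrix} x \\ x_w \end{pmatrix}
=
\begin{pmatrix} r \\ r_w \end{pmatrix}
\;\Longrightarrow\;
x_w = D^{-1}\bigl(r_w - B\,x\bigr), \qquad
\bigl(A - C^{\top} D^{-1} B\bigr)\,x = r - C^{\top} D^{-1} r_w

Because B and C have entries only in the perforated cells, both corrections touch only those entries of r and Ax, which is what the gather/scatter loops here exploit.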
@ -262,7 +274,19 @@ namespace Opm {
}
// Apply as Schur complement the well residual to reservoir residuals:
// r = r - duneC_^T * invDuneD_ * resWell_
well->apply(res);
// Well equations B and C use only the perforated cells, so we need to apply them to a local residual
const auto& cells = well->cells();
linearize_res_local_.resize(cells.size());
for (size_t i = 0; i < cells.size(); ++i) {
linearize_res_local_[i] = res[cells[i]];
}
well->apply(linearize_res_local_);
for (size_t i = 0; i < cells.size(); ++i) {
res[cells[i]] = linearize_res_local_[i];
}
}
}
}
@ -1706,18 +1730,6 @@ namespace Opm {
}
template<typename TypeTag>
void
BlackoilWellModel<TypeTag>::
apply(BVector& r) const
{
for (auto& well : well_container_) {
well->apply(r);
}
}
// Ax = A x - C D^-1 B x
template<typename TypeTag>
void
@ -1725,7 +1737,52 @@ namespace Opm {
apply(const BVector& x, BVector& Ax) const
{
for (auto& well : well_container_) {
well->apply(x, Ax);
// Well equations B and C use only the perforated cells, so we need to apply them to local vectors
const auto& cells = well->cells();
x_local_.resize(cells.size());
Ax_local_.resize(cells.size());
for (size_t i = 0; i < cells.size(); ++i) {
x_local_[i] = x[cells[i]];
Ax_local_[i] = Ax[cells[i]];
}
well->apply(x_local_, Ax_local_);
for (size_t i = 0; i < cells.size(); ++i) {
// only need to update Ax
Ax[cells[i]] = Ax_local_[i];
}
}
}
// Ax = A x - C D^-1 B x
template<typename TypeTag>
void
BlackoilWellModel<TypeTag>::
applyDomain(const BVector& x, BVector& Ax, const int domainIndex) const
{
for (size_t well_index = 0; well_index < well_container_.size(); ++well_index) {
auto& well = well_container_[well_index];
if (well_domain_.at(well->name()) == domainIndex) {
// Well equations B and C use only the perforated cells, so we need to apply them to local vectors
// translate global cell indices to local subdomain cell indices
const auto& local_cells = well_local_cells_[well_index];
x_local_.resize(local_cells.size());
Ax_local_.resize(local_cells.size());
for (size_t i = 0; i < local_cells.size(); ++i) {
x_local_[i] = x[local_cells[i]];
Ax_local_[i] = Ax[local_cells[i]];
}
well->apply(x_local_, Ax_local_);
for (size_t i = 0; i < local_cells.size(); ++i) {
// only need to update Ax
Ax[local_cells[i]] = Ax_local_[i];
}
}
}
}
@ -1788,6 +1845,27 @@ namespace Opm {
Ax.axpy( alpha, scaleAddRes_ );
}
// Ax = Ax - alpha * C D^-1 B x
template<typename TypeTag>
void
BlackoilWellModel<TypeTag>::
applyScaleAddDomain(const Scalar alpha, const BVector& x, BVector& Ax, const int domainIndex) const
{
if (this->well_container_.empty()) {
return;
}
if( scaleAddRes_.size() != Ax.size() ) {
scaleAddRes_.resize( Ax.size() );
}
scaleAddRes_ = 0.0;
// scaleAddRes_ = - C D^-1 B x
applyDomain(x, scaleAddRes_, domainIndex);
// Ax = Ax + alpha * scaleAddRes_
Ax.axpy( alpha, scaleAddRes_ );
}
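Written out, the domain variant computes the same update as the existing applyScaleAdd, only restricted to one domain, by reusing applyDomain on a zeroed scratch vector:

\text{scaleAddRes\_} \leftarrow -\,C^{\top} D^{-1} B\, x \big|_{\text{domain}}, \qquad
Ax \leftarrow Ax + \alpha\,\text{scaleAddRes\_}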
template<typename TypeTag>
void
BlackoilWellModel<TypeTag>::
@ -1801,11 +1879,11 @@ namespace Opm {
template<typename TypeTag>
void
BlackoilWellModel<TypeTag>::
addWellPressureEquations(PressureMatrix& jacobian, const BVector& weights,const bool use_well_weights) const
addWellPressureEquations(PressureMatrix& jacobian, const BVector& weights, const bool use_well_weights) const
{
int nw = this->numLocalWellsEnd();
int nw = this->numLocalWellsEnd();
int rdofs = local_num_cells_;
for ( int i = 0; i < nw; i++ ){
for ( int i = 0; i < nw; i++ ) {
int wdof = rdofs + i;
jacobian[wdof][wdof] = 1.0;// better scaling ?
}
@ -1815,6 +1893,31 @@ namespace Opm {
}
}
template<typename TypeTag>
void
BlackoilWellModel<TypeTag>::
addWellPressureEquationsDomain([[maybe_unused]] PressureMatrix& jacobian,
[[maybe_unused]] const BVector& weights,
[[maybe_unused]] const bool use_well_weights,
[[maybe_unused]] const int domainIndex) const
{
throw std::logic_error("CPRW is not yet implemented for NLDD subdomains");
// To fix this function, rdofs should be the size of the domain, and nw should be the number of wells in the domain
// int nw = this->numLocalWellsEnd(); // should be the number of wells in the domain
// int rdofs = local_num_cells_; // should be the size of the domain
// for ( int i = 0; i < nw; i++ ) {
// int wdof = rdofs + i;
// jacobian[wdof][wdof] = 1.0;// better scaling ?
// }
// for ( const auto& well : well_container_ ) {
// if (well_domain_.at(well->name()) == domainIndex) {
// weights should be the size of the domain
// well->addWellPressureEquations(jacobian, weights, pressureVarIndex, use_well_weights, this->wellState());
// }
// }
}
template <typename TypeTag>
void BlackoilWellModel<TypeTag>::
addReservoirSourceTerms(GlobalEqVector& residual,
@ -1876,7 +1979,13 @@ namespace Opm {
OPM_BEGIN_PARALLEL_TRY_CATCH();
{
for (auto& well : well_container_) {
well->recoverWellSolutionAndUpdateWellState(simulator_, x, this->wellState(), local_deferredLogger);
const auto& cells = well->cells();
x_local_.resize(cells.size());
for (size_t i = 0; i < cells.size(); ++i) {
x_local_[i] = x[cells[i]];
}
well->recoverWellSolutionAndUpdateWellState(simulator_, x_local_, this->wellState(), local_deferredLogger);
}
}
OPM_END_PARALLEL_TRY_CATCH_LOG(local_deferredLogger,
@ -1896,7 +2005,13 @@ namespace Opm {
DeferredLogger local_deferredLogger;
for (auto& well : well_container_) {
if (well_domain_.at(well->name()) == domain.index) {
well->recoverWellSolutionAndUpdateWellState(simulator_, x,
const auto& cells = well->cells();
x_local_.resize(cells.size());
for (size_t i = 0; i < cells.size(); ++i) {
x_local_[i] = x[cells[i]];
}
well->recoverWellSolutionAndUpdateWellState(simulator_, x_local_,
this->wellState(),
local_deferredLogger);
}
@ -2887,6 +3002,29 @@ namespace Opm {
if (this->terminal_output_) {
global_log.logMessages();
}
}
// Pre-calculate the local cell indices for each well
well_local_cells_.clear();
well_local_cells_.reserve(well_container_.size(), 10);
std::vector<int> local_cells;
for (const auto& well : well_container_) {
const auto& global_cells = well->cells();
const int domain_index = well_domain_.at(well->name());
const auto& domain_cells = domains[domain_index].cells;
local_cells.resize(global_cells.size());
// find the local cell index for each well cell in the domain
// assume domain_cells is sorted
for (size_t i = 0; i < global_cells.size(); ++i) {
auto it = std::lower_bound(domain_cells.begin(), domain_cells.end(), global_cells[i]);
if (it != domain_cells.end() && *it == global_cells[i]) {
local_cells[i] = std::distance(domain_cells.begin(), it);
} else {
OPM_THROW(std::runtime_error, fmt::format("Cell {} not found in domain {}",
global_cells[i], domain_index));
}
}
well_local_cells_.appendRow(local_cells.begin(), local_cells.end());
}
}
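The loop above maps each perforated cell's global index to its position in the sorted list of the domain's cells via binary search. A standalone sketch of the same lookup (the indices in the usage comment are made up):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// Map global cell indices to positions in a sorted list of domain cells,
// using the same lower_bound lookup as above.
std::vector<int> mapToLocal(const std::vector<int>& global_cells,
                            const std::vector<int>& domain_cells) // sorted
{
    std::vector<int> local(global_cells.size());
    for (std::size_t i = 0; i < global_cells.size(); ++i) {
        const auto it = std::lower_bound(domain_cells.begin(),
                                         domain_cells.end(),
                                         global_cells[i]);
        assert(it != domain_cells.end() && *it == global_cells[i]);
        local[i] = static_cast<int>(std::distance(domain_cells.begin(), it));
    }
    return local;
}

// Usage: domain cells {4, 7, 9, 12}, well perforates global cells {9, 4}
// -> local indices {2, 0}.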
} // namespace Opm

View File

@ -78,8 +78,8 @@ init(const int num_cells,
}
duneD_.setSize(well_.numberOfSegments(), well_.numberOfSegments(), nnz_d);
}
duneB_.setSize(well_.numberOfSegments(), num_cells, numPerfs);
duneC_.setSize(well_.numberOfSegments(), num_cells, numPerfs);
duneB_.setSize(well_.numberOfSegments(), numPerfs, numPerfs);
duneC_.setSize(well_.numberOfSegments(), numPerfs, numPerfs);
// we need to add the off diagonal ones
for (auto row = duneD_.createbegin(),
@ -108,8 +108,7 @@ init(const int num_cells,
end = duneC_.createend(); row != end; ++row) {
// the number of the row corresponds to the segment number now.
for (const int& perf : perforations[row.index()]) {
const int cell_idx = cells[perf];
row.insert(cell_idx);
row.insert(perf);
}
}
@ -118,12 +117,14 @@ init(const int num_cells,
end = duneB_.createend(); row != end; ++row) {
// the number of the row corresponds to the segment number now.
for (const int& perf : perforations[row.index()]) {
const int cell_idx = cells[perf];
row.insert(cell_idx);
row.insert(perf);
}
}
resWell_.resize(well_.numberOfSegments());
// Store the global indices of the well's perforated cells
cells_ = cells;
}
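The B and C well blocks are now sized by the number of perforations instead of the number of reservoir cells; cells_ keeps the global index of each perforated cell so extract() and extractCPRPressureMatrix() can still write into the reservoir-sized Jacobian. A small self-contained sketch of that local-to-global translation (the map-based "Jacobian" and the names are illustrative only):

#include <cstddef>
#include <map>
#include <utility>
#include <vector>

// Well blocks are indexed by perforation number, and cells_ translates back
// to global cell indices when assembling into the reservoir-sized Jacobian.
struct WellBlocksSketch
{
    std::vector<double> C;    // one entry per perforation, stands in for duneC_
    std::vector<int> cells_;  // global cell index of each perforation

    void addToReservoirJacobian(std::map<std::pair<int, int>, double>& jacobian,
                                const int well_dof) const
    {
        for (std::size_t perf = 0; perf < C.size(); ++perf) {
            const int global_cell = cells_[perf];   // local perf -> global cell
            jacobian[{global_cell, well_dof}] += C[perf];
        }
    }
};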
template<class Scalar, int numWellEq, int numEq>
@ -299,11 +300,12 @@ extract(SparseMatrixAdapter& jacobian) const
for (std::size_t rowC = 0; rowC < duneC_.N(); ++rowC) {
for (auto colC = duneC_[rowC].begin(),
endC = duneC_[rowC].end(); colC != endC; ++colC) {
const auto row_index = colC.index();
// map the well perforated cell index to global cell index
const auto row_index = cells_[colC.index()];
for (std::size_t rowB = 0; rowB < duneB_.N(); ++rowB) {
for (auto colB = duneB_[rowB].begin(),
endB = duneB_[rowB].end(); colB != endB; ++colB) {
const auto col_index = colB.index();
const auto col_index = cells_[colB.index()];
OffDiagMatrixBlockWellType tmp1;
detail::multMatrixImpl(invDuneD[rowC][rowB], (*colB), tmp1, std::true_type());
typename SparseMatrixAdapter::MatrixBlock tmp2;
@ -329,12 +331,14 @@ extractCPRPressureMatrix(PressureMatrix& jacobian,
// Add the pressure contribution to the cpr system for the well
// Add for coupling from well to reservoir
const int welldof_ind = duneC_.M() + well.indexOfWell();
const int number_cells = weights.size();
const int welldof_ind = number_cells + well.indexOfWell();
if (!well.isPressureControlled(well_state)) {
for (std::size_t rowC = 0; rowC < duneC_.N(); ++rowC) {
for (auto colC = duneC_[rowC].begin(),
endC = duneC_[rowC].end(); colC != endC; ++colC) {
const auto row_index = colC.index();
// map the well perforated cell index to global cell index
const auto row_index = cells_[colC.index()];
const auto& bw = weights[row_index];
double matel = 0.0;
@ -354,7 +358,8 @@ extractCPRPressureMatrix(PressureMatrix& jacobian,
for (std::size_t rowB = 0; rowB < duneB_.N(); ++rowB) {
for (auto colB = duneB_[rowB].begin(),
endB = duneB_[rowB].end(); colB != endB; ++colB) {
const auto col_index = colB.index();
// map the well perforated cell index to global cell index
const auto col_index = cells_[colB.index()];
const auto& bw = weights[col_index];
well_weight += bw;
num_perfs += 1;
@ -370,7 +375,8 @@ extractCPRPressureMatrix(PressureMatrix& jacobian,
const auto& bw = well_weight;
for (auto colB = duneB_[rowB].begin(),
endB = duneB_[rowB].end(); colB != endB; ++colB) {
const auto col_index = colB.index();
// map the well perforated cell index to global cell index
const auto col_index = cells_[colB.index()];
double matel = 0.0;
for (std::size_t i = 0; i< bw.size(); ++i) {
matel += bw[i] *(*colB)[i][pressureVarIndex];

View File

@ -145,6 +145,9 @@ public:
BVectorWell resWell_;
const MultisegmentWellGeneric<Scalar>& well_; //!< Reference to well
// Store the global indices of the well's perforated cells
std::vector<int> cells_;
};
}

View File

@ -1906,7 +1906,7 @@ namespace Opm
this->connectionRates_[perf][comp_idx] = Base::restrictEval(cq_s_effective);
MultisegmentWellAssemble(*this).
assemblePerforationEq(seg, cell_idx, comp_idx, cq_s_effective, this->linSys_);
assemblePerforationEq(seg, perf, comp_idx, cq_s_effective, this->linSys_);
}
}

View File

@ -62,8 +62,8 @@ init(const int num_cells,
// B D] x_well] res_well]
// set the size of the matrices
duneD_.setSize(1, 1, 1);
duneB_.setSize(1, num_cells, numPerfs);
duneC_.setSize(1, num_cells, numPerfs);
duneB_.setSize(1, numPerfs, numPerfs);
duneC_.setSize(1, numPerfs, numPerfs);
for (auto row = duneD_.createbegin(),
end = duneD_.createend(); row != end; ++row) {
@ -76,29 +76,25 @@ init(const int num_cells,
for (auto row = duneB_.createbegin(),
end = duneB_.createend(); row != end; ++row) {
for (int perf = 0 ; perf < numPerfs; ++perf) {
const int cell_idx = cells[perf];
row.insert(cell_idx);
row.insert(perf);
}
}
for (int perf = 0 ; perf < numPerfs; ++perf) {
const int cell_idx = cells[perf];
// the block size is run-time determined now
duneB_[0][cell_idx].resize(numWellEq, numEq);
duneB_[0][perf].resize(numWellEq, numEq);
}
// make the C^T matrix
for (auto row = duneC_.createbegin(),
end = duneC_.createend(); row != end; ++row) {
for (int perf = 0; perf < numPerfs; ++perf) {
const int cell_idx = cells[perf];
row.insert(cell_idx);
row.insert(perf);
}
}
for (int perf = 0; perf < numPerfs; ++perf) {
const int cell_idx = cells[perf];
duneC_[0][cell_idx].resize(numWellEq, numEq);
duneC_[0][perf].resize(numWellEq, numEq);
}
resWell_.resize(1);
@ -115,6 +111,9 @@ init(const int num_cells,
for (unsigned i = 0; i < duneD_.N(); ++i) {
invDrw_[i].resize(numWellEq);
}
// Store the global indices of the well's perforated cells
cells_ = cells;
}
template<class Scalar, int numEq>
@ -265,14 +264,17 @@ extract(SparseMatrixAdapter& jacobian) const
for (auto colC = duneC_[0].begin(),
endC = duneC_[0].end(); colC != endC; ++colC)
{
const auto row_index = colC.index();
// map the well perforated cell index to global cell index
const auto row_index = this->cells_[colC.index()];
for (auto colB = duneB_[0].begin(),
endB = duneB_[0].end(); colB != endB; ++colB)
{
// map the well perforated cell index to global cell index
const auto col_index = this->cells_[colB.index()];
detail::multMatrix(invDuneD_[0][0], (*colB), tmp);
detail::negativeMultMatrixTransposed((*colC), tmp, tmpMat);
jacobian.addToBlock(row_index, colB.index(), tmpMat);
jacobian.addToBlock(row_index, col_index, tmpMat);
}
}
}
@ -311,22 +313,23 @@ extractCPRPressureMatrix(PressureMatrix& jacobian,
int nperf = 0;
auto cell_weights = weights[0];// not need for not(use_well_weights)
cell_weights = 0.0;
assert(duneC_.M() == weights.size());
const int welldof_ind = duneC_.M() + well.indexOfWell();
const int number_cells = weights.size();
const int welldof_ind = number_cells + well.indexOfWell();
// do not assume anything about pressure controlled with use_well_weights (work fine with the assumtion also)
if (!well.isPressureControlled(well_state) || use_well_weights) {
// make coupling for reservoir to well
for (auto colC = duneC_[0].begin(),
endC = duneC_[0].end(); colC != endC; ++colC) {
const auto row_ind = colC.index();
const auto& bw = weights[row_ind];
// map the well perforated cell index to global cell index
const auto row_index = cells_[colC.index()];
const auto& bw = weights[row_index];
Scalar matel = 0;
assert((*colC).M() == bw.size());
for (std::size_t i = 0; i < bw.size(); ++i) {
matel += (*colC)[bhp_var_index][i] * bw[i];
}
jacobian[row_ind][welldof_ind] = matel;
jacobian[row_index][welldof_ind] = matel;
cell_weights += bw;
nperf += 1;
}
@ -385,7 +388,8 @@ extractCPRPressureMatrix(PressureMatrix& jacobian,
if (!well.isPressureControlled(well_state) || use_well_weights) {
for (auto colB = duneB_[0].begin(),
endB = duneB_[0].end(); colB != endB; ++colB) {
const auto col_index = colB.index();
// map the well perforated cell index to global cell index
const auto col_index = cells_[colB.index()];
const auto& bw = bweights[0];
Scalar matel = 0;
for (std::size_t i = 0; i < bw.size(); ++i) {

View File

@ -150,6 +150,9 @@ private:
// several vector used in the matrix calculation
mutable BVectorWell Bx_;
mutable BVectorWell invDrw_;
// Store the global indices of the well's perforated cells
std::vector<int> cells_;
};
}

View File

@ -404,7 +404,6 @@ namespace Opm
water_flux_s, deferred_logger);
}
}
const int cell_idx = this->well_cells_[perf];
for (int componentIdx = 0; componentIdx < this->num_components_; ++componentIdx) {
// the cq_s entering mass balance equations need to consider the efficiency factors.
const EvalWell cq_s_effective = cq_s[componentIdx] * this->well_efficiency_factor_;
@ -414,7 +413,7 @@ namespace Opm
StandardWellAssemble<FluidSystem,Indices>(*this).
assemblePerforationEq(cq_s_effective,
componentIdx,
cell_idx,
perf,
this->primary_variables_.numWellEq(),
this->linSys_);
@ -430,7 +429,7 @@ namespace Opm
if constexpr (has_zFraction) {
StandardWellAssemble<FluidSystem,Indices>(*this).
assembleZFracEq(cq_s_zfrac_effective,
cell_idx,
perf,
this->primary_variables_.numWellEq(),
this->linSys_);
}
@ -2133,7 +2132,7 @@ namespace Opm
eq_wat_vel,
pskin_index,
wat_vel_index,
cell_idx,
perf,
this->primary_variables_.numWellEq(),
this->linSys_);
}