Merge pull request #5826 from akva2/move_well_linearization

Move well linearization code out of BlackoilWellModel to WellConnectionAuxiliaryModule
Atgeirr Flø Rasmussen 2025-01-07 13:39:19 +01:00 committed by GitHub
commit 16a38feec1
6 changed files with 175 additions and 318 deletions
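In outline: BlackoilWellModel stops deriving from BaseAuxiliaryModule<TypeTag> directly and instead derives from WellConnectionAuxiliaryModule<TypeTag, BlackoilWellModel<TypeTag>>, which now hosts the auxiliary-module callbacks (numDofs, addNeighbors, linearize, postSolve) and delegates the per-well work back to the model. Below is a minimal, self-contained sketch of that delegation and of the gather/apply/scatter step in linearize(); all names (MiniWell, MiniWellModel, MiniAuxModule) are illustrative stand-ins, not OPM interfaces.

#include <cstddef>
#include <iostream>
#include <vector>

struct MiniWell {
    std::vector<int> cells;  // perforated reservoir cells
    // Stand-in for the Schur-complement update a well applies to the
    // residual restricted to its own cells.
    void apply(std::vector<double>& local_res) const {
        for (double& r : local_res) r -= 0.1 * r;
    }
};

struct MiniWellModel {
    std::vector<MiniWell> wells;
    auto begin() const { return wells.begin(); }
    auto end() const { return wells.end(); }
};

// The auxiliary module keeps a reference to the model and delegates,
// mirroring WellConnectionAuxiliaryModule<TypeTag, Model>.
template <class Model>
class MiniAuxModule {
public:
    explicit MiniAuxModule(Model& model) : model_(model) {}

    // Wells insert no extra degrees of freedom; their equations are
    // eliminated via a Schur complement.
    unsigned numDofs() const { return 0; }

    // Gather the residual on each well's cells, let the well apply its
    // contribution, and scatter the result back.
    void linearize(std::vector<double>& res) {
        for (const auto& well : model_) {
            local_.resize(well.cells.size());
            for (std::size_t i = 0; i < well.cells.size(); ++i)
                local_[i] = res[well.cells[i]];
            well.apply(local_);
            for (std::size_t i = 0; i < well.cells.size(); ++i)
                res[well.cells[i]] = local_[i];
        }
    }

private:
    Model& model_;
    std::vector<double> local_;  // scratch buffer, like linearize_res_local_
};

int main() {
    MiniWellModel model{{MiniWell{{0, 2}}}};
    MiniAuxModule<MiniWellModel> aux(model);
    std::vector<double> res{1.0, 1.0, 1.0};
    aux.linearize(res);
    std::cout << res[0] << ' ' << res[1] << ' ' << res[2] << '\n';  // 0.9 1 0.9
}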

CMakeLists.txt

@@ -201,7 +201,6 @@ list (APPEND MAIN_SOURCE_FILES
opm/simulators/wells/VFPProdProperties.cpp
opm/simulators/wells/WellAssemble.cpp
opm/simulators/wells/WellBhpThpCalculator.cpp
opm/simulators/wells/WellConnectionAuxiliaryModule.cpp
opm/simulators/wells/WellConstraints.cpp
opm/simulators/wells/WellConvergence.cpp
opm/simulators/wells/WellFilterCake.cpp

opm/simulators/wells/BlackoilWellModel.hpp

@@ -24,13 +24,11 @@
#ifndef OPM_BLACKOILWELLMODEL_HEADER_INCLUDED
#define OPM_BLACKOILWELLMODEL_HEADER_INCLUDED
#include <opm/common/OpmLog/OpmLog.hpp>
#include <dune/common/fmatrix.hh>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/matrixmatrix.hh>
#include <cstddef>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include <opm/common/OpmLog/OpmLog.hpp>
#include <opm/grid/utility/SparseTable.hpp>
@@ -39,7 +37,7 @@
#include <opm/input/eclipse/Schedule/Schedule.hpp>
#include <opm/input/eclipse/Schedule/Well/WellTestState.hpp>
#include <opm/models/discretization/common/baseauxiliarymodule.hh>
#include <opm/material/densead/Math.hpp>
#include <opm/simulators/flow/countGlobalCells.hpp>
#include <opm/simulators/flow/FlowBaseVanguard.hpp>
@@ -47,6 +45,11 @@
#include <opm/simulators/linalg/matrixblock.hh>
#include <opm/simulators/timestepping/SimulatorReport.hpp>
#include <opm/simulators/timestepping/gatherConvergenceReport.hpp>
#include <opm/simulators/utils/DeferredLogger.hpp>
#include <opm/simulators/wells/BlackoilWellModelGasLift.hpp>
#include <opm/simulators/wells/BlackoilWellModelGeneric.hpp>
#include <opm/simulators/wells/BlackoilWellModelGuideRates.hpp>
@@ -63,22 +66,18 @@
#include <opm/simulators/wells/StandardWell.hpp>
#include <opm/simulators/wells/VFPInjProperties.hpp>
#include <opm/simulators/wells/VFPProdProperties.hpp>
#include <opm/simulators/wells/WGState.hpp>
#include <opm/simulators/wells/WellConnectionAuxiliaryModule.hpp>
#include <opm/simulators/wells/WellGroupHelpers.hpp>
#include <opm/simulators/wells/WellInterface.hpp>
#include <opm/simulators/wells/WellProdIndexCalculator.hpp>
#include <opm/simulators/wells/WellState.hpp>
#include <opm/simulators/wells/WGState.hpp>
#include <opm/simulators/timestepping/SimulatorReport.hpp>
#include <opm/simulators/timestepping/gatherConvergenceReport.hpp>
#include <dune/common/fmatrix.hh>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/matrixmatrix.hh>
#include <opm/material/densead/Math.hpp>
#include <opm/simulators/utils/DeferredLogger.hpp>
#include <cstddef>
#include <map>
#include <memory>
#include <string>
#include <vector>
namespace Opm {
@@ -88,7 +87,7 @@ template<class Scalar> class WellContributions;
/// Class for handling the blackoil well model.
template<typename TypeTag>
class BlackoilWellModel : public BaseAuxiliaryModule<TypeTag>
class BlackoilWellModel : public WellConnectionAuxiliaryModule<TypeTag, BlackoilWellModel<TypeTag>>
, public BlackoilWellModelGeneric<GetPropType<TypeTag,
Properties::Scalar>>
{
@@ -106,8 +105,9 @@ template<class Scalar> class WellContributions;
using SparseMatrixAdapter = GetPropType<TypeTag, Properties::SparseMatrixAdapter>;
using ModelParameters = BlackoilModelParameters<Scalar>;
using WellConnectionModule = WellConnectionAuxiliaryModule<TypeTag, BlackoilWellModel<TypeTag>>;
constexpr static std::size_t pressureVarIndex = GetPropType<TypeTag, Properties::Indices>::pressureSwitchIdx;
typedef typename BaseAuxiliaryModule<TypeTag>::NeighborSet NeighborSet;
static const int numEq = Indices::numEq;
static const int solventSaturationIdx = Indices::solventSaturationIdx;
@@ -139,51 +139,6 @@ template<class Scalar> class WellContributions;
void init();
void initWellContainer(const int reportStepIdx) override;
/////////////
// <eWoms auxiliary module stuff>
/////////////
unsigned numDofs() const override
// No extra dofs are inserted for wells. (we use a Schur complement.)
{ return 0; }
void addNeighbors(std::vector<NeighborSet>& neighbors) const override;
void applyInitial() override
{}
void linearize(SparseMatrixAdapter& jacobian, GlobalEqVector& res) override;
void linearizeDomain(const Domain& domain, SparseMatrixAdapter& jacobian, GlobalEqVector& res);
void postSolve(GlobalEqVector& deltaX) override
{
recoverWellSolutionAndUpdateWellState(deltaX);
}
void postSolveDomain(GlobalEqVector& deltaX, const Domain& domain)
{
recoverWellSolutionAndUpdateWellStateDomain(deltaX, domain);
}
/////////////
// </ eWoms auxiliary module stuff>
/////////////
template <class Restarter>
void deserialize(Restarter& /* res */)
{
// TODO (?)
}
/*!
* \brief This method writes the complete state of the well
* to the hard disk.
*/
template <class Restarter>
void serialize(Restarter& /* res*/)
{
// TODO (?)
}
void beginEpisode()
{
OPM_TIMEBLOCK(beginEpisode);
@@ -371,6 +326,22 @@ template<class Scalar> class WellContributions;
auto end() const { return well_container_.end(); }
bool empty() const { return well_container_.empty(); }
bool addMatrixContributions() const { return param_.matrix_add_well_contributions_; }
int compressedIndexForInterior(int cartesian_cell_idx) const override
{
return simulator_.vanguard().compressedIndexForInterior(cartesian_cell_idx);
}
// Use the solution x to recover the well solution xw and apply
// it to update the well state.
void recoverWellSolutionAndUpdateWellState(const BVector& x);
// Use the solution x to recover the well solution xw and apply
// it to update the well state, restricted to the given domain.
void recoverWellSolutionAndUpdateWellStateDomain(const BVector& x,
const Domain& domain);
protected:
Simulator& simulator_;
@@ -470,14 +441,6 @@ template<class Scalar> class WellContributions;
// called at the end of a report step
void endReportStep();
// Use the solution x to recover the well solution xw and apply
// it to update the well state.
void recoverWellSolutionAndUpdateWellState(const BVector& x);
// Use the solution x to recover the well solution xw and apply
// it to update the well state, restricted to the given domain.
void recoverWellSolutionAndUpdateWellStateDomain(const BVector& x, const Domain& domain);
// setting the well_solutions_ based on well_state.
void updatePrimaryVariables(DeferredLogger& deferred_logger);
@@ -529,10 +492,6 @@ template<class Scalar> class WellContributions;
void computeWellTemperature();
int compressedIndexForInterior(int cartesian_cell_idx) const override {
return simulator_.vanguard().compressedIndexForInterior(cartesian_cell_idx);
}
private:
BlackoilWellModel(Simulator& simulator, const PhaseUsage& pu);
@@ -543,8 +502,6 @@ template<class Scalar> class WellContributions;
// Their state is not relevant between function calls, so they can
// (and must) be mutable, as the functions using them are const.
mutable BVector x_local_;
mutable BVector res_local_;
mutable GlobalEqVector linearize_res_local_;
};
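Taken together, the hunks above spell out the callback surface the new base class relies on: everything WellConnectionAuxiliaryModule touches on the model (well iteration, addMatrixContributions(), compressedIndexForInterior(), and the two recoverWellSolution... functions) is now public. A hedged C++20 sketch of that implied contract follows — OPM defines no such concept, this is purely illustrative:

#include <concepts>

// Hypothetical concept condensing what WellConnectionAuxiliaryModule
// <TypeTag, Model> needs from its Model parameter after this commit.
template <class Model, class BVector, class Domain, class Well>
concept WellConnectionModel =
    requires(Model& m, const Model& cm, BVector& x,
             const Domain& dom, const Well& well, int cartesian_idx)
{
    cm.begin();                                   // iterate well container
    cm.end();
    { cm.empty() } -> std::convertible_to<bool>;
    { cm.addMatrixContributions() } -> std::convertible_to<bool>;
    { cm.compressedIndexForInterior(cartesian_idx) }
        -> std::convertible_to<int>;
    cm.schedule();                                // wells, future connections
    cm.getCellsForConnections(well);              // perforated cells of a well
    cm.well_domain();                             // well name -> domain index
    m.recoverWellSolutionAndUpdateWellState(x);
    m.recoverWellSolutionAndUpdateWellStateDomain(x, dom);
};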

opm/simulators/wells/BlackoilWellModelGeneric.hpp

@@ -214,6 +214,8 @@ public:
return well_domain_;
}
std::vector<int> getCellsForConnections(const Well& well) const;
bool reportStepStarts() const { return report_step_starts_; }
bool shouldBalanceNetwork(const int reportStepIndex,
@@ -435,7 +437,6 @@ protected:
/// \brief get compressed index for interior cells (-1 otherwise)
virtual int compressedIndexForInterior(int cartesian_cell_idx) const = 0;
std::vector<int> getCellsForConnections(const Well& well) const;
std::vector<std::vector<int>> getMaxWellConnections() const;
std::vector<std::string> getWellsForTesting(const int timeStepIdx,

opm/simulators/wells/BlackoilWellModel_impl.hpp

@@ -57,10 +57,6 @@
#include <opm/simulators/linalg/gpubridge/WellContributions.hpp>
#endif
#if HAVE_MPI
#include <opm/simulators/utils/MPISerializer.hpp>
#endif
#include <algorithm>
#include <cassert>
#include <iomanip>
@@ -73,7 +69,8 @@ namespace Opm {
template<typename TypeTag>
BlackoilWellModel<TypeTag>::
BlackoilWellModel(Simulator& simulator, const PhaseUsage& phase_usage)
: BlackoilWellModelGeneric<Scalar>(simulator.vanguard().schedule(),
: WellConnectionModule(*this, simulator.gridView().comm())
, BlackoilWellModelGeneric<Scalar>(simulator.vanguard().schedule(),
simulator.vanguard().summaryState(),
simulator.vanguard().eclState(),
phase_usage,
@@ -190,116 +187,6 @@
}
}
template<typename TypeTag>
void
BlackoilWellModel<TypeTag>::
addNeighbors(std::vector<NeighborSet>& neighbors) const
{
if (!param_.matrix_add_well_contributions_) {
return;
}
// Create cartesian to compressed mapping
const auto& schedule_wells = this->schedule().getWellsatEnd();
auto possibleFutureConnections = this->schedule().getPossibleFutureConnections();
#if HAVE_MPI
// Communicate the map to other processes, since it is only available on rank 0
const auto& comm = this->simulator_.vanguard().grid().comm();
Parallel::MpiSerializer ser(comm);
ser.broadcast(possibleFutureConnections);
#endif
// initialize the additional cell connections introduced by wells.
for (const auto& well : schedule_wells)
{
std::vector<int> wellCells = this->getCellsForConnections(well);
// Now add the cells of the possible future connections
const auto possibleFutureConnectionSetIt = possibleFutureConnections.find(well.name());
if (possibleFutureConnectionSetIt != possibleFutureConnections.end()) {
for (auto& global_index : possibleFutureConnectionSetIt->second) {
int compressed_idx = compressedIndexForInterior(global_index);
if (compressed_idx >= 0) { // Ignore connections in inactive/remote cells.
wellCells.push_back(compressed_idx);
}
}
}
for (int cellIdx : wellCells) {
neighbors[cellIdx].insert(wellCells.begin(),
wellCells.end());
}
}
}
template<typename TypeTag>
void
BlackoilWellModel<TypeTag>::
linearize(SparseMatrixAdapter& jacobian, GlobalEqVector& res)
{
OPM_BEGIN_PARALLEL_TRY_CATCH();
for (const auto& well: well_container_) {
// Modify the Jacobian with explicit Schur complement
// contributions if requested.
if (param_.matrix_add_well_contributions_) {
well->addWellContributions(jacobian);
}
// Apply as Schur complement the well residual to reservoir residuals:
// r = r - duneC_^T * invDuneD_ * resWell_
// The well equations B and C use only the perforated cells, so apply on the local residual
const auto& cells = well->cells();
linearize_res_local_.resize(cells.size());
for (size_t i = 0; i < cells.size(); ++i) {
linearize_res_local_[i] = res[cells[i]];
}
well->apply(linearize_res_local_);
for (size_t i = 0; i < cells.size(); ++i) {
res[cells[i]] = linearize_res_local_[i];
}
}
OPM_END_PARALLEL_TRY_CATCH("BlackoilWellModel::linearize failed: ",
simulator_.gridView().comm());
}
template<typename TypeTag>
void
BlackoilWellModel<TypeTag>::
linearizeDomain(const Domain& domain, SparseMatrixAdapter& jacobian, GlobalEqVector& res)
{
// Note: no point in trying to do a parallel gathering
// try/catch here, as this function is not called in
// parallel but for each individual domain of each rank.
for (const auto& well: well_container_) {
if (this->well_domain_.at(well->name()) == domain.index) {
// Modify the Jacobian with explicit Schur complement
// contributions if requested.
if (param_.matrix_add_well_contributions_) {
well->addWellContributions(jacobian);
}
// Apply as Schur complement the well residual to reservoir residuals:
// r = r - duneC_^T * invDuneD_ * resWell_
// The well equations B and C use only the perforated cells, so apply on the local residual
const auto& cells = well->cells();
linearize_res_local_.resize(cells.size());
for (size_t i = 0; i < cells.size(); ++i) {
linearize_res_local_[i] = res[cells[i]];
}
well->apply(linearize_res_local_);
for (size_t i = 0; i < cells.size(); ++i) {
res[cells[i]] = linearize_res_local_[i];
}
}
}
}
template<typename TypeTag>
void
BlackoilWellModel<TypeTag>::
@@ -1711,7 +1598,9 @@ namespace Opm {
template<typename TypeTag>
void
BlackoilWellModel<TypeTag>::
addWellPressureEquations(PressureMatrix& jacobian, const BVector& weights, const bool use_well_weights) const
addWellPressureEquations(PressureMatrix& jacobian,
const BVector& weights,
const bool use_well_weights) const
{
int nw = this->numLocalWellsEnd();
int rdofs = local_num_cells_;
@@ -1720,8 +1609,12 @@ namespace Opm {
jacobian[wdof][wdof] = 1.0; // better scaling?
}
for ( const auto& well : well_container_ ) {
well->addWellPressureEquations(jacobian, weights, pressureVarIndex, use_well_weights, this->wellState());
for (const auto& well : well_container_) {
well->addWellPressureEquations(jacobian,
weights,
pressureVarIndex,
use_well_weights,
this->wellState());
}
}
@@ -1810,14 +1703,15 @@ namespace Opm {
DeferredLogger local_deferredLogger;
OPM_BEGIN_PARALLEL_TRY_CATCH();
{
for (auto& well : well_container_) {
for (const auto& well : well_container_) {
const auto& cells = well->cells();
x_local_.resize(cells.size());
for (size_t i = 0; i < cells.size(); ++i) {
x_local_[i] = x[cells[i]];
}
well->recoverWellSolutionAndUpdateWellState(simulator_, x_local_, this->wellState(), local_deferredLogger);
well->recoverWellSolutionAndUpdateWellState(simulator_, x_local_,
this->wellState(), local_deferredLogger);
}
}
OPM_END_PARALLEL_TRY_CATCH_LOG(local_deferredLogger,

opm/simulators/wells/WellConnectionAuxiliaryModule.cpp (deleted)

@@ -1,83 +0,0 @@
/*
Copyright 2017 Dr. Blatt - HPC-Simulation-Software & Services
Copyright 2017 Statoil ASA.
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#include <opm/simulators/wells/WellConnectionAuxiliaryModule.hpp>
#include <opm/grid/CpGrid.hpp>
#include <opm/input/eclipse/Schedule/Schedule.hpp>
#include <opm/input/eclipse/Schedule/Well/Well.hpp>
#include <opm/input/eclipse/Schedule/Well/WellConnections.hpp>
#include <algorithm>
namespace Opm
{
WellConnectionAuxiliaryModuleGeneric::
WellConnectionAuxiliaryModuleGeneric(const Schedule& schedule,
const Dune::CpGrid& grid)
{
// Create cartesian to compressed mapping
const auto& globalCell = grid.globalCell();
const auto& cartesianSize = grid.logicalCartesianSize();
auto size = cartesianSize[0] * cartesianSize[1] * cartesianSize[2];
std::vector<int> cartesianToCompressed(size, -1);
auto begin = globalCell.begin();
for (auto cell = begin, end = globalCell.end(); cell != end; ++cell)
{
cartesianToCompressed[ *cell ] = cell - begin;
}
const auto& schedule_wells = schedule.getWellsatEnd();
wells_.reserve(schedule_wells.size());
// initialize the additional cell connections introduced by wells.
for (const auto& well : schedule_wells)
{
std::vector<int> compressed_well_perforations;
// All possible completions of the well
const auto& completionSet = well.getConnections();
compressed_well_perforations.reserve(completionSet.size());
for (const auto& completion : completionSet)
{
int compressed_idx = cartesianToCompressed[completion.global_index()];
if (compressed_idx >= 0) // Ignore completions in inactive/remote cells.
{
compressed_well_perforations.push_back(compressed_idx);
}
}
if (!compressed_well_perforations.empty())
{
std::sort(compressed_well_perforations.begin(),
compressed_well_perforations.end());
wells_.push_back(compressed_well_perforations);
}
}
}
} // end namespace Opm
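The deleted constructor's central step — inverting CpGrid::globalCell() to map Cartesian cell indices back to compressed (active-cell) indices — now lives behind the model's compressedIndexForInterior(). A standalone sketch of that inversion; the grid size and active-cell numbering below are made up for illustration:

#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    // compressed -> Cartesian, as returned by a globalCell()-style query:
    // here, 4 active cells out of a 6-cell Cartesian box.
    const std::vector<int> globalCell{0, 2, 3, 5};
    const int cartesianSize = 6;

    // Invert it: Cartesian -> compressed, with -1 marking inactive cells.
    std::vector<int> cartesianToCompressed(cartesianSize, -1);
    for (std::size_t c = 0; c < globalCell.size(); ++c)
        cartesianToCompressed[globalCell[c]] = static_cast<int>(c);

    for (int cart = 0; cart < cartesianSize; ++cart)
        std::cout << "cartesian " << cart << " -> compressed "
                  << cartesianToCompressed[cart] << '\n';
}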

opm/simulators/wells/WellConnectionAuxiliaryModule.hpp

@@ -23,68 +23,157 @@
#include <opm/models/discretization/common/baseauxiliarymodule.hh>
#include <vector>
#include <opm/simulators/flow/SubDomain.hpp>
namespace Dune { class CpGrid; }
#include <opm/simulators/utils/DeferredLoggingErrorHelpers.hpp>
#include <opm/simulators/utils/ParallelCommunication.hpp>
namespace Opm
{
class Schedule;
class WellConnectionAuxiliaryModuleGeneric
{
protected:
WellConnectionAuxiliaryModuleGeneric(const Schedule& schedule,
const Dune::CpGrid& grid);
std::vector<std::vector<int> > wells_;
};
template<class TypeTag>
class WellConnectionAuxiliaryModule
: public BaseAuxiliaryModule<TypeTag>
, private WellConnectionAuxiliaryModuleGeneric
#if HAVE_MPI
#include <opm/simulators/utils/MPISerializer.hpp>
#endif
namespace Opm {
template<class TypeTag, class Model>
class WellConnectionAuxiliaryModule : public BaseAuxiliaryModule<TypeTag>
{
using Grid = GetPropType<TypeTag, Properties::Grid>;
using GlobalEqVector = GetPropType<TypeTag, Properties::GlobalEqVector>;
using SparseMatrixAdapter = GetPropType<TypeTag, Properties::SparseMatrixAdapter>;
public:
using NeighborSet = typename
::Opm::BaseAuxiliaryModule<TypeTag>::NeighborSet;
WellConnectionAuxiliaryModule(const Schedule& schedule,
const Dune::CpGrid& grid)
: WellConnectionAuxiliaryModuleGeneric(schedule, grid)
using Domain = SubDomain<Grid>;
WellConnectionAuxiliaryModule(Model& model, Parallel::Communication comm)
: model_(model)
, lin_comm_(std::move(comm))
{
}
unsigned numDofs() const
unsigned numDofs() const override
{
// No extra dofs are inserted for wells.
return 0;
}
void addNeighbors(std::vector<NeighborSet>& neighbors) const
void addNeighbors(std::vector<NeighborSet>& neighbors) const override
{
for (const auto& well_perforations : wells_)
if (!model_.addMatrixContributions()) {
return;
}
// Create cartesian to compressed mapping
const auto& schedule_wells = model_.schedule().getWellsatEnd();
auto possibleFutureConnections = model_.schedule().getPossibleFutureConnections();
#if HAVE_MPI
// Communicate the map to other processes, since it is only available on rank 0
Parallel::MpiSerializer ser(lin_comm_);
ser.broadcast(possibleFutureConnections);
#endif
// initialize the additional cell connections introduced by wells.
for (const auto& well : schedule_wells)
{
for (const auto& perforation : well_perforations)
neighbors[perforation].insert(well_perforations.begin(),
well_perforations.end());
std::vector<int> wellCells = model_.getCellsForConnections(well);
// Now add the cells of the possible future connections
const auto possibleFutureConnectionSetIt = possibleFutureConnections.find(well.name());
if (possibleFutureConnectionSetIt != possibleFutureConnections.end()) {
for (auto& global_index : possibleFutureConnectionSetIt->second) {
int compressed_idx = model_.compressedIndexForInterior(global_index);
if (compressed_idx >= 0) { // Ignore connections in inactive/remote cells.
wellCells.push_back(compressed_idx);
}
}
}
for (int cellIdx : wellCells) {
neighbors[cellIdx].insert(wellCells.begin(),
wellCells.end());
}
}
}
void applyInitial()
void applyInitial() override
{}
void linearize(SparseMatrixAdapter& , GlobalEqVector&)
void linearize(SparseMatrixAdapter& jacobian, GlobalEqVector& res) override
{
// Linearization is done in StandardDenseWells
OPM_BEGIN_PARALLEL_TRY_CATCH();
for (const auto& well : model_) {
this->linearizeSingleWell(jacobian, res, well);
}
OPM_END_PARALLEL_TRY_CATCH("BlackoilWellModel::linearize failed: ", lin_comm_);
}
private:
void postSolve(GlobalEqVector& deltaX) override
{
model_.recoverWellSolutionAndUpdateWellState(deltaX);
}
void linearizeDomain(const Domain& domain,
SparseMatrixAdapter& jacobian,
GlobalEqVector& res)
{
// Note: no point in trying to do a parallel gathering
// try/catch here, as this function is not called in
// parallel but for each individual domain of each rank.
for (const auto& well : model_) {
if (model_.well_domain().at(well->name()) == domain.index) {
this->linearizeSingleWell(jacobian, res, well);
}
}
}
void postSolveDomain(GlobalEqVector& deltaX, const Domain& domain)
{
model_.recoverWellSolutionAndUpdateWellStateDomain(deltaX, domain);
}
template <class Restarter>
void deserialize(Restarter& /* res */)
{
// TODO (?)
}
/*!
* \brief This method writes the complete state of the well
* to the hard disk.
*/
template <class Restarter>
void serialize(Restarter& /* res*/)
{
// TODO (?)
}
private:
template<class WellType>
void linearizeSingleWell(SparseMatrixAdapter& jacobian,
GlobalEqVector& res,
const WellType& well)
{
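// Modify the Jacobian with explicit Schur complement
// contributions if requested.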
if (model_.addMatrixContributions()) {
well->addWellContributions(jacobian);
}
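// Apply as Schur complement the well residual to the reservoir residual:
// r = r - duneC_^T * invDuneD_ * resWell_
// The well equations B and C use only the perforated cells, so gather,
// apply, and scatter on the local residual.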
const auto& cells = well->cells();
linearize_res_local_.resize(cells.size());
for (size_t i = 0; i < cells.size(); ++i) {
linearize_res_local_[i] = res[cells[i]];
}
well->apply(linearize_res_local_);
for (size_t i = 0; i < cells.size(); ++i) {
res[cells[i]] = linearize_res_local_[i];
}
}
Model& model_;
GlobalEqVector linearize_res_local_{};
Parallel::Communication lin_comm_;
};
} // end namespace Opm
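For orientation (not part of the diff): linearizeSingleWell() and postSolve() together implement the standard Schur complement elimination of the well unknowns. Writing x_r for the reservoir unknowns and x_w for the well unknowns, with blocks named to match the code comments (C and D for duneC_ and the matrix inverted by invDuneD_, r_w for resWell_, B the well-to-reservoir coupling block), block elimination gives, up to the sign conventions of the assembly:

\[
\begin{pmatrix} A & C^{\mathsf{T}} \\ B & D \end{pmatrix}
\begin{pmatrix} x_r \\ x_w \end{pmatrix}
=
\begin{pmatrix} r \\ r_w \end{pmatrix}
\;\Longrightarrow\;
\bigl(A - C^{\mathsf{T}} D^{-1} B\bigr)\, x_r = r - C^{\mathsf{T}} D^{-1} r_w,
\qquad
x_w = D^{-1}\bigl(r_w - B\, x_r\bigr).
\]

linearize() applies the right-hand-side correction r <- r - C^T D^{-1} r_w on each well's perforated cells only, and postSolve() performs the x_w recovery through recoverWellSolutionAndUpdateWellState().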