Merge pull request #5906 from akva2/janitoring_const_correctness

Janitoring: const correctness
Bård Skaflestad 2025-01-23 09:06:18 +01:00 committed by GitHub
commit 0507c87e39
30 changed files with 222 additions and 152 deletions
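The diff below applies the same few const-correctness patterns over and over: const-qualifying local tables, iterating containers by const reference, and taking read-only buffers as pointers/references to const. As a rough standalone illustration of those patterns (not code from this PR; the names are made up), assuming nothing beyond standard C++:

#include <cstddef>
#include <iostream>
#include <vector>

// Read-only buffers are taken as pointer-to-const (cf. blockMultSub, csrPatternToCsc below).
double sum(const double* values, std::size_t n)
{
    double s = 0.0;
    for (std::size_t i = 0; i < n; ++i) {
        s += values[i];
    }
    return s;
}

int main()
{
    // Local tables that are never modified become const (cf. xInject/injectLen below).
    const double weights[] = { 0.25, 0.75 };

    const std::vector<std::vector<double>> rows { { 1.0, 2.0 }, { 3.0, 4.0 } };

    // Containers that are only read are traversed by const reference.
    for (const auto& row : rows) {
        std::cout << sum(row.data(), row.size()) << '\n';
    }

    std::cout << sum(weights, 2) << '\n';
}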

View File

@@ -509,8 +509,8 @@ private:
 if (!onUpperBoundary_(pos))
 return false;
-Scalar xInject[] = { 0.25, 0.75 };
-Scalar injectLen[] = { 0.1, 0.1 };
+const Scalar xInject[] = { 0.25, 0.75 };
+const Scalar injectLen[] = { 0.1, 0.1 };
 for (unsigned i = 0; i < sizeof(xInject) / sizeof(Scalar); ++i) {
 if (xInject[i] - injectLen[i] / 2 < lambda
 && lambda < xInject[i] + injectLen[i] / 2)

View File

@@ -28,12 +28,14 @@
 #ifndef EWOMS_DISPERSION_MODULE_HH
 #define EWOMS_DISPERSION_MODULE_HH
+#include <dune/common/fvector.hh>
+#include <opm/models/common/multiphasebaseproperties.hh>
 #include <opm/models/discretization/common/fvbaseproperties.hh>
+#include <opm/material/common/MathToolbox.hpp>
 #include <opm/material/common/Valgrind.hpp>
-#include <dune/common/fvector.hh>
 #include <stdexcept>
 namespace Opm {
@@ -371,7 +373,7 @@ protected:
 for (unsigned i = 0; i < phaseIdxs.size(); ++i) {
 normVelocityCell_[i] = 0;
 }
-for (auto& velocityInfo : velocityInfos) {
+for (const auto& velocityInfo : velocityInfos) {
 for (unsigned i = 0; i < phaseIdxs.size(); ++i) {
 if (FluidSystem::phaseIsActive(phaseIdxs[i])) {
 normVelocityCell_[phaseIdxs[i]] = max( normVelocityCell_[phaseIdxs[i]],

View File

@@ -136,11 +136,12 @@ BlackoilAquiferModel<TypeTag>::endTimeStep()
 {
 using NumAq = AquiferNumerical<TypeTag>;
-for (auto& aquifer : this->aquifers) {
+for (const auto& aquifer : this->aquifers) {
 aquifer->endTimeStep();
-NumAq* num = dynamic_cast<NumAq*>(aquifer.get());
-if (num)
+const NumAq* num = dynamic_cast<const NumAq*>(aquifer.get());
+if (num) {
 this->simulator_.vanguard().grid().comm().barrier();
+}
 }
 }

View File

@@ -81,7 +81,7 @@ void mergeParallelLogFiles(std::string_view output_dir,
 enableLoggingFalloutWarning));
 }
-void handleExtraConvergenceOutput(SimulatorReport& report,
+void handleExtraConvergenceOutput(const SimulatorReport& report,
 std::string_view option,
 std::string_view optionName,
 std::string_view output_dir,

View File

@@ -36,7 +36,7 @@ void mergeParallelLogFiles(std::string_view output_dir,
 std::string_view deck_filename,
 bool enableLoggingFalloutWarning);
-void handleExtraConvergenceOutput(SimulatorReport& report,
+void handleExtraConvergenceOutput(const SimulatorReport& report,
 std::string_view option,
 std::string_view optionName,
 std::string_view output_dir,

View File

@@ -23,6 +23,8 @@
 #ifndef OPM_GENERIC_THRESHOLD_PRESSURE_IMPL_HPP
 #define OPM_GENERIC_THRESHOLD_PRESSURE_IMPL_HPP
+#include <opm/simulators/flow/GenericThresholdPressure.hpp>
 #include <dune/grid/common/mcmgmapper.hh>
 #include <dune/grid/common/rangegenerators.hh>
@@ -34,8 +36,6 @@
 #include <opm/input/eclipse/EclipseState/SimulationConfig/SimulationConfig.hpp>
 #include <opm/input/eclipse/EclipseState/SimulationConfig/ThresholdPressure.hpp>
-#include <opm/simulators/flow/GenericThresholdPressure.hpp>
 #include <fmt/format.h>
 #include <algorithm>
@@ -73,10 +73,11 @@ thresholdPressure(int elem1Idx, int elem2Idx) const
 int fault1Idx = lookUpCartesianData_(elem1Idx, cartElemFaultIdx_);
 int fault2Idx = lookUpCartesianData_(elem2Idx, cartElemFaultIdx_);
-if (fault1Idx != -1 && fault1Idx == fault2Idx)
+if (fault1Idx != -1 && fault1Idx == fault2Idx) {
 // inside a fault there's no threshold pressure, even accross EQUIL
 // regions.
 return 0.0;
+}
 if (fault1Idx != fault2Idx) {
 // TODO: which value if a cell is part of multiple faults? we take
 // the maximum here.
@@ -90,8 +91,9 @@ thresholdPressure(int elem1Idx, int elem2Idx) const
 auto equilRegion1Idx = elemEquilRegion_[elem1Idx];
 auto equilRegion2Idx = elemEquilRegion_[elem2Idx];
-if (equilRegion1Idx == equilRegion2Idx)
+if (equilRegion1Idx == equilRegion2Idx) {
 return 0.0;
+}
 return thpres_[equilRegion1Idx*numEquilRegions_ + equilRegion2Idx];
 }
@@ -103,8 +105,9 @@ finishInit()
 const auto& simConfig = eclState_.getSimulationConfig();
 enableThresholdPressure_ = simConfig.useThresholdPressure();
-if (!enableThresholdPressure_)
+if (!enableThresholdPressure_) {
 return;
+}
 numEquilRegions_ = eclState_.getTableManager().getEqldims().getNumEquilRegions();
 const decltype(numEquilRegions_) maxRegions =
@@ -129,16 +132,18 @@ finishInit()
 }
 // internalize the data specified using the EQLNUM keyword
-elemEquilRegion_ = lookUpData_.template assignFieldPropsIntOnLeaf<short unsigned int>(eclState_.fieldProps(),
-"EQLNUM", true);
+elemEquilRegion_ = lookUpData_.
+    template assignFieldPropsIntOnLeaf<short unsigned int>(eclState_.fieldProps(),
+                                                           "EQLNUM", true);
 /*
 If this is a restart run the ThresholdPressure object will be active,
 and already properly initialized with numerical values from the restart.
 Done using GenericThresholdPressure::setFromRestart() in EclWriter::beginRestart().
 */
-if (simConfig.getThresholdPressure().restart())
+if (simConfig.getThresholdPressure().restart()) {
 return;
+}
 // allocate the array which specifies the threshold pressures
 thpres_.resize(numEquilRegions_*numEquilRegions_, 0.0);
@@ -156,10 +161,12 @@ applyExplicitThresholdPressures_()
 // intersection in the grid
 for (const auto& elem : elements(gridView_, Dune::Partitions::interior)) {
 for (const auto& intersection : intersections(gridView_, elem)) {
-if (intersection.boundary())
+if (intersection.boundary()) {
 continue; // ignore boundary intersections for now (TODO?)
-else if (!intersection.neighbor()) //processor boundary but not domain boundary
+}
+else if (!intersection.neighbor()) { // processor boundary but not domain boundary
 continue;
+}
 const auto& inside = intersection.inside();
 const auto& outside = intersection.outside();
@@ -189,8 +196,9 @@ applyExplicitThresholdPressures_()
 }
 // apply threshold pressures across faults
-if (thpres.ftSize() > 0)
+if (thpres.ftSize() > 0) {
 configureThpresft_();
+}
 }
 template<class Grid, class GridView, class ElementMapper, class Scalar>
@@ -207,14 +215,16 @@ configureThpresft_()
 int numCartesianElem = eclState_.getInputGrid().getCartesianSize();
 thpresftValues_.resize(numFaults, -1.0);
 cartElemFaultIdx_.resize(numCartesianElem, -1);
-for (std::size_t faultIdx = 0; faultIdx < faults.size(); faultIdx++) {
-auto& fault = faults.getFault(faultIdx);
+for (std::size_t faultIdx = 0; faultIdx < faults.size(); ++faultIdx) {
+const auto& fault = faults.getFault(faultIdx);
 thpresftValues_[faultIdx] = thpres.getThresholdPressureFault(faultIdx);
-for (const FaultFace& face : fault)
+for (const FaultFace& face : fault) {
 // "face" is a misnomer because the object describes a set of cell
 // indices, but we go with the conventions of the parser here...
-for (std::size_t cartElemIdx : face)
+for (std::size_t cartElemIdx : face) {
 cartElemFaultIdx_[cartElemIdx] = faultIdx;
+}
+}
 }
 }
@@ -223,8 +233,9 @@
 GenericThresholdPressure<Grid,GridView,ElementMapper,Scalar>::
 getRestartVector() const
 {
-if (!enableThresholdPressure_)
+if (!enableThresholdPressure_) {
 return {};
+}
 return this->thpres_;
 }
@@ -243,8 +254,9 @@
 GenericThresholdPressure<Grid,GridView,ElementMapper,Scalar>::
 logPressures()
 {
-if (!enableThresholdPressure_)
+if (!enableThresholdPressure_) {
 return;
+}
 auto lineFormat = [this](unsigned i, unsigned j, double val)
 {
@@ -274,7 +286,8 @@ logPressures()
 if (thpres.hasRegionBarrier(i, j)) {
 if (thpres.hasThresholdPressure(i, j)) {
 str += lineFormat(i, j, thpres.getThresholdPressure(j, i));
-} else {
+}
+else {
 std::size_t idx = (j - 1) * numEquilRegions_ + (i - 1);
 str += lineFormat(i, j, this->thpresDefault_[idx]);
 }

View File

@@ -190,7 +190,7 @@ Opm::InterRegFlowMap::getInterRegFlows() const
 auto maps = std::vector<data::InterRegFlowMap>{};
 maps.reserve(this->regionMaps_.size());
-for (auto& regionMap : this->regionMaps_) {
+for (const auto& regionMap : this->regionMaps_) {
 maps.push_back(regionMap.getInterRegFlows());
 }
@@ -203,7 +203,7 @@ Opm::InterRegFlowMap::getLocalMaxRegionID() const
 auto maxLocalRegionID = std::vector<std::size_t>{};
 maxLocalRegionID.reserve(this->regionMaps_.size());
-for (auto& regionMap : this->regionMaps_) {
+for (const auto& regionMap : this->regionMaps_) {
 maxLocalRegionID.push_back(regionMap.getLocalMaxRegionID());
 }

View File

@@ -711,7 +711,7 @@ public:
 if (!problem.model().linearizer().getFlowsInfo().empty()) {
 const auto& flowsInf = problem.model().linearizer().getFlowsInfo();
 auto flowsInfos = flowsInf[globalDofIdx];
-for (auto& flowsInfo : flowsInfos) {
+for (const auto& flowsInfo : flowsInfos) {
 if (flowsInfo.faceId >= 0) {
 if (!this->flows_[flowsInfo.faceId][gasCompIdx].empty()) {
 this->flows_[flowsInfo.faceId][gasCompIdx][globalDofIdx]
@@ -750,7 +750,7 @@ public:
 if (!problem.model().linearizer().getFloresInfo().empty()) {
 const auto& floresInf = problem.model().linearizer().getFloresInfo();
 auto floresInfos =floresInf[globalDofIdx];
-for (auto& floresInfo : floresInfos) {
+for (const auto& floresInfo : floresInfos) {
 if (floresInfo.faceId >= 0) {
 if (!this->flores_[floresInfo.faceId][gasCompIdx].empty()) {
 this->flores_[floresInfo.faceId][gasCompIdx][globalDofIdx]

View File

@@ -244,7 +244,7 @@ public:
 #ifdef RESERVOIR_COUPLING_ENABLED
 // NOTE: The argc and argv will be used when launching a slave process
-void init(SimulatorTimer &timer, int argc, char** argv)
+void init(const SimulatorTimer& timer, int argc, char** argv)
 {
 auto slave_mode = Parameters::Get<Parameters::Slave>();
 if (slave_mode) {
@@ -269,7 +269,7 @@
 }
 }
 #else
-void init(SimulatorTimer &timer)
+void init(const SimulatorTimer& timer)
 {
 #endif
 simulator_.setEpisodeIndex(-1);

View File

@@ -616,7 +616,7 @@ protected:
 if (elem.partitionType() != Dune::InteriorEntity)
 {
 // Dirichlet boundary conditions needed for the parallel matrix
-for (auto& tr : tbatch) {
+for (const auto& tr : tbatch) {
 if (tr.numTracer() != 0) {
 (*tr.mat)[I][I][0][0] = 1.;
 (*tr.mat)[I][I][1][1] = 1.;
@@ -892,7 +892,7 @@ protected:
 TracerBatch(int phaseIdx = 0) : phaseIdx_(phaseIdx) {}
-int numTracer() const {return idx_.size(); }
+int numTracer() const { return idx_.size(); }
 void addTracer(const int idx, const TV & concentration)
 {

View File

@@ -61,7 +61,7 @@ void sortRow(int *colIndices, int *data, int left, int right)
 // LUMat->nnzValues[ik] = LUMat->nnzValues[ik] - (pivot * LUMat->nnzValues[jk]) in ilu decomposition
 // a = a - (b * c)
 template<class Scalar>
-void blockMultSub(Scalar* a, Scalar* b, Scalar* c, unsigned int block_size)
+void blockMultSub(Scalar* a, const Scalar* b, const Scalar* c, unsigned int block_size)
 {
 for (unsigned int row = 0; row < block_size; row++) {
 for (unsigned int col = 0; col < block_size; col++) {
@@ -76,7 +76,8 @@ void blockMultSub(Scalar* a, Scalar* b, Scalar* c, unsigned int block_size)
 /*Perform a 3x3 matrix-matrix multiplicationj on two blocks*/
 template<class Scalar>
-void blockMult(Scalar* mat1, Scalar* mat2, Scalar* resMat, unsigned int block_size)
+void blockMult(const Scalar* mat1, const Scalar* mat2,
+               Scalar* resMat, unsigned int block_size)
 {
 for (unsigned int row = 0; row < block_size; row++) {
 for (unsigned int col = 0; col < block_size; col++) {
@@ -90,8 +91,8 @@ void blockMult(Scalar* mat1, Scalar* mat2, Scalar* resMat, unsigned int block_si
 }
 #define INSTANTIATE_TYPE(T) \
-template void blockMultSub(T*, T*, T*, unsigned int); \
-template void blockMult(T*, T*, T*, unsigned int);
+template void blockMultSub(T*, const T*, const T*, unsigned int); \
+template void blockMult(const T*, const T*, T*, unsigned int);
 INSTANTIATE_TYPE(double)
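The "a = a - (b * c)" comment above describes the in-place dense block update that blockMultSub performs during the ILU decomposition. A rough standalone equivalent for row-major block_size x block_size blocks (an illustrative sketch with made-up names, not the library implementation):

#include <cstddef>

// a -= b * c for dense, row-major block_size x block_size blocks.
template<class Scalar>
void block_mult_sub_sketch(Scalar* a, const Scalar* b, const Scalar* c,
                           std::size_t block_size)
{
    for (std::size_t row = 0; row < block_size; ++row) {
        for (std::size_t col = 0; col < block_size; ++col) {
            Scalar acc = 0;
            for (std::size_t k = 0; k < block_size; ++k) {
                acc += b[row * block_size + k] * c[k * block_size + col];
            }
            a[row * block_size + col] -= acc;
        }
    }
}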

View File

@@ -111,7 +111,8 @@ void sortRow(int* colIndices, int* data, int left, int right);
 /// \param[in] c input block
 /// \param[in] block_size size of block
 template<class Scalar>
-void blockMultSub(Scalar* a, Scalar* b, Scalar* c, unsigned int block_size);
+void blockMultSub(Scalar* a, const Scalar* b,
+                  const Scalar* c, unsigned int block_size);
 /// Perform a matrix-matrix multiplication on two blocks
 /// resMat = mat1 * mat2

View File

@@ -23,28 +23,28 @@
 #include <opm/simulators/linalg/gpubridge/Reorder.hpp>
-#include <vector>
 #include <cassert>
+#include <vector>
-namespace Opm
-{
-namespace Accelerator
-{
+namespace Opm::Accelerator {
 /* Check is operations on a node in the matrix can be started
-* A node can only be started if all nodes that it depends on during sequential execution have already completed.*/
-bool canBeStarted(const int rowIndex, const int *rowPointers, const int *colIndices, const std::vector<bool>& doneRows) {
+* A node can only be started if all nodes that it depends on during sequential
+* execution have already completed.*/
+bool canBeStarted(const int rowIndex,
+                  const int* rowPointers,
+                  const int* colIndices,
+                  const std::vector<bool>& doneRows)
+{
 bool canStart = !doneRows[rowIndex];
-int i, thisDependency;
 if (canStart) {
-for (i = rowPointers[rowIndex]; i < rowPointers[rowIndex + 1]; i++) {
-thisDependency = colIndices[i];
+for (int i = rowPointers[rowIndex]; i < rowPointers[rowIndex + 1]; ++i) {
+int thisDependency = colIndices[i];
 // Only dependencies on rows that should execute before the current one are relevant
-if (thisDependency >= rowIndex)
+if (thisDependency >= rowIndex) {
 break;
+}
 // Check if dependency has been resolved
 if (!doneRows[thisDependency]) {
 return false;
@@ -55,14 +55,23 @@ bool canBeStarted(const int rowIndex, const int *rowPointers, const int *colIndi
 }
 /*
-* The level scheduling of a non-symmetric, blocked matrix requires access to a CSC encoding and a CSR encoding of the sparsity pattern of the input matrix.
+* The level scheduling of a non-symmetric, blocked matrix requires access to a CSC
+* encoding and a CSR encoding of the sparsity pattern of the input matrix.
 * This function is based on a standard level scheduling algorithm, like the one described in:
 * "Iterative methods for Sparse Linear Systems" by Yousef Saad in section 11.6.3
 */
-void findLevelScheduling(int *CSRColIndices, int *CSRRowPointers, int *CSCRowIndices, int *CSCColPointers, int Nb, int *numColors, int *toOrder, int* fromOrder, std::vector<int>& rowsPerColor) {
-int activeRowIndex = 0, colorEnd, nextActiveRowIndex = 0;
-int thisRow;
+void findLevelScheduling(const int* CSRColIndices,
+                         const int* CSRRowPointers,
+                         const int* CSCRowIndices,
+                         const int* CSCColPointers,
+                         int Nb,
+                         int* numColors,
+                         int* toOrder,
+                         int* fromOrder,
+                         std::vector<int>& rowsPerColor)
+{
+int activeRowIndex = 0, nextActiveRowIndex = 0;
 std::vector<bool> doneRows(Nb, false);
 std::vector <int> rowsToStart;
@@ -70,23 +79,27 @@ void findLevelScheduling(int *CSRColIndices, int *CSRRowPointers, int *CSCRowInd
 assert(rowsPerColor.empty());
 // find starting rows: rows that are independent from all rows that come before them.
-for (thisRow = 0; thisRow < Nb; thisRow++) {
+int thisRow;
+for (thisRow = 0; thisRow < Nb; ++thisRow) {
 if (canBeStarted(thisRow, CSCColPointers, CSCRowIndices, doneRows)) {
 fromOrder[nextActiveRowIndex] = thisRow;
 toOrder[thisRow] = nextActiveRowIndex;
-nextActiveRowIndex++;
+++nextActiveRowIndex;
 }
 }
 // 'do' compute on all active rows
-for (colorEnd = 0; colorEnd < nextActiveRowIndex; colorEnd++) {
+int colorEnd;
+for (colorEnd = 0; colorEnd < nextActiveRowIndex; ++colorEnd) {
 doneRows[fromOrder[colorEnd]] = true;
 }
 rowsPerColor.emplace_back(nextActiveRowIndex - activeRowIndex);
 while (colorEnd < Nb) {
-// Go over all rows active from the last color, and check which of their neighbours can be activated this color
-for (; activeRowIndex < colorEnd; activeRowIndex++) {
+// Go over all rows active from the last color, and check which of
+// their neighbours can be activated this color
+for (; activeRowIndex < colorEnd; ++activeRowIndex) {
 thisRow = fromOrder[activeRowIndex];
 for (int i = CSCColPointers[thisRow]; i < CSCColPointers[thisRow + 1]; i++) {
@@ -104,7 +117,7 @@ void findLevelScheduling(int *CSRColIndices, int *CSRRowPointers, int *CSCRowInd
 doneRows[thisRow] = true;
 fromOrder[nextActiveRowIndex] = thisRow;
 toOrder[thisRow] = nextActiveRowIndex;
-nextActiveRowIndex++;
+++nextActiveRowIndex;
 }
 }
 rowsToStart.clear();
@@ -115,10 +128,13 @@ void findLevelScheduling(int *CSRColIndices, int *CSRRowPointers, int *CSCRowInd
 *numColors = rowsPerColor.size();
 }
 // based on the scipy package from python, scipy/sparse/sparsetools/csr.h on github
-void csrPatternToCsc(int *CSRColIndices, int *CSRRowPointers, int *CSCRowIndices, int *CSCColPointers, int Nb) {
+void csrPatternToCsc(const int* CSRColIndices,
+                     const int* CSRRowPointers,
+                     int* CSCRowIndices,
+                     int* CSCColPointers,
+                     int Nb)
+{
 int nnz = CSRRowPointers[Nb];
 // compute number of nnzs per column
@@ -141,7 +157,7 @@ void csrPatternToCsc(int *CSRColIndices, int *CSRRowPointers, int *CSCRowIndices
 int col = CSRColIndices[j];
 int dest = CSCColPointers[col];
 CSCRowIndices[dest] = row;
-CSCColPointers[col]++;
+++CSCColPointers[col];
 }
 }
@@ -152,6 +168,4 @@ void csrPatternToCsc(int *CSRColIndices, int *CSRRowPointers, int *CSCRowIndices
 }
 }
-} // namespace Accelerator
-} // namespace Opm
+} // namespace Opm::Accelerator
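csrPatternToCsc above follows the two-pass counting-sort construction referenced from scipy's sparsetools/csr.h. A self-contained sketch of the same idea for an unblocked pattern (illustrative only; the names are hypothetical and, unlike the routine above, it keeps the column cursors in a separate array):

#include <vector>

// Build the CSC sparsity pattern (cscColPtr, cscRowIdx) from a CSR pattern.
void csr_pattern_to_csc_sketch(const std::vector<int>& csrRowPtr,
                               const std::vector<int>& csrColIdx,
                               int n,
                               std::vector<int>& cscColPtr,
                               std::vector<int>& cscRowIdx)
{
    const int nnz = csrRowPtr[n];
    cscColPtr.assign(n + 1, 0);
    cscRowIdx.assign(nnz, 0);

    // Pass 1: count the number of entries in each column.
    for (int i = 0; i < nnz; ++i) {
        ++cscColPtr[csrColIdx[i] + 1];
    }
    // Exclusive prefix sum turns the counts into column pointers.
    for (int col = 0; col < n; ++col) {
        cscColPtr[col + 1] += cscColPtr[col];
    }

    // Pass 2: scatter row indices into their columns (rows stay sorted).
    std::vector<int> next(cscColPtr.begin(), cscColPtr.end() - 1);
    for (int row = 0; row < n; ++row) {
        for (int j = csrRowPtr[row]; j < csrRowPtr[row + 1]; ++j) {
            const int col = csrColIdx[j];
            cscRowIdx[next[col]++] = row;
        }
    }
}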

View File

@@ -22,10 +22,7 @@
 #include <vector>
-namespace Opm
-{
-namespace Accelerator
-{
+namespace Opm::Accelerator {
 /// Determine whether all rows that a certain row depends on are done already
 /// \param[in] rowIndex index of the row that needs to be checked for
@@ -33,7 +30,10 @@ namespace Accelerator
 /// \param[in] colIndices column indices of the matrix that the row is in
 /// \param[in] doneRows array that for each row lists whether it is done or not
 /// \return true iff all dependencies are done and if the result itself was not done yet
-bool canBeStarted(const int rowIndex, const int *rowPointers, const int *colIndices, const std::vector<bool>& doneRows);
+bool canBeStarted(const int rowIndex,
+                  const int* rowPointers,
+                  const int* colIndices,
+                  const std::vector<bool>& doneRows);
 /// Find a level scheduling reordering for an input matrix
 /// The toOrder and fromOrder arrays must be allocated already
@@ -46,7 +46,15 @@ bool canBeStarted(const int rowIndex, const int *rowPointers, const int *colIn
 /// \param[out] toOrder the reorder pattern that was found, which lists for each index in the original order, to which index in the new order it should be moved
 /// \param[out] fromOrder the reorder pattern that was found, which lists for each index in the new order, from which index in the original order it was moved
 /// \param[out] rowsPerColor for each color, an array of all rowIndices in that color, this function uses emplace_back() to fill
-void findLevelScheduling(int *CSRColIndices, int *CSRRowPointers, int *CSCRowIndices, int *CSCColPointers, int Nb, int *numColors, int *toOrder, int* fromOrder, std::vector<int>& rowsPerColor);
+void findLevelScheduling(const int* CSRColIndices,
+                         const int* CSRRowPointers,
+                         const int* CSCRowIndices,
+                         const int* CSCColPointers,
+                         int Nb,
+                         int* numColors,
+                         int* toOrder,
+                         int* fromOrder,
+                         std::vector<int>& rowsPerColor);
 /// Convert a sparsity pattern stored in the CSR format to the CSC format
 /// CSCRowIndices and CSCColPointers arrays must be allocated already
@@ -56,9 +64,12 @@ void findLevelScheduling(int *CSRColIndices, int *CSRRowPointers, int *CSCRowInd
 /// \param[inout] CSCRowIndices row indices of the result CSC representation of the pattern
 /// \param[inout] CSCColPointers column pointers of the result CSC representation of the pattern
 /// \param[in] Nb number of blockrows in the matrix
-void csrPatternToCsc(int *CSRColIndices, int *CSRRowPointers, int *CSCRowIndices, int *CSCColPointers, int Nb);
+void csrPatternToCsc(const int* CSRColIndices,
+                     const int* CSRRowPointers,
+                     int* CSCRowIndices,
+                     int* CSCColPointers,
+                     int Nb);
-} // namespace Accelerator
-} // namespace Opm
+} // namespace Opm::Accelerator
 #endif

View File

@@ -163,7 +163,7 @@ void amgclSolverBackend<Scalar,block_size>::initialize(int Nb_, int nnzbs)
 template<class Scalar, unsigned int block_size>
 void amgclSolverBackend<Scalar,block_size>::
-convert_sparsity_pattern(int* rows, int* cols)
+convert_sparsity_pattern(const int* rows, const int* cols)
 {
 Timer t;
 const unsigned int bs = block_size;
@@ -193,7 +193,7 @@ convert_sparsity_pattern(int* rows, int* cols)
 template<class Scalar, unsigned int block_size>
 void amgclSolverBackend<Scalar,block_size>::
-convert_data(Scalar* vals, int* rows)
+convert_data(const Scalar* vals, const int* rows)
 {
 Timer t;
 const unsigned int bs = block_size;

View File

@@ -105,12 +105,12 @@ private:
 /// Convert the BCSR sparsity pattern to a CSR one
 /// \param[in] rows array of rowPointers, contains N/dim+1 values
 /// \param[in] cols array of columnIndices, contains nnz values
-void convert_sparsity_pattern(int *rows, int *cols);
+void convert_sparsity_pattern(const int *rows, const int *cols);
 /// Convert the BCSR nonzero data to a CSR format
 /// \param[in] vals array of nonzeroes, each block is stored row-wise and contiguous, contains nnz values
 /// \param[in] rows array of rowPointers, contains N/dim+1 values
-void convert_data(Scalar* vals, int* rows);
+void convert_data(const Scalar* vals, const int* rows);
 /// Solve linear system
 /// \param[in] b pointer to b vector

View File

@@ -194,12 +194,13 @@ create_preconditioner(BlockedMatrix<Scalar>* mat, BlockedMatrix<Scalar>* jacMat)
 {
 const unsigned int bs = block_size;
-auto *matToDecompose = jacMat ? jacMat : mat;
+const auto* matToDecompose = jacMat ? jacMat : mat;
 bool use_multithreading = true;
 #if HAVE_OPENMP
-if (omp_get_max_threads() == 1)
+if (omp_get_max_threads() == 1) {
 use_multithreading = false;
+}
 #endif
 if (jacMat && use_multithreading) {

View File

@@ -18,14 +18,19 @@
 */
 #ifndef OPM_GPUISTL_GPUOWNEROVERLAPCOPY_HPP
 #define OPM_GPUISTL_GPUOWNEROVERLAPCOPY_HPP
 #include <dune/istl/owneroverlapcopy.hh>
+#include <opm/simulators/linalg/gpuistl/GpuVector.hpp>
+#include <mpi.h>
 #include <memory>
 #include <mutex>
-#include <opm/simulators/linalg/gpuistl/GpuVector.hpp>
 #include <vector>
-namespace Opm::gpuistl
-{
+namespace Opm::gpuistl {
 /**
 * @brief GPUSender is a wrapper class for classes which will implement copOwnerToAll
 * This is implemented with the intention of creating communicators with generic GPUSender
@@ -119,10 +124,11 @@ public:
 explicit GPUObliviousMPISender(const OwnerOverlapCopyCommunicationType& cpuOwnerOverlapCopy)
 : GPUSender<field_type, OwnerOverlapCopyCommunicationType>(cpuOwnerOverlapCopy)
 {
 }
-void copyOwnerToAll(const X& source, X& dest) const override {
+void copyOwnerToAll(const X& source, X& dest) const override
+{
 // TODO: [perf] Can we reduce copying from the GPU here?
 // TODO: [perf] Maybe create a global buffer instead?
 auto sourceAsDuneVector = source.template asDuneBlockVector<block_size>();
@@ -179,7 +185,6 @@ public:
 void copyOwnerToAll(const X& source, X& dest) const override
 {
 OPM_ERROR_IF(&source != &dest, "The provided GpuVectors' address did not match"); // In this context, source == dest!!!
 std::call_once(this->m_initializedIndices, [&]() { initIndexSet(); });
@@ -198,9 +203,9 @@ public:
 {
 size_t i = 0;
-for(const_iterator info = m_messageInformation.begin(); info != end; ++info, ++i) {
+for (const_iterator info = m_messageInformation.begin(); info != end; ++info, ++i) {
 processMap[i]=info->first;
-if(info->second.second.m_size) {
+if (info->second.second.m_size) {
 MPI_Irecv(m_GPURecvBuf->data()+info->second.second.m_start,
 detail::to_int(info->second.second.m_size),
 MPI_BYTE,
@@ -209,16 +214,17 @@ public:
 this->m_cpuOwnerOverlapCopy.communicator(),
 &recvRequests[i]);
 numberOfRealRecvRequests += 1;
-} else {
-recvRequests[i]=MPI_REQUEST_NULL;
+}
+else {
+recvRequests[i] = MPI_REQUEST_NULL;
 }
 }
 }
 {
 size_t i = 0;
-for(const_iterator info = m_messageInformation.begin(); info != end; ++info, ++i) {
-if(info->second.first.m_size) {
+for (const_iterator info = m_messageInformation.begin(); info != end; ++info, ++i) {
+if (info->second.first.m_size) {
 MPI_Issend(m_GPUSendBuf->data()+info->second.first.m_start,
 detail::to_int(info->second.first.m_size),
 MPI_BYTE,
@@ -227,24 +233,28 @@ public:
 this->m_cpuOwnerOverlapCopy.communicator(),
 &sendRequests[i]);
 } else {
-sendRequests[i]=MPI_REQUEST_NULL;
+sendRequests[i] = MPI_REQUEST_NULL;
 }
 }
 }
 int finished = MPI_UNDEFINED;
 MPI_Status status;
-for(size_t i = 0; i < numberOfRealRecvRequests; i++) {
+for (size_t i = 0; i < numberOfRealRecvRequests; i++) {
 status.MPI_ERROR=MPI_SUCCESS;
 MPI_Waitany(m_messageInformation.size(), recvRequests.data(), &finished, &status);
-if(status.MPI_ERROR!=MPI_SUCCESS) {
-OPM_THROW(std::runtime_error, fmt::format("MPI_Error occurred while rank {} received a message from rank {}", rank, processMap[finished]));
+if (status.MPI_ERROR!=MPI_SUCCESS) {
+OPM_THROW(std::runtime_error,
+          fmt::format("MPI_Error occurred while rank {} received a message from rank {}",
+                      rank, processMap[finished]));
 }
 }
 MPI_Status recvStatus;
-for(size_t i = 0; i < m_messageInformation.size(); i++) {
-if(MPI_SUCCESS!=MPI_Wait(&sendRequests[i], &recvStatus)) {
-OPM_THROW(std::runtime_error, fmt::format("MPI_Error occurred while rank {} sent a message from rank {}", rank, processMap[finished]));
+for (size_t i = 0; i < m_messageInformation.size(); i++) {
+if (MPI_SUCCESS != MPI_Wait(&sendRequests[i], &recvStatus)) {
+OPM_THROW(std::runtime_error,
+          fmt::format("MPI_Error occurred while rank {} sent a message from rank {}",
+                      rank, processMap[finished]));
 }
 }
 // ...End of MPI stuff
@@ -275,24 +285,25 @@ private:
 void buildCommPairIdxs() const
 {
-auto &ri = this->m_cpuOwnerOverlapCopy.remoteIndices();
+const auto& ri = this->m_cpuOwnerOverlapCopy.remoteIndices();
 std::vector<int> commpairIndicesCopyOnCPU;
 std::vector<int> commpairIndicesOwnerCPU;
-for(auto process : ri) {
+for (const auto& process : ri) {
 m_im[process.first] = std::pair(std::vector<int>(), std::vector<int>());
-for(int send = 0; send < 2; ++send) {
+for (int send = 0; send < 2; ++send) {
 auto remoteEnd = send ? process.second.first->end()
 : process.second.second->end();
 auto remote = send ? process.second.first->begin()
 : process.second.second->begin();
-while(remote != remoteEnd) {
+while (remote != remoteEnd) {
 if (send ? (remote->localIndexPair().local().attribute() == 1)
 : (remote->attribute() == 1)) {
 if (send) {
 m_im[process.first].first.push_back(remote->localIndexPair().local().local());
-} else {
+}
+else {
 m_im[process.first].second.push_back(remote->localIndexPair().local().local());
 }
 }
@@ -317,13 +328,13 @@ private:
 recvBufIdx * block_size,
 noRecv * block_size * sizeof(field_type)))));
-for(int x = 0; x < noSend; x++) {
-for(int bs = 0; bs < block_size; bs++) {
+for (int x = 0; x < noSend; x++) {
+for (int bs = 0; bs < block_size; bs++) {
 commpairIndicesOwnerCPU.push_back(it->second.first[x] * block_size + bs);
 }
 }
-for(int x = 0; x < noRecv; x++) {
-for(int bs = 0; bs < block_size; bs++) {
+for (int x = 0; x < noRecv; x++) {
+for (int bs = 0; bs < block_size; bs++) {
 commpairIndicesCopyOnCPU.push_back(it->second.second[x] * block_size + bs);
 }
 }
@@ -385,9 +396,12 @@ class GpuOwnerOverlapCopy
 public:
 using X = GpuVector<field_type>;
-explicit GpuOwnerOverlapCopy(std::shared_ptr<GPUSender<field_type, OwnerOverlapCopyCommunicationType>> sender) : m_sender(sender){}
+explicit GpuOwnerOverlapCopy(std::shared_ptr<GPUSender<field_type, OwnerOverlapCopyCommunicationType>> sender)
+    : m_sender(sender)
+{}
-void copyOwnerToAll(const X& source, X& dest) const {
+void copyOwnerToAll(const X& source, X& dest) const
+{
 m_sender->copyOwnerToAll(source, dest);
 }
@@ -409,5 +423,7 @@ public:
 private:
 std::shared_ptr<GPUSender<field_type, OwnerOverlapCopyCommunicationType>> m_sender;
 };
 } // namespace Opm::gpuistl
 #endif

View File

@@ -6,17 +6,22 @@
 // NOTE: This file is a modified version of dune/istl/paamg/twolevelmethod.hh from
 // dune-istl release 2.6.0. Modifications have been kept as minimal as possible.
-#include <tuple>
-#include<dune/istl/operators.hh>
+#include <dune/istl/operators.hh>
 //#include "amg.hh"
 //#include"galerkin.hh"
-#include<dune/istl/paamg/amg.hh>
-#include<dune/istl/paamg/galerkin.hh>
-#include<dune/istl/solver.hh>
-#include<dune/common/unused.hh>
-#include<dune/common/version.hh>
+#include <dune/istl/paamg/amg.hh>
+#include <dune/istl/paamg/galerkin.hh>
+#include <dune/istl/solver.hh>
+#include <dune/common/unused.hh>
+#include <dune/common/version.hh>
+#include <algorithm>
+#include <cstddef>
+#include <iostream>
+#include <memory>
+#include <tuple>
+#include <vector>
 /**
 * @addtogroup ISTL_PAAMG
@@ -189,13 +194,8 @@ public:
 ParallelInformation pinfo;
 std::size_t aggregates = coarsen(renumberer, pinfo, pg, vm,*aggregatesMap_, noAggregates, true);
-std::vector<bool>& visited=excluded;
-typedef std::vector<bool>::iterator Iterator;
-for(Iterator iter= visited.begin(), end=visited.end();
-iter != end; ++iter)
-*iter=false;
+std::fill(excluded.begin(), excluded.end(), false);
 matrix_.reset(productBuilder.build(mg, vm,
 SequentialInformation(),
 *aggregatesMap_,

View File

@@ -542,7 +542,7 @@ extern "C" {
 if (cells.empty()) { return; }
 const auto first = parts[cells.front()];
-for (auto& cell : cells) {
+for (const auto& cell : cells) {
 parts[cell] = first;
 }
 }

View File

@@ -259,7 +259,7 @@ template<class Scalar> class WellContributions;
 // add source from wells to the reservoir matrix
 void addReservoirSourceTerms(GlobalEqVector& residual,
-std::vector<typename SparseMatrixAdapter::MatrixBlock*>& diagMatAddress) const;
+const std::vector<typename SparseMatrixAdapter::MatrixBlock*>& diagMatAddress) const;
 // called at the beginning of a report step
 void beginReportStep(const int time_step);

View File

@@ -1906,7 +1906,7 @@ getMaxWellConnections() const
 const auto possibleFutureConnectionSetIt = possibleFutureConnections.find(well.name());
 if (possibleFutureConnectionSetIt != possibleFutureConnections.end()) {
-for (auto& global_index : possibleFutureConnectionSetIt->second) {
+for (const auto& global_index : possibleFutureConnectionSetIt->second) {
 int compressed_idx = compressedIndexForInterior(global_index);
 if (compressed_idx >= 0) { // Ignore connections in inactive/remote cells.
 compressed_well_perforations.push_back(compressed_idx);

View File

@@ -433,7 +433,7 @@ namespace Opm {
 // have proper multi-phase rates proportional to rates at bhp zero.
 // This is done only for producers, as injectors will only have a single
 // nonzero phase anyway.
-for (auto& well : well_container_) {
+for (const auto& well : well_container_) {
 const bool zero_target = well->stoppedOrZeroRateTarget(simulator_, this->wellState(), local_deferredLogger);
 if (well->isProducer() && !zero_target) {
 well->updateWellStateRates(simulator_, this->wellState(), local_deferredLogger);
@@ -441,7 +441,7 @@
 }
 }
-for (auto& well : well_container_) {
+for (const auto& well : well_container_) {
 if (well->isVFPActive(local_deferredLogger)){
 well->setPrevSurfaceRates(this->wellState(), this->prevWellState());
 }
@@ -478,7 +478,7 @@
 auto exc_type = ExceptionType::NONE;
 // update gpmaint targets
 if (this->schedule_[reportStepIdx].has_gpmaint()) {
-for (auto& calculator : regionalAveragePressureCalculator_) {
+for (const auto& calculator : regionalAveragePressureCalculator_) {
 calculator.second->template defineState<ElementContext>(simulator_);
 }
 const double dt = simulator_.timeStepSize();
@@ -1641,7 +1641,7 @@
 template <typename TypeTag>
 void BlackoilWellModel<TypeTag>::
 addReservoirSourceTerms(GlobalEqVector& residual,
-std::vector<typename SparseMatrixAdapter::MatrixBlock*>& diagMatAddress) const
+const std::vector<typename SparseMatrixAdapter::MatrixBlock*>& diagMatAddress) const
 {
 // NB this loop may write multiple times to the same element
 // if a cell is perforated by more than one well, so it should
@@ -2011,7 +2011,10 @@
 const int iterationIdx,
 DeferredLogger& deferred_logger)
 {
-this->updateAndCommunicateGroupData(reportStepIdx, iterationIdx, param_.nupcol_group_rate_tolerance_, deferred_logger);
+this->updateAndCommunicateGroupData(reportStepIdx,
+                                    iterationIdx,
+                                    param_.nupcol_group_rate_tolerance_,
+                                    deferred_logger);
 // updateWellStateWithTarget might throw for multisegment wells hence we
 // have a parallel try catch here to thrown on all processes.
@@ -2019,15 +2022,20 @@
 // if a well or group change control it affects all wells that are under the same group
 for (const auto& well : well_container_) {
 // We only want to update wells under group-control here
-auto& ws = this->wellState().well(well->indexOfWell());
-if (ws.production_cmode == Well::ProducerCMode::GRUP || ws.injection_cmode == Well::InjectorCMode::GRUP) {
+const auto& ws = this->wellState().well(well->indexOfWell());
+if (ws.production_cmode == Well::ProducerCMode::GRUP ||
+    ws.injection_cmode == Well::InjectorCMode::GRUP)
+{
 well->updateWellStateWithTarget(simulator_, this->groupState(),
 this->wellState(), deferred_logger);
 }
 }
 OPM_END_PARALLEL_TRY_CATCH("BlackoilWellModel::updateAndCommunicate failed: ",
 simulator_.gridView().comm())
-this->updateAndCommunicateGroupData(reportStepIdx, iterationIdx, param_.nupcol_group_rate_tolerance_, deferred_logger);
+this->updateAndCommunicateGroupData(reportStepIdx,
+                                    iterationIdx,
+                                    param_.nupcol_group_rate_tolerance_,
+                                    deferred_logger);
 }
 template<typename TypeTag>

View File

@@ -1590,7 +1590,7 @@
 void GasLiftSingleWellGeneric<Scalar>::
 updateWellStateAlqFixedValue_(const GasLiftWell& well)
 {
-auto& max_alq_optional = well.max_rate();
+const auto& max_alq_optional = well.max_rate();
 if (max_alq_optional) {
 // According to WLIFTOPT, item 3:
 // If item 2 is NO, then item 3 is regarded as the fixed

View File

@@ -215,7 +215,7 @@ void
 GasLiftSingleWell<TypeTag>::
 setAlqMaxRate_(const GasLiftWell& well)
 {
-auto& max_alq_optional = well.max_rate();
+const auto& max_alq_optional = well.max_rate();
 if (max_alq_optional) {
 // NOTE: To prevent extrapolation of the VFP tables, any value
 // entered here must not exceed the largest ALQ value in the well's VFP table.

View File

@@ -474,7 +474,7 @@ recalculateGradientAndUpdateData_(GradPairItr& grad_itr,
 // only applies to wells in the well_state_map (i.e. wells on this rank)
 // the grads and other grads are synchronized later
 if(this->stage1_wells_.count(name) > 0) {
-GasLiftSingleWell &gs_well = *(this->stage1_wells_.at(name).get());
+const GasLiftSingleWell& gs_well = *(this->stage1_wells_.at(name).get());
 {
 auto grad = calcIncOrDecGrad_(name, gs_well, gr_name_dont_limit, increase);
 if (grad) {

View File

@@ -121,7 +121,7 @@ public:
 // Create a function that calls some function
 // for all the individual data items to simplify
 // the further code.
-auto iterateContainer = [](auto& container, auto& func) {
+auto iterateContainer = [](auto& container, const auto& func) {
 for (auto& x : container) {
 func(x.second);
 }

View File

@@ -81,7 +81,7 @@ public:
 // Now add the cells of the possible future connections
 const auto possibleFutureConnectionSetIt = possibleFutureConnections.find(well.name());
 if (possibleFutureConnectionSetIt != possibleFutureConnections.end()) {
-for (auto& global_index : possibleFutureConnectionSetIt->second) {
+for (const auto& global_index : possibleFutureConnectionSetIt->second) {
 int compressed_idx = model_.compressedIndexForInterior(global_index);
 if (compressed_idx >= 0) { // Ignore connections in inactive/remote cells.
 wellCells.push_back(compressed_idx);
@@ -126,7 +126,7 @@ public:
 }
 }
-void postSolveDomain(GlobalEqVector& deltaX, const Domain& domain)
+void postSolveDomain(const GlobalEqVector& deltaX, const Domain& domain)
 {
 model_.recoverWellSolutionAndUpdateWellStateDomain(deltaX, domain);
 }

View File

@@ -476,7 +476,7 @@ namespace Opm
 deferred_logger.info(msg);
 // also reopen completions
-for (auto& completion : this->well_ecl_.getCompletions()) {
+for (const auto& completion : this->well_ecl_.getCompletions()) {
 if (!welltest_state_temp.completion_is_closed(this->name(), completion.first))
 well_test_state.open_completion(this->name(), completion.first);
 }

View File

@@ -116,7 +116,8 @@ createBridge(const boost::property_tree::ptree& prm, std::unique_ptr<Opm::GpuBridge
 template <int bz>
 Dune::BlockVector<Dune::FieldVector<double, bz>>
-testOpenclSolver(Opm::GpuBridge<Matrix<bz>, Vector<bz>, bz>& bridge, Matrix<bz>& matrix, Vector<bz>& rhs)
+testOpenclSolver(Opm::GpuBridge<Matrix<bz>, Vector<bz>, bz>& bridge,
+                 const Matrix<bz>& matrix, Vector<bz>& rhs)
 {
 Dune::InverseOperatorResult result;
 Vector<bz> x(rhs.size());
@@ -131,7 +132,8 @@ testOpenclSolver(Opm::GpuBridge<Matrix<bz>, Vector<bz>, bz>& bridge, Matrix<bz>&
 template <int bz>
 Dune::BlockVector<Dune::FieldVector<double, bz>>
-testOpenclSolverJacobi(Opm::GpuBridge<Matrix<bz>, Vector<bz>, bz>& bridge, Matrix<bz>& matrix, Vector<bz>& rhs)
+testOpenclSolverJacobi(Opm::GpuBridge<Matrix<bz>, Vector<bz>, bz>& bridge,
+                       const Matrix<bz>& matrix, Vector<bz>& rhs)
 {
 Dune::InverseOperatorResult result;
 Vector<bz> x(rhs.size());