From 76c947b04c831c323b8a0d40c79cf88b85b822ef Mon Sep 17 00:00:00 2001
From: Arne Morten Kvarving
Date: Wed, 22 Jan 2025 16:16:42 +0100
Subject: [PATCH] cosmetics

---
 .../aquifers/BlackoilAquiferModel_impl.hpp    |  3 +-
 .../flow/GenericThresholdPressure_impl.hpp    | 43 +++++++----
 opm/simulators/flow/TracerModel.hpp           |  2 +-
 opm/simulators/linalg/gpubridge/Reorder.cpp   | 74 +++++++++++--------
 opm/simulators/linalg/gpubridge/Reorder.hpp   | 31 +++++---
 .../linalg/gpubridge/opencl/openclBILU0.cpp   |  3 +-
 .../linalg/gpuistl/GpuOwnerOverlapCopy.hpp    | 69 +++++++++--------
 .../wells/BlackoilWellModel_impl.hpp          | 14 +++-
 8 files changed, 149 insertions(+), 90 deletions(-)

diff --git a/opm/simulators/aquifers/BlackoilAquiferModel_impl.hpp b/opm/simulators/aquifers/BlackoilAquiferModel_impl.hpp
index e77ef9595..633618170 100644
--- a/opm/simulators/aquifers/BlackoilAquiferModel_impl.hpp
+++ b/opm/simulators/aquifers/BlackoilAquiferModel_impl.hpp
@@ -139,8 +139,9 @@ BlackoilAquiferModel<TypeTag>::endTimeStep()
     for (auto& aquifer : this->aquifers) {
         aquifer->endTimeStep();
         NumAq* num = dynamic_cast<NumAq*>(aquifer.get());
-        if (num)
+        if (num) {
             this->simulator_.vanguard().grid().comm().barrier();
+        }
     }
 }

diff --git a/opm/simulators/flow/GenericThresholdPressure_impl.hpp b/opm/simulators/flow/GenericThresholdPressure_impl.hpp
index 7e0defdd6..5ddc4b06a 100644
--- a/opm/simulators/flow/GenericThresholdPressure_impl.hpp
+++ b/opm/simulators/flow/GenericThresholdPressure_impl.hpp
@@ -73,10 +73,11 @@ thresholdPressure(int elem1Idx, int elem2Idx) const
     int fault1Idx = lookUpCartesianData_(elem1Idx, cartElemFaultIdx_);
     int fault2Idx = lookUpCartesianData_(elem2Idx, cartElemFaultIdx_);

-    if (fault1Idx != -1 && fault1Idx == fault2Idx)
+    if (fault1Idx != -1 && fault1Idx == fault2Idx) {
         // inside a fault there's no threshold pressure, even across EQUIL
         // regions.
         return 0.0;
+    }
     if (fault1Idx != fault2Idx) {
         // TODO: which value if a cell is part of multiple faults? we take
         // the maximum here.
@@ -90,8 +91,9 @@ thresholdPressure(int elem1Idx, int elem2Idx) const
     auto equilRegion1Idx = elemEquilRegion_[elem1Idx];
     auto equilRegion2Idx = elemEquilRegion_[elem2Idx];

-    if (equilRegion1Idx == equilRegion2Idx)
+    if (equilRegion1Idx == equilRegion2Idx) {
         return 0.0;
+    }

     return thpres_[equilRegion1Idx*numEquilRegions_ + equilRegion2Idx];
 }
@@ -103,8 +105,9 @@ finishInit()
     const auto& simConfig = eclState_.getSimulationConfig();

     enableThresholdPressure_ = simConfig.useThresholdPressure();
-    if (!enableThresholdPressure_)
+    if (!enableThresholdPressure_) {
         return;
+    }

     numEquilRegions_ = eclState_.getTableManager().getEqldims().getNumEquilRegions();
     const decltype(numEquilRegions_) maxRegions =
@@ -129,16 +132,18 @@ finishInit()
     }

     // internalize the data specified using the EQLNUM keyword
-    elemEquilRegion_ = lookUpData_.template assignFieldPropsIntOnLeaf(eclState_.fieldProps(),
-                                                                     "EQLNUM", true);
+    elemEquilRegion_ = lookUpData_.
+        template assignFieldPropsIntOnLeaf(eclState_.fieldProps(),
+                                           "EQLNUM", true);

     /*
       If this is a restart run the ThresholdPressure object will be active, and
       already properly initialized with numerical values from the restart.
       Done using GenericThresholdPressure::setFromRestart() in EclWriter::beginRestart().
     */
-    if (simConfig.getThresholdPressure().restart())
+    if (simConfig.getThresholdPressure().restart()) {
        return;
+    }

     // allocate the array which specifies the threshold pressures
     thpres_.resize(numEquilRegions_*numEquilRegions_, 0.0);
@@ -156,10 +161,12 @@ applyExplicitThresholdPressures_()
     // intersection in the grid
     for (const auto& elem : elements(gridView_, Dune::Partitions::interior)) {
         for (const auto& intersection : intersections(gridView_, elem)) {
-            if (intersection.boundary())
+            if (intersection.boundary()) {
                 continue; // ignore boundary intersections for now (TODO?)
-            else if (!intersection.neighbor()) //processor boundary but not domain boundary
+            }
+            else if (!intersection.neighbor()) { // processor boundary but not domain boundary
                 continue;
+            }

             const auto& inside = intersection.inside();
             const auto& outside = intersection.outside();
@@ -189,8 +196,9 @@ applyExplicitThresholdPressures_()
     }

     // apply threshold pressures across faults
-    if (thpres.ftSize() > 0)
+    if (thpres.ftSize() > 0) {
         configureThpresft_();
+    }
 }

 template<class Grid, class GridView, class ElementMapper, class Scalar>
@@ -207,14 +215,16 @@ configureThpresft_()
     int numCartesianElem = eclState_.getInputGrid().getCartesianSize();
     thpresftValues_.resize(numFaults, -1.0);
     cartElemFaultIdx_.resize(numCartesianElem, -1);
-    for (std::size_t faultIdx = 0; faultIdx < faults.size(); faultIdx++) {
+    for (std::size_t faultIdx = 0; faultIdx < faults.size(); ++faultIdx) {
         auto& fault = faults.getFault(faultIdx);
         thpresftValues_[faultIdx] = thpres.getThresholdPressureFault(faultIdx);
-        for (const FaultFace& face : fault)
+        for (const FaultFace& face : fault) {
             // "face" is a misnomer because the object describes a set of cell
             // indices, but we go with the conventions of the parser here...
-            for (std::size_t cartElemIdx : face)
+            for (std::size_t cartElemIdx : face) {
                 cartElemFaultIdx_[cartElemIdx] = faultIdx;
+            }
+        }
     }
 }

 template<class Grid, class GridView, class ElementMapper, class Scalar>
@@ -223,8 +233,9 @@ std::vector<Scalar>
 GenericThresholdPressure<Grid,GridView,ElementMapper,Scalar>::
 getRestartVector() const
 {
-    if (!enableThresholdPressure_)
+    if (!enableThresholdPressure_) {
         return {};
+    }

     return this->thpres_;
 }
@@ -243,8 +254,9 @@ void
 GenericThresholdPressure<Grid,GridView,ElementMapper,Scalar>::
 logPressures()
 {
-    if (!enableThresholdPressure_)
+    if (!enableThresholdPressure_) {
         return;
+    }

     auto lineFormat = [this](unsigned i, unsigned j, double val)
     {
@@ -274,7 +286,8 @@ logPressures()
             if (thpres.hasRegionBarrier(i, j)) {
                 if (thpres.hasThresholdPressure(i, j)) {
                     str += lineFormat(i, j, thpres.getThresholdPressure(j, i));
-                } else {
+                }
+                else {
                     std::size_t idx = (j - 1) * numEquilRegions_ + (i - 1);
                     str += lineFormat(i, j, this->thpresDefault_[idx]);
                 }

diff --git a/opm/simulators/flow/TracerModel.hpp b/opm/simulators/flow/TracerModel.hpp
index 23bb2a16d..6ea2d23b2 100644
--- a/opm/simulators/flow/TracerModel.hpp
+++ b/opm/simulators/flow/TracerModel.hpp
@@ -892,7 +892,7 @@ protected:
         TracerBatch(int phaseIdx = 0) : phaseIdx_(phaseIdx) {}

-        int numTracer() const {return idx_.size(); }
+        int numTracer() const { return idx_.size(); }

         void addTracer(const int idx, const TV & concentration)
         {

diff --git a/opm/simulators/linalg/gpubridge/Reorder.cpp b/opm/simulators/linalg/gpubridge/Reorder.cpp
index e488bec66..f8d029c5e 100644
--- a/opm/simulators/linalg/gpubridge/Reorder.cpp
+++ b/opm/simulators/linalg/gpubridge/Reorder.cpp
@@ -23,28 +23,28 @@
 #include

-#include
 #include
-
-namespace Opm
-{
-namespace Accelerator
-{
-
+namespace Opm::Accelerator {

 /* Check if operations on a node in the matrix can be started
- * A node can only be started if all nodes that it depends on during sequential execution have already completed.*/
+ * A node can only be started if all nodes that it depends on during sequential
+ * execution have already completed.*/

-bool canBeStarted(const int rowIndex, const int *rowPointers, const int *colIndices, const std::vector<bool>& doneRows) {
+bool canBeStarted(const int rowIndex,
+                  const int* rowPointers,
+                  const int* colIndices,
+                  const std::vector<bool>& doneRows)
+{
     bool canStart = !doneRows[rowIndex];
-    int i, thisDependency;
     if (canStart) {
-        for (i = rowPointers[rowIndex]; i < rowPointers[rowIndex + 1]; i++) {
-            thisDependency = colIndices[i];
+        for (int i = rowPointers[rowIndex]; i < rowPointers[rowIndex + 1]; ++i) {
+            int thisDependency = colIndices[i];
             // Only dependencies on rows that should execute before the current one are relevant
-            if (thisDependency >= rowIndex)
+            if (thisDependency >= rowIndex) {
                 break;
+            }
             // Check if dependency has been resolved
             if (!doneRows[thisDependency]) {
                 return false;
@@ -55,14 +55,23 @@ }

 /*
- * The level scheduling of a non-symmetric, blocked matrix requires access to a CSC encoding and a CSR encoding of the sparsity pattern of the input matrix.
+ * The level scheduling of a non-symmetric, blocked matrix requires access to a CSC
+ * encoding and a CSR encoding of the sparsity pattern of the input matrix.
 * This function is based on a standard level scheduling algorithm, like the one described in:
 * "Iterative methods for Sparse Linear Systems" by Yousef Saad in section 11.6.3
 */
-void findLevelScheduling(int *CSRColIndices, int *CSRRowPointers, int *CSCRowIndices, int *CSCColPointers, int Nb, int *numColors, int *toOrder, int* fromOrder, std::vector<int>& rowsPerColor) {
-    int activeRowIndex = 0, colorEnd, nextActiveRowIndex = 0;
-    int thisRow;
+void findLevelScheduling(int* CSRColIndices,
+                         int* CSRRowPointers,
+                         int* CSCRowIndices,
+                         int* CSCColPointers,
+                         int Nb,
+                         int* numColors,
+                         int* toOrder,
+                         int* fromOrder,
+                         std::vector<int>& rowsPerColor)
+{
+    int activeRowIndex = 0, nextActiveRowIndex = 0;

     std::vector<bool> doneRows(Nb, false);
     std::vector<int> rowsToStart;
@@ -70,23 +79,27 @@
     assert(rowsPerColor.empty());

     // find starting rows: rows that are independent from all rows that come before them.
-    for (thisRow = 0; thisRow < Nb; thisRow++) {
+    int thisRow;
+    for (thisRow = 0; thisRow < Nb; ++thisRow) {
         if (canBeStarted(thisRow, CSCColPointers, CSCRowIndices, doneRows)) {
             fromOrder[nextActiveRowIndex] = thisRow;
             toOrder[thisRow] = nextActiveRowIndex;
-            nextActiveRowIndex++;
+            ++nextActiveRowIndex;
         }
     }
+
     // 'do' compute on all active rows
-    for (colorEnd = 0; colorEnd < nextActiveRowIndex; colorEnd++) {
+    int colorEnd;
+    for (colorEnd = 0; colorEnd < nextActiveRowIndex; ++colorEnd) {
         doneRows[fromOrder[colorEnd]] = true;
     }
     rowsPerColor.emplace_back(nextActiveRowIndex - activeRowIndex);

     while (colorEnd < Nb) {
-        // Go over all rows active from the last color, and check which of their neighbours can be activated this color
-        for (; activeRowIndex < colorEnd; activeRowIndex++) {
+        // Go over all rows active from the last color, and check which of
+        // their neighbours can be activated this color
+        for (; activeRowIndex < colorEnd; ++activeRowIndex) {
             thisRow = fromOrder[activeRowIndex];

             for (int i = CSCColPointers[thisRow]; i < CSCColPointers[thisRow + 1]; i++) {
@@ -104,7 +117,7 @@
                 doneRows[thisRow] = true;
                 fromOrder[nextActiveRowIndex] = thisRow;
                 toOrder[thisRow] = nextActiveRowIndex;
-                nextActiveRowIndex++;
+                ++nextActiveRowIndex;
             }
         }
         rowsToStart.clear();
@@ -115,10 +128,13 @@
     *numColors = rowsPerColor.size();
 }

-
 // based on the scipy package from python, scipy/sparse/sparsetools/csr.h on github
-void csrPatternToCsc(int *CSRColIndices, int *CSRRowPointers, int *CSCRowIndices, int *CSCColPointers, int Nb) {
-
+void csrPatternToCsc(int* CSRColIndices,
+                     int* CSRRowPointers,
+                     int* CSCRowIndices,
+                     int* CSCColPointers,
+                     int Nb)
+{
     int nnz = CSRRowPointers[Nb];

     // compute number of nnzs per column
@@ -141,7 +157,7 @@
             int col = CSRColIndices[j];
             int dest = CSCColPointers[col];
             CSCRowIndices[dest] = row;
-            CSCColPointers[col]++;
+            ++CSCColPointers[col];
         }
     }

@@ -152,6 +168,4 @@
     }
 }

-
-} // namespace Accelerator
-} // namespace Opm
+} // namespace Opm::Accelerator

diff --git a/opm/simulators/linalg/gpubridge/Reorder.hpp b/opm/simulators/linalg/gpubridge/Reorder.hpp
index 27d960557..0e75bc7fc 100644
--- a/opm/simulators/linalg/gpubridge/Reorder.hpp
+++ b/opm/simulators/linalg/gpubridge/Reorder.hpp
@@ -22,10 +22,7 @@
 #include <vector>

-namespace Opm
-{
-namespace Accelerator
-{
+namespace Opm::Accelerator {

 /// Determine whether all rows that a certain row depends on are done already
 /// \param[in] rowIndex index of the row that needs to be checked for
 /// \param[in] rowPointers row pointers of the matrix that the row is in
@@ -33,7 +30,10 @@
 /// \param[in] colIndices column indices of the matrix that the row is in
 /// \param[in] doneRows array that for each row lists whether it is done or not
 /// \return true iff all dependencies are done and if the result itself was not done yet
-bool canBeStarted(const int rowIndex, const int *rowPointers, const int *colIndices, const std::vector<bool>& doneRows);
+bool canBeStarted(const int rowIndex,
+                  const int* rowPointers,
+                  const int* colIndices,
+                  const std::vector<bool>& doneRows);

 /// Find a level scheduling reordering for an input matrix
 /// The toOrder and fromOrder arrays must be allocated already
@@ -46,7 +46,15 @@
 /// \param[out] toOrder the reorder pattern that was found, which lists for each index in the original order, to which index in the new order it should be moved
 /// \param[out] fromOrder the reorder pattern that was found, which lists for each index in the new order, from which index in the original order it was moved
 /// \param[out] rowsPerColor for each color, an array of all rowIndices in that color, this function uses emplace_back() to fill
-void findLevelScheduling(int *CSRColIndices, int *CSRRowPointers, int *CSCRowIndices, int *CSCColPointers, int Nb, int *numColors, int *toOrder, int* fromOrder, std::vector<int>& rowsPerColor);
+void findLevelScheduling(int* CSRColIndices,
+                         int* CSRRowPointers,
+                         int* CSCRowIndices,
+                         int* CSCColPointers,
+                         int Nb,
+                         int* numColors,
+                         int* toOrder,
+                         int* fromOrder,
+                         std::vector<int>& rowsPerColor);

 /// Convert a sparsity pattern stored in the CSR format to the CSC format
 /// CSCRowIndices and CSCColPointers arrays must be allocated already
@@ -56,9 +64,12 @@
 /// \param[in] CSRColIndices column indices of the CSR representation of the pattern
 /// \param[in] CSRRowPointers row pointers of the CSR representation of the pattern
 /// \param[inout] CSCRowIndices row indices of the result CSC representation of the pattern
 /// \param[inout] CSCColPointers column pointers of the result CSC representation of the pattern
 /// \param[in] Nb number of blockrows in the matrix
-void csrPatternToCsc(int *CSRColIndices, int *CSRRowPointers, int *CSCRowIndices, int *CSCColPointers, int Nb);
+void csrPatternToCsc(int* CSRColIndices,
+                     int* CSRRowPointers,
+                     int* CSCRowIndices,
+                     int* CSCColPointers,
+                     int Nb);

-} // namespace Accelerator
-} // namespace Opm
+} // namespace Opm::Accelerator

-#endif
\ No newline at end of file
+#endif

diff --git a/opm/simulators/linalg/gpubridge/opencl/openclBILU0.cpp b/opm/simulators/linalg/gpubridge/opencl/openclBILU0.cpp
index cb3942cb4..138eae98c 100644
--- a/opm/simulators/linalg/gpubridge/opencl/openclBILU0.cpp
+++ b/opm/simulators/linalg/gpubridge/opencl/openclBILU0.cpp
@@ -198,8 +198,9 @@ create_preconditioner(BlockedMatrix<Scalar>* mat, BlockedMatrix<Scalar>* jacMat)
     bool use_multithreading = true;

 #if HAVE_OPENMP
-    if (omp_get_max_threads() == 1)
+    if (omp_get_max_threads() == 1) {
         use_multithreading = false;
+    }
 #endif

     if (jacMat && use_multithreading) {

diff --git a/opm/simulators/linalg/gpuistl/GpuOwnerOverlapCopy.hpp b/opm/simulators/linalg/gpuistl/GpuOwnerOverlapCopy.hpp
index fbebeb636..a42252639 100644
--- a/opm/simulators/linalg/gpuistl/GpuOwnerOverlapCopy.hpp
+++ b/opm/simulators/linalg/gpuistl/GpuOwnerOverlapCopy.hpp
@@ -24,8 +24,8 @@
 #include
 #include

-namespace Opm::gpuistl
-{
+namespace Opm::gpuistl {
+
 /**
  * @brief GPUSender is a wrapper class for classes which will implement copyOwnerToAll
  * This is implemented with the intention of creating communicators with generic GPUSender
@@ -119,10 +119,11 @@
 public:
     explicit GPUObliviousMPISender(const OwnerOverlapCopyCommunicationType& cpuOwnerOverlapCopy)
         : GPUSender<field_type, OwnerOverlapCopyCommunicationType>(cpuOwnerOverlapCopy)
-        {
-        }
+    {
+    }

-    void copyOwnerToAll(const X& source, X& dest) const override {
+    void copyOwnerToAll(const X& source, X& dest) const override
+    {
         // TODO: [perf] Can we reduce copying from the GPU here?
         // TODO: [perf] Maybe create a global buffer instead?
         auto sourceAsDuneVector = source.template asDuneBlockVector<block_size>();
@@ -179,7 +180,6 @@

     void copyOwnerToAll(const X& source, X& dest) const override
     {
-
         OPM_ERROR_IF(&source != &dest, "The provided GpuVectors' address did not match"); // In this context, source == dest!!!
         std::call_once(this->m_initializedIndices, [&]() { initIndexSet(); });
@@ -198,9 +198,9 @@
         {
             size_t i = 0;
-            for(const_iterator info = m_messageInformation.begin(); info != end; ++info, ++i) {
+            for (const_iterator info = m_messageInformation.begin(); info != end; ++info, ++i) {
                 processMap[i]=info->first;
-                if(info->second.second.m_size) {
+                if (info->second.second.m_size) {
                     MPI_Irecv(m_GPURecvBuf->data()+info->second.second.m_start,
                               detail::to_int(info->second.second.m_size),
                               MPI_BYTE,
                               processMap[i],
                               commTag,
                               this->m_cpuOwnerOverlapCopy.communicator(),
                               &recvRequests[i]);
                     numberOfRealRecvRequests += 1;
-                } else {
-                    recvRequests[i]=MPI_REQUEST_NULL;
+                }
+                else {
+                    recvRequests[i] = MPI_REQUEST_NULL;
                 }
             }
         }

         {
             size_t i = 0;
-            for(const_iterator info = m_messageInformation.begin(); info != end; ++info, ++i) {
-                if(info->second.first.m_size) {
+            for (const_iterator info = m_messageInformation.begin(); info != end; ++info, ++i) {
+                if (info->second.first.m_size) {
                     MPI_Issend(m_GPUSendBuf->data()+info->second.first.m_start,
                                detail::to_int(info->second.first.m_size),
                                MPI_BYTE,
                                processMap[i],
                                commTag,
                                this->m_cpuOwnerOverlapCopy.communicator(),
                                &sendRequests[i]);
                 } else {
-                    sendRequests[i]=MPI_REQUEST_NULL;
+                    sendRequests[i] = MPI_REQUEST_NULL;
                 }
             }
         }

         int finished = MPI_UNDEFINED;
         MPI_Status status;
-        for(size_t i = 0; i < numberOfRealRecvRequests; i++) {
+        for (size_t i = 0; i < numberOfRealRecvRequests; i++) {
             status.MPI_ERROR=MPI_SUCCESS;
             MPI_Waitany(m_messageInformation.size(), recvRequests.data(), &finished, &status);

-            if(status.MPI_ERROR!=MPI_SUCCESS) {
-                OPM_THROW(std::runtime_error, fmt::format("MPI_Error occurred while rank {} received a message from rank {}", rank, processMap[finished]));
+            if (status.MPI_ERROR!=MPI_SUCCESS) {
+                OPM_THROW(std::runtime_error,
+                          fmt::format("MPI_Error occurred while rank {} received a message from rank {}",
+                                      rank, processMap[finished]));
             }
         }
         MPI_Status recvStatus;
-        for(size_t i = 0; i < m_messageInformation.size(); i++) {
-            if(MPI_SUCCESS!=MPI_Wait(&sendRequests[i], &recvStatus)) {
-                OPM_THROW(std::runtime_error, fmt::format("MPI_Error occurred while rank {} sent a message from rank {}", rank, processMap[finished]));
+        for (size_t i = 0; i < m_messageInformation.size(); i++) {
+            if (MPI_SUCCESS != MPI_Wait(&sendRequests[i], &recvStatus)) {
+                OPM_THROW(std::runtime_error,
+                          fmt::format("MPI_Error occurred while rank {} sent a message to rank {}",
+                                      rank, processMap[finished]));
             }
         }
         // ...End of MPI stuff
@@ -279,20 +284,21 @@
         std::vector<int> commpairIndicesCopyOnCPU;
         std::vector<int> commpairIndicesOwnerCPU;

-        for(auto process : ri) {
+        for (auto process : ri) {
            m_im[process.first] = std::pair(std::vector<int>(), std::vector<int>());
-            for(int send = 0; send < 2; ++send) {
+            for (int send = 0; send < 2; ++send) {
                 auto remoteEnd = send ? process.second.first->end() : process.second.second->end();
                 auto remote = send ? process.second.first->begin() : process.second.second->begin();

-                while(remote != remoteEnd) {
+                while (remote != remoteEnd) {
                     if (send ?
                         (remote->localIndexPair().local().attribute() == 1)
                         : (remote->attribute() == 1)) {
                         if (send) {
                             m_im[process.first].first.push_back(remote->localIndexPair().local().local());
-                        } else {
+                        }
+                        else {
                             m_im[process.first].second.push_back(remote->localIndexPair().local().local());
                         }
                     }
@@ -317,13 +323,13 @@
                                                            recvBufIdx * block_size,
                                                            noRecv * block_size * sizeof(field_type)))));

-            for(int x = 0; x < noSend; x++) {
-                for(int bs = 0; bs < block_size; bs++) {
+            for (int x = 0; x < noSend; x++) {
+                for (int bs = 0; bs < block_size; bs++) {
                     commpairIndicesOwnerCPU.push_back(it->second.first[x] * block_size + bs);
                 }
             }
-            for(int x = 0; x < noRecv; x++) {
-                for(int bs = 0; bs < block_size; bs++) {
+            for (int x = 0; x < noRecv; x++) {
+                for (int bs = 0; bs < block_size; bs++) {
                     commpairIndicesCopyOnCPU.push_back(it->second.second[x] * block_size + bs);
                 }
             }
@@ -385,9 +391,12 @@ class GpuOwnerOverlapCopy
 public:
     using X = GpuVector<field_type>;

-    explicit GpuOwnerOverlapCopy(std::shared_ptr<GPUSender<field_type, OwnerOverlapCopyCommunicationType>> sender) : m_sender(sender){}
+    explicit GpuOwnerOverlapCopy(std::shared_ptr<GPUSender<field_type, OwnerOverlapCopyCommunicationType>> sender)
+        : m_sender(sender)
+    {}

-    void copyOwnerToAll(const X& source, X& dest) const {
+    void copyOwnerToAll(const X& source, X& dest) const
+    {
         m_sender->copyOwnerToAll(source, dest);
     }
@@ -409,5 +418,7 @@
 private:
     std::shared_ptr<GPUSender<field_type, OwnerOverlapCopyCommunicationType>> m_sender;
 };
+
 } // namespace Opm::gpuistl
+
 #endif

diff --git a/opm/simulators/wells/BlackoilWellModel_impl.hpp b/opm/simulators/wells/BlackoilWellModel_impl.hpp
index 287169ee0..fcb82ca9b 100644
--- a/opm/simulators/wells/BlackoilWellModel_impl.hpp
+++ b/opm/simulators/wells/BlackoilWellModel_impl.hpp
@@ -2011,7 +2011,10 @@ namespace Opm {
                              const int iterationIdx,
                              DeferredLogger& deferred_logger)
     {
-        this->updateAndCommunicateGroupData(reportStepIdx, iterationIdx, param_.nupcol_group_rate_tolerance_, deferred_logger);
+        this->updateAndCommunicateGroupData(reportStepIdx,
+                                            iterationIdx,
+                                            param_.nupcol_group_rate_tolerance_,
+                                            deferred_logger);

         // updateWellStateWithTarget might throw for multisegment wells hence we
         // have a parallel try catch here to throw on all processes.
@@ -2020,14 +2023,19 @@ namespace Opm {
         for (const auto& well : well_container_) {
             // We only want to update wells under group-control here
             auto& ws = this->wellState().well(well->indexOfWell());
-            if (ws.production_cmode == Well::ProducerCMode::GRUP || ws.injection_cmode == Well::InjectorCMode::GRUP) {
+            if (ws.production_cmode == Well::ProducerCMode::GRUP ||
+                ws.injection_cmode == Well::InjectorCMode::GRUP)
+            {
                 well->updateWellStateWithTarget(simulator_, this->groupState(), this->wellState(), deferred_logger);
             }
         }
         OPM_END_PARALLEL_TRY_CATCH("BlackoilWellModel::updateAndCommunicate failed: ",
                                    simulator_.gridView().comm())
-        this->updateAndCommunicateGroupData(reportStepIdx, iterationIdx, param_.nupcol_group_rate_tolerance_, deferred_logger);
+        this->updateAndCommunicateGroupData(reportStepIdx,
+                                            iterationIdx,
+                                            param_.nupcol_group_rate_tolerance_,
+                                            deferred_logger);
     }

 template<typename TypeTag>
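
Usage note (not part of the patch): the snippet below is a minimal, self-contained sketch of how the two Reorder.hpp entry points reformatted above fit together. Only the function signatures are taken from the patch itself; the 4x4 block-sparsity pattern, the file name, and the printed output are invented for illustration.

// level_scheduling_example.cpp -- hypothetical usage sketch for
// Opm::Accelerator::csrPatternToCsc() and findLevelScheduling().
#include <iostream>
#include <vector>

#include <opm/simulators/linalg/gpubridge/Reorder.hpp>

int main()
{
    const int Nb = 4; // number of block rows

    // Made-up sparsity pattern: row 1 depends on row 0, row 3 depends on row 1.
    std::vector<int> csrRowPointers = {0, 1, 3, 4, 6};
    std::vector<int> csrColIndices  = {0, 0, 1, 2, 1, 3};

    // The scheduler needs the CSC encoding of the same pattern as well;
    // both output arrays must be allocated by the caller.
    const int nnz = csrRowPointers[Nb];
    std::vector<int> cscColPointers(Nb + 1, 0);
    std::vector<int> cscRowIndices(nnz, 0);
    Opm::Accelerator::csrPatternToCsc(csrColIndices.data(), csrRowPointers.data(),
                                      cscRowIndices.data(), cscColPointers.data(), Nb);

    int numColors = 0;
    std::vector<int> toOrder(Nb), fromOrder(Nb);
    std::vector<int> rowsPerColor; // filled with emplace_back() inside

    Opm::Accelerator::findLevelScheduling(csrColIndices.data(), csrRowPointers.data(),
                                          cscRowIndices.data(), cscColPointers.data(),
                                          Nb, &numColors,
                                          toOrder.data(), fromOrder.data(),
                                          rowsPerColor);

    // Rows that share a color have no dependencies on each other,
    // so each color can be processed in parallel.
    std::cout << "colors: " << numColors << '\n';
    for (int c = 0, r = 0; c < numColors; ++c) {
        std::cout << "color " << c << ':';
        for (int k = 0; k < rowsPerColor[c]; ++k, ++r) {
            std::cout << ' ' << fromOrder[r];
        }
        std::cout << '\n';
    }
}

For this pattern the scheduler should report three colors: rows 0 and 2 have no dependencies on earlier rows and form the first color, row 1 waits for row 0, and row 3 waits for row 1.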