From 69a5c39738bcc43b1dc20133e9d6faadd9435afa Mon Sep 17 00:00:00 2001
From: Halvor M Nilsen
Date: Fri, 7 Feb 2025 16:25:50 +0100
Subject: [PATCH] Tidy Up Parts of Grid Processing Implementation

In particular

 1. Reduce scope of some objects
 2. Remove unneeded objects
 3. Split some long lines.
 4. Realign some code that was indented multiple levels
 5. Tag '#endif' pre-processor statements with their condition

This is in preparation of introducing support for edge-conformal
processing of corner-point descriptions.
---
 opm/simulators/flow/FlowGenericVanguard.cpp   | 51 ++++++-----
 opm/simulators/flow/FlowGenericVanguard.hpp   | 12 ++-
 opm/simulators/flow/GenericCpGridVanguard.cpp | 91 +++++++++----------
 .../flow/PolyhedralGridVanguard.hpp           | 10 +-
 4 files changed, 86 insertions(+), 78 deletions(-)

diff --git a/opm/simulators/flow/FlowGenericVanguard.cpp b/opm/simulators/flow/FlowGenericVanguard.cpp
index f73ef1955..4762ee46c 100644
--- a/opm/simulators/flow/FlowGenericVanguard.cpp
+++ b/opm/simulators/flow/FlowGenericVanguard.cpp
@@ -113,30 +113,34 @@ FlowGenericVanguard::FlowGenericVanguard(SimulationModelParams&& params)
     edgeWeightsMethod_ = Dune::EdgeWeightMethod(Parameters::Get());
 
 #if HAVE_OPENCL || HAVE_ROCSPARSE || HAVE_CUDA
-    numJacobiBlocks_ = Parameters::Get();
-#endif
+    numJacobiBlocks_ = Parameters::Get();
+#endif // HAVE_OPENCL || HAVE_ROCSPARSE || HAVE_CUDA
+
+    ownersFirst_ = Parameters::Get();
 
-    ownersFirst_ = Parameters::Get();
 #if HAVE_MPI
-    numOverlap_ = Parameters::Get();
-    addCorners_ = Parameters::Get();
-    partitionMethod_ = Dune::PartitionMethod(Parameters::Get());
-    serialPartitioning_ = Parameters::Get();
-    zoltanParams_ = Parameters::Get();
+    numOverlap_ = Parameters::Get();
+    addCorners_ = Parameters::Get();
+    partitionMethod_ = Dune::PartitionMethod(Parameters::Get());
+    serialPartitioning_ = Parameters::Get();
+    zoltanParams_ = Parameters::Get();
 
-    metisParams_ = Parameters::Get();
+    metisParams_ = Parameters::Get();
 
-    externalPartitionFile_ = Parameters::Get();
-#endif
-    enableDistributedWells_ = Parameters::Get();
-    enableEclOutput_ = Parameters::Get();
-    allow_splitting_inactive_wells_ = Parameters::Get();
-    ignoredKeywords_ = Parameters::Get();
-    int output_param = Parameters::Get();
-    if (output_param >= 0) {
-        outputInterval_ = output_param;
-    }
-    useMultisegmentWell_ = Parameters::Get();
+    externalPartitionFile_ = Parameters::Get();
+#endif // HAVE_MPI
+
+    enableDistributedWells_ = Parameters::Get();
+    enableEclOutput_ = Parameters::Get();
+    allow_splitting_inactive_wells_ = Parameters::Get();
+    ignoredKeywords_ = Parameters::Get();
+
+    const int output_param = Parameters::Get();
+    if (output_param >= 0) {
+        outputInterval_ = output_param;
+    }
+
+    useMultisegmentWell_ = Parameters::Get();
 }
 
 FlowGenericVanguard::SimulationModelParams
@@ -449,7 +453,7 @@ void FlowGenericVanguard::registerParameters_()
 #if HAVE_OPENCL || HAVE_ROCSPARSE || HAVE_CUDA
     Parameters::Register
         ("Number of blocks to be created for the Block-Jacobi preconditioner.");
-#endif
+#endif // HAVE_OPENCL || HAVE_ROCSPARSE || HAVE_CUDA
 
     Parameters::Register
         ("Order cells owned by rank before ghost/overlap cells.");
@@ -489,7 +493,8 @@ void FlowGenericVanguard::registerParameters_()
 
     Parameters::Hide>();
     Parameters::Hide();
-#endif
+#endif // HAVE_MPI
+
     Parameters::Register
         ("Allow the perforations of a well to be distributed to interior of multiple processes");
     Parameters::Register
@@ -503,6 +508,6 @@ template void FlowGenericVanguard::registerParameters_();
 #if FLOW_INSTANTIATE_FLOAT
 template void
 FlowGenericVanguard::registerParameters_();
-#endif
+#endif // FLOW_INSTANTIATE_FLOAT
 
 } // namespace Opm
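The constructor hunk above applies two of the tidy-ups listed in the commit message: the output-interval temporary becomes const and is declared right where it is used (item 1), and every '#endif' now names the condition it closes (item 5). A minimal, self-contained C++ sketch of those two patterns follows; the Settings struct and readOutputInterval() helper are invented stand-ins for the OPM parameter system, not opm-simulators code.

// Sketch only: 'Settings' and 'readOutputInterval' are invented stand-ins
// for the OPM parameter system, not opm-simulators code.
#include <cstdlib>
#include <iostream>

struct Settings
{
    int outputInterval = -1;   // -1 means "not set" in this sketch
    bool useMpi = false;
};

// Hypothetical replacement for a Parameters::Get<...>() call: read an
// integer from the environment, falling back to -1.
int readOutputInterval()
{
    const char* raw = std::getenv("OUTPUT_INTERVAL");
    return (raw != nullptr) ? std::atoi(raw) : -1;
}

Settings readSettings()
{
    Settings settings{};

#if HAVE_MPI
    settings.useMpi = true;
#endif // HAVE_MPI -- item 5: the '#endif' names the condition it closes

    // Item 1: reduced scope -- the temporary is const and lives only as long
    // as the check that consumes it.
    const int output_param = readOutputInterval();
    if (output_param >= 0) {
        settings.outputInterval = output_param;
    }

    return settings;
}

int main()
{
    std::cout << "output interval: " << readSettings().outputInterval << '\n';
}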
diff --git a/opm/simulators/flow/FlowGenericVanguard.hpp b/opm/simulators/flow/FlowGenericVanguard.hpp
index 097655d17..96012d353 100644
--- a/opm/simulators/flow/FlowGenericVanguard.hpp
+++ b/opm/simulators/flow/FlowGenericVanguard.hpp
@@ -66,7 +66,7 @@ struct MetisParams { static constexpr auto value = "default"; };
 
 #if HAVE_OPENCL || HAVE_ROCSPARSE || HAVE_CUDA
 struct NumJacobiBlocks { static constexpr int value = 0; };
-#endif
+#endif // HAVE_OPENCL || HAVE_ROCSPARSE || HAVE_CUDA
 
 struct OwnerCellsFirst { static constexpr bool value = true; };
 struct ParsingStrictness { static constexpr auto value = "normal"; };
@@ -248,7 +248,7 @@ public:
         return numJacobiBlocks_;
 #else
         return 0;
-#endif
+#endif // HAVE_OPENCL || HAVE_ROCSPARSE || HAVE_CUDA
     }
 
     /*!
@@ -297,7 +297,7 @@ public:
     { return this->externalPartitionFile_; }
-#endif
+#endif // HAVE_MPI
 
     /*!
      * \brief Whether perforations of a well might be distributed.
@@ -367,12 +367,13 @@ protected:
 #if HAVE_OPENCL || HAVE_ROCSPARSE || HAVE_CUDA
     int numJacobiBlocks_{0};
-#endif
+#endif // HAVE_OPENCL || HAVE_ROCSPARSE || HAVE_CUDA
 
     bool ownersFirst_;
 
 #if HAVE_MPI
     bool addCorners_;
     int numOverlap_;
+
     Dune::PartitionMethod partitionMethod_;
     bool serialPartitioning_;
     double imbalanceTol_;
@@ -384,7 +385,8 @@ protected:
     std::string metisParams_;
 
     std::string externalPartitionFile_{};
-#endif
+#endif // HAVE_MPI
+
     bool enableDistributedWells_;
     bool enableEclOutput_;
     bool allow_splitting_inactive_wells_;
diff --git a/opm/simulators/flow/GenericCpGridVanguard.cpp b/opm/simulators/flow/GenericCpGridVanguard.cpp
index 53ed939c2..f9c14a4d2 100644
--- a/opm/simulators/flow/GenericCpGridVanguard.cpp
+++ b/opm/simulators/flow/GenericCpGridVanguard.cpp
@@ -428,12 +428,14 @@ distributeGrid(const Dune::EdgeWeightMethod edgeWeights
 template
 void GenericCpGridVanguard::doCreateGrids_(EclipseState& eclState)
 {
-    const EclipseGrid* input_grid = nullptr;
-    std::vector<double> global_porv;
+    const auto isRoot = this->mpiRank == 0;
+
     // At this stage the ParallelEclipseState instance is still in global
     // view; on rank 0 we have undistributed data for the entire grid, on
     // the other ranks the EclipseState is empty.
-    if (mpiRank == 0) {
+    const EclipseGrid* input_grid = nullptr;
+    std::vector<double> global_porv;
+    if (isRoot) {
         input_grid = &eclState.getInputGrid();
         global_porv = eclState.fieldProps().porv(true);
         OpmLog::info("\nProcessing grid");
@@ -441,6 +443,7 @@ void GenericCpGridVanguard::doCreateGrids_(Eclips
 
     // --- Create grid ---
     OPM_TIMEBLOCK(createGrids);
+
 #if HAVE_MPI
     this->grid_ = std::make_unique<Dune::CpGrid>(FlowGenericVanguard::comm());
 #else
@@ -448,14 +451,14 @@ void GenericCpGridVanguard::doCreateGrids_(Eclips
 #endif
 
     // Note: removed_cells is guaranteed to be empty on ranks other than 0.
-    auto removed_cells =
-        this->grid_->processEclipseFormat(input_grid,
-                                          &eclState,
-                                          /*isPeriodic=*/false,
-                                          /*flipNormals=*/false,
-                                          /*clipZ=*/false);
+    auto removed_cells = this->grid_
+        ->processEclipseFormat(input_grid,
+                               &eclState,
+                               /* isPeriodic = */ false,
+                               /* flipNormals = */ false,
+                               /* clipZ = */ false);
 
-    if (mpiRank == 0) {
+    if (isRoot) {
         const auto& active_porv = eclState.fieldProps().porv(false);
         const auto& unit_system = eclState.getUnits();
         const auto& volume_unit = unit_system.name( UnitSystem::measure::volume);
@@ -480,45 +483,38 @@ void GenericCpGridVanguard::doCreateGrids_(Eclips
 
     // --- Add LGRs and update Leaf Grid View ---
     // Check if input file contains Lgrs.
-    const auto& lgrs = eclState.getLgrs();
-    const auto lgrsSize = lgrs.size();
+    //
     // If there are lgrs, create the grid with them, and update the leaf grid view.
-    if (lgrsSize)
-    {
+    if (const auto& lgrs = eclState.getLgrs(); lgrs.size() > 0) {
         OpmLog::info("\nAdding LGRs to the grid and updating its leaf grid view");
-        this->addLgrsUpdateLeafView(lgrs, lgrsSize, *(this->grid_));
+        this->addLgrsUpdateLeafView(lgrs, lgrs.size(), *this->grid_);
     }
 
 #if HAVE_MPI
-    {
-        const bool has_numerical_aquifer = eclState.aquifer().hasNumericalAquifer();
-        int mpiSize = 1;
-        MPI_Comm_size(grid_->comm(), &mpiSize);
+    if (this->grid_->comm().size() > 1) {
+        // Numerical aquifers generate new NNCs during grid processing. We
+        // need to distribute these NNCs from the root process to all other
+        // MPI processes.
+        if (eclState.aquifer().hasNumericalAquifer()) {
+            auto nnc_input = eclState.getInputNNC();
+            Parallel::MpiSerializer ser(this->grid_->comm());
+            ser.broadcast(nnc_input);
 
-        // when there is numerical aquifers, new NNC are generated during
-        // grid processing we need to pass the NNC from root process to
-        // other processes
-        if ( mpiSize > 1)
-        {
-            if (has_numerical_aquifer) {
-                auto nnc_input = eclState.getInputNNC();
-                Parallel::MpiSerializer ser(grid_->comm());
-                ser.broadcast(nnc_input);
-                if (mpiRank > 0) {
-                    eclState.setInputNNC(nnc_input);
-                }
+            if (! isRoot) {
+                eclState.setInputNNC(nnc_input);
             }
-            bool hasPinchNnc = eclState.hasPinchNNC();
-            grid_->comm().broadcast(&hasPinchNnc, 1, 0);
+        }
 
-            if(hasPinchNnc)
-            {
-                auto pinch_nnc = eclState.getPinchNNC();
-                Parallel::MpiSerializer ser(grid_->comm());
-                ser.broadcast(pinch_nnc);
-                if (mpiRank > 0) {
-                    eclState.setPinchNNC(std::move(pinch_nnc));
-                }
+        bool hasPinchNnc = eclState.hasPinchNNC();
+        grid_->comm().broadcast(&hasPinchNnc, 1, 0);
+
+        if (hasPinchNnc) {
+            auto pinch_nnc = eclState.getPinchNNC();
+            Parallel::MpiSerializer ser(this->grid_->comm());
+            ser.broadcast(pinch_nnc);
+
+            if (! isRoot) {
+                eclState.setPinchNNC(std::move(pinch_nnc));
             }
         }
     }
@@ -533,21 +529,20 @@ void GenericCpGridVanguard::doCreateGrids_(Eclips
 
     //
     // After loadbalance, grid_ will contain a global and distribute view.
     // equilGrid_ being a shallow copy only the global view.
-    if (mpiRank == 0)
-    {
-        equilGrid_.reset(new Dune::CpGrid(*grid_));
-        equilCartesianIndexMapper_ = std::make_unique(*equilGrid_);
+    if (isRoot) {
+        this->equilGrid_ = std::make_unique<Dune::CpGrid>(*this->grid_);
+        this->equilCartesianIndexMapper_ =
+            std::make_unique(*this->equilGrid_);
 
-        eclState.reset_actnum(UgGridHelpers::createACTNUM(*grid_));
+        eclState.reset_actnum(UgGridHelpers::createACTNUM(*this->grid_));
         eclState.set_active_indices(this->grid_->globalCell());
     }
 
     {
         auto size = removed_cells.size();
-
         this->grid_->comm().broadcast(&size, 1, 0);
 
-        if (mpiRank != 0) {
+        if (! isRoot) {
             removed_cells.resize(size);
         }
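The HAVE_MPI block above repeats one pattern several times in doCreateGrids_: data that initially exists only on the root rank (the aquifer NNCs, the pinch NNCs, the removed-cell count) is broadcast, and the non-root ranks store the received copy. The stand-alone sketch below shows that shape with plain MPI_Bcast and an invented integer payload in place of Opm::Parallel::MpiSerializer and the EclipseState data.

// Sketch only: "build on the root rank, then broadcast" with an invented
// payload; opm-simulators itself uses Opm::Parallel::MpiSerializer for this.
#include <mpi.h>

#include <iostream>
#include <vector>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    const bool isRoot = (rank == 0);

    // Only the root rank holds the data initially, mirroring the global-view
    // EclipseState that is fully populated on rank 0 only.
    std::vector<int> payload;
    if (isRoot) {
        payload = {1, 2, 3, 5, 8};
    }

    // Broadcast the size first so every rank can allocate the buffer ...
    auto size = static_cast<unsigned long>(payload.size());
    MPI_Bcast(&size, 1, MPI_UNSIGNED_LONG, 0, MPI_COMM_WORLD);

    if (!isRoot) {
        payload.resize(size);
    }

    // ... then broadcast the contents themselves.
    MPI_Bcast(payload.data(), static_cast<int>(size), MPI_INT, 0, MPI_COMM_WORLD);

    std::cout << "rank " << rank << " now has " << payload.size() << " entries\n";

    MPI_Finalize();
    return 0;
}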
diff --git a/opm/simulators/flow/PolyhedralGridVanguard.hpp b/opm/simulators/flow/PolyhedralGridVanguard.hpp
index 98d0493e3..b9f456579 100644
--- a/opm/simulators/flow/PolyhedralGridVanguard.hpp
+++ b/opm/simulators/flow/PolyhedralGridVanguard.hpp
@@ -236,11 +236,17 @@ public:
         // not required for this type of grid yet (only from bdaBridge??)
         return {};
     }
+
 protected:
     void createGrids_()
     {
-        grid_ = std::make_unique(this->eclState().getInputGrid(), this->eclState().fieldProps().porv(true));
-        cartesianIndexMapper_ = std::make_unique(*grid_);
+        this->grid_ = std::make_unique
+            (this->eclState().getInputGrid(),
+             this->eclState().fieldProps().porv(true));
+
+        this->cartesianIndexMapper_ =
+            std::make_unique(*this->grid_);
+
         this->updateGridView_();
         this->updateCartesianToCompressedMapping_();
         this->updateCellDepths_();