Replacing use of MPI_COMM_WORLD with a variable communicator.

Author: Elyes Ahmed
Date: 2021-05-25 12:57:11 +02:00
Committed by: Atgeirr Flø Rasmussen
Parent: 61ef539bf5
Commit: f53c597f90
48 changed files with 584 additions and 420 deletions
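
The change follows one pattern throughout: instead of hard-coding MPI_COMM_WORLD, each call site receives a communicator object (the grid's communication object or the vanguard's stored communicator), so the simulator can in principle run on a sub-communicator. A minimal sketch of the pattern, assuming DUNE's MPIHelper and the same version guard used in the hunks below; the function name reportRank is made up for illustration:

```cpp
#include <dune/common/version.hh>
#include <dune/common/parallel/mpihelper.hh>
#include <iostream>

#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
using Communication = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
#else
using Communication = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
#endif

// The communicator arrives as an argument instead of being assumed to be
// MPI_COMM_WORLD, so the same code also works on a sub-communicator.
void reportRank(const Communication& comm)
{
    std::cout << "rank " << comm.rank() << " of " << comm.size() << '\n';
}

int main(int argc, char** argv)
{
    Dune::MPIHelper::instance(argc, argv);  // initializes (and finalizes) MPI as needed
    Communication comm;                     // default-constructs from MPI_COMM_WORLD
    reportRank(comm);
    return 0;
}
```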

View File

@@ -757,7 +757,7 @@ CollectDataToIORank(const Grid& grid, const EquilGrid* equilGrid,
const GridView& localGridView,
const Dune::CartesianIndexMapper<Grid>& cartMapper,
const Dune::CartesianIndexMapper<EquilGrid>* equilCartMapper)
-: toIORankComm_()
+: toIORankComm_(grid.comm())
{
// index maps only have to be build when reordering is needed
if (!needsReordering && !isParallel())
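
The collector used to default-construct its communicator, which amounts to MPI_COMM_WORLD; it is now seeded from grid.comm(). A reduced sketch of that constructor change, assuming a Dune-style grid that exports a CollectiveCommunication type; everything except toIORankComm_ and grid.comm() is a placeholder name:

```cpp
// Stand-in for the collector class: it keeps a copy of the communicator it
// was given instead of silently defaulting to MPI_COMM_WORLD.
template <class Grid>
class Collector {
public:
    explicit Collector(const Grid& grid)
        : toIORankComm_(grid.comm())   // was: toIORankComm_()
    {}

private:
    typename Grid::CollectiveCommunication toIORankComm_;
};
```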

View File

@@ -58,7 +58,7 @@ template<class ElementMapper, class GridView, class Scalar>
EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::EclGenericCpGridVanguard()
{
#if HAVE_MPI
-MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
+MPI_Comm_rank(EclGenericVanguard::comm(), &mpiRank);
#else
mpiRank = 0;
#endif
@@ -85,7 +85,7 @@ void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::doLoadBalance_(Dun
EclGenericVanguard::ParallelWellStruct& parallelWells)
{
int mpiSize = 1;
-MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+MPI_Comm_size(grid_->comm(), &mpiSize);
if (mpiSize > 1) {
// the CpGrid's loadBalance() method likes to have the transmissibilities as
@@ -188,7 +188,7 @@ template<class ElementMapper, class GridView, class Scalar>
void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::distributeFieldProps_(EclipseState& eclState1)
{
int mpiSize = 1;
-MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+MPI_Comm_size(grid_->comm(), &mpiSize);
if (mpiSize > 1) {
try
@@ -230,7 +230,12 @@ void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::doCreateGrids_(Ecl
OpmLog::info("\nProcessing grid");
}
+#if HAVE_MPI
+grid_.reset(new Dune::CpGrid(EclGenericVanguard::comm()));
+#else
grid_.reset(new Dune::CpGrid());
+#endif
const auto& removed_cells = grid_->processEclipseFormat(input_grid,
&eclState,
/*isPeriodic=*/false,
@@ -262,12 +267,13 @@ void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::doCreateGrids_(Ecl
{
const bool has_numerical_aquifer = eclState.aquifer().hasNumericalAquifer();
int mpiSize = 1;
-MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+MPI_Comm_size(grid_->comm(), &mpiSize);
// when there is numerical aquifers, new NNC are generated during grid processing
// we need to pass the NNC from root process to other processes
if (has_numerical_aquifer && mpiSize > 1) {
auto nnc_input = eclState.getInputNNC();
-EclMpiSerializer ser(Dune::MPIHelper::getCollectiveCommunication());
+const auto& comm_nnc = Opm::Parallel::Communication();
+EclMpiSerializer ser(comm_nnc);
ser.broadcast(nnc_input);
if (mpiRank > 0) {
eclState.setInputNNC(nnc_input);
@@ -312,7 +318,7 @@ void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::doFilterConnection
{
// Broadcast another time to remove inactive peforations on
// slave processors.
-eclScheduleBroadcast(schedule);
+eclScheduleBroadcast(EclGenericVanguard::comm(), schedule);
}
catch(const std::exception& broadcast_error)
{
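
All rank and size queries in this vanguard now go through the communicator attached to the grid (or through EclGenericVanguard::comm() before the grid exists), and under MPI the CpGrid itself is constructed on that communicator. A small sketch of the query pattern, assuming a Dune grid whose comm() wrapper converts to a raw MPI_Comm — the hunks above rely on exactly that conversion:

```cpp
#if HAVE_MPI
#include <mpi.h>
#endif

// Illustrative helper: asks the grid's own communicator how many ranks share it.
template <class Grid>
int ranksRunningGrid(const Grid& grid)
{
    int mpiSize = 1;
#if HAVE_MPI
    MPI_Comm_size(grid.comm(), &mpiSize);   // grid.comm() converts to MPI_Comm
#endif
    return mpiSize;
}
```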

View File

@@ -62,7 +62,7 @@ std::shared_ptr<Schedule> EclGenericVanguard::externalEclSchedule_;
std::shared_ptr<SummaryConfig> EclGenericVanguard::externalEclSummaryConfig_;
std::unique_ptr<UDQState> EclGenericVanguard::externalUDQState_;
std::unique_ptr<Action::State> EclGenericVanguard::externalActionState_;
-std::unique_ptr<EclGenericVanguard::CommunicationType> EclGenericVanguard::comm_;
+std::unique_ptr<Parallel::Communication> EclGenericVanguard::comm_;
EclGenericVanguard::EclGenericVanguard()
: python(std::make_shared<Python>())
@@ -295,7 +295,7 @@ void EclGenericVanguard::init()
parseContext_ = createParseContext(ignoredKeywords_, eclStrictParsing_);
}
-readDeck(myRank, fileName_, deck_, eclState_, eclSchedule_, udqState_, actionState_,
+readDeck(EclGenericVanguard::comm(), fileName_, deck_, eclState_, eclSchedule_, udqState_, actionState_,
eclSummaryConfig_, std::move(errorGuard), python,
std::move(parseContext_), /* initFromRestart = */ false,
/* checkDeck = */ enableExperiments_, outputInterval_);
@@ -338,11 +338,8 @@ void EclGenericVanguard::init()
}
}
}
-#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
-const auto& comm = Dune::MPIHelper::getCommunication();
-#else
-const auto& comm = Dune::MPIHelper::getCollectiveCommunication();
-#endif
+const auto& comm = Parallel::Communication();
hasMsWell = comm.max(hasMsWell);
if (hasMsWell)
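
With the alias in place (next file), the DUNE 2.6/2.7 #if around getCommunication() collapses into default-constructing Parallel::Communication, which wraps MPI_COMM_WORLD, and readDeck() now receives the communicator rather than just a rank. A sketch of the reduction used for hasMsWell, assuming DUNE >= 2.7 for the unguarded Dune::Communication spelling; the function name is illustrative:

```cpp
#include <dune/common/parallel/mpihelper.hh>

bool anyRankHasMultisegmentWell(bool localFlag)
{
    // Default construction wraps MPI_COMM_WORLD (serial builds get a no-op comm).
    Dune::Communication<Dune::MPIHelper::MPICommunicator> comm;
    // max over all ranks: true as soon as one rank sees a multisegment well.
    return comm.max(static_cast<int>(localFlag)) == 1;
}
```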

View File

@@ -44,6 +44,14 @@
#include <utility>
#include <vector>
+namespace Opm::Parallel {
+#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
+using Communication = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
+#else
+using Communication = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
+#endif
+}
namespace Opm {
namespace Action { class State; }
@@ -60,13 +68,8 @@ class UDQState;
class EclGenericVanguard {
public:
+using ParallelWellStruct = std::vector<std::pair<std::string,bool>>;
-#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
-using CommunicationType = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
-#else
-using CommunicationType = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
-#endif
-using ParallelWellStruct = std::vector<std::pair<std::string,bool>>;
/*!
* \brief Constructor.
@@ -277,11 +280,11 @@ public:
{ return parallelWells_; }
//! \brief Set global communication.
-static void setCommunication(std::unique_ptr<CommunicationType> comm)
+static void setCommunication(std::unique_ptr<Opm::Parallel::Communication> comm)
{ comm_ = std::move(comm); }
//! \brief Obtain global communicator.
-static CommunicationType& comm()
+static Parallel::Communication& comm()
{
assert(comm_);
return *comm_;
@@ -310,7 +313,7 @@ protected:
static bool externalDeckSet_;
static std::unique_ptr<UDQState> externalUDQState_;
static std::unique_ptr<Action::State> externalActionState_;
-static std::unique_ptr<CommunicationType> comm_;
+static std::unique_ptr<Parallel::Communication> comm_;
std::string caseName_;
std::string fileName_;
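
The header now defines Opm::Parallel::Communication once, and the vanguard stores the global communicator behind that alias. A self-contained sketch of the same accessor pattern — the class here is a stand-in, not the real EclGenericVanguard:

```cpp
#include <cassert>
#include <memory>
#include <dune/common/version.hh>
#include <dune/common/parallel/mpihelper.hh>

namespace Parallel {
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
using Communication = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
#else
using Communication = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
#endif
}

class VanguardLike {
public:
    //! Driver installs the communicator once, before anything uses comm().
    static void setCommunication(std::unique_ptr<Parallel::Communication> comm)
    { comm_ = std::move(comm); }

    //! Everything else asks for the communicator here, never MPI_COMM_WORLD.
    static Parallel::Communication& comm()
    {
        assert(comm_);
        return *comm_;
    }

private:
    static std::unique_ptr<Parallel::Communication> comm_;
};

std::unique_ptr<Parallel::Communication> VanguardLike::comm_;
```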

View File

@@ -512,11 +512,11 @@ evalSummary(int reportStepNum,
if (collectToIORank_.isParallel()) {
#ifdef HAVE_MPI
unsigned long buffer_size = buffer.size();
-MPI_Bcast(&buffer_size, 1, MPI_UNSIGNED_LONG, collectToIORank_.ioRank, MPI_COMM_WORLD);
+MPI_Bcast(&buffer_size, 1, MPI_UNSIGNED_LONG, collectToIORank_.ioRank, grid_.comm());
if (!collectToIORank_.isIORank())
buffer.resize( buffer_size );
-MPI_Bcast(buffer.data(), buffer_size, MPI_CHAR, collectToIORank_.ioRank, MPI_COMM_WORLD);
+MPI_Bcast(buffer.data(), buffer_size, MPI_CHAR, collectToIORank_.ioRank, grid_.comm());
if (!collectToIORank_.isIORank()) {
SummaryState& st = summaryState;
st.deserialize(buffer);
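
The summary buffer is still shared with two raw broadcasts — length first, then bytes — but both are now rooted on the I/O rank of the grid's communicator instead of MPI_COMM_WORLD. A generic sketch of that pattern over a caller-supplied MPI_Comm; the function name is illustrative:

```cpp
#include <mpi.h>
#include <vector>

void broadcastBuffer(std::vector<char>& buffer, int ioRank, MPI_Comm comm)
{
    unsigned long size = buffer.size();
    MPI_Bcast(&size, 1, MPI_UNSIGNED_LONG, ioRank, comm);        // length first
    int rank = 0;
    MPI_Comm_rank(comm, &rank);
    if (rank != ioRank)
        buffer.resize(size);                                     // receivers allocate
    MPI_Bcast(buffer.data(), static_cast<int>(size), MPI_CHAR, ioRank, comm);
}
```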

View File

@@ -21,6 +21,7 @@
#ifndef ECL_MPI_SERIALIZER_HH
#define ECL_MPI_SERIALIZER_HH
#include <dune/common/version.hh>
#include <opm/simulators/utils/ParallelRestart.hpp>
#include <optional>
@@ -39,7 +40,7 @@ class EclMpiSerializer {
public:
//! \brief Constructor.
//! \param comm The global communicator to broadcast using
-explicit EclMpiSerializer(Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator> comm) :
+explicit EclMpiSerializer(Opm::Parallel::Communication comm) :
m_comm(comm)
{}
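
The serializer's constructor takes the shared alias instead of spelling out the Dune template, so call sites (like the NNC broadcast earlier in this commit) choose which communicator the broadcast runs on. A minimal sketch of broadcasting plain data over a caller-chosen Dune communication object, assuming DUNE >= 2.7; shareParameters is an illustrative name:

```cpp
#include <array>
#include <dune/common/parallel/mpihelper.hh>

void shareParameters(std::array<double, 3>& params,
                     const Dune::Communication<Dune::MPIHelper::MPICommunicator>& comm)
{
    // Rank 0 of *this* communicator sends; every other rank overwrites its copy.
    comm.broadcast(params.data(), static_cast<int>(params.size()), /*root=*/0);
}
```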

View File

@@ -2121,7 +2121,7 @@ private:
Scalar>(fs, iq.pvtRegionIndex());
}
}
OPM_END_PARALLEL_TRY_CATCH("EclProblem::_updateCompositionLayers() failed: ");
OPM_END_PARALLEL_TRY_CATCH("EclProblem::_updateCompositionLayers() failed: ", this->simulator().vanguard().grid().comm());
}
bool updateMaxOilSaturation_()
@@ -2150,7 +2150,7 @@ private:
this->maxOilSaturation_[compressedDofIdx] = std::max(this->maxOilSaturation_[compressedDofIdx], So);
}
OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMayOilSaturation() failed:");
OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMayOilSaturation() failed:", vanguard.grid().comm());
// we need to invalidate the intensive quantities cache here because the
// derivatives of Rs and Rv will most likely have changed
return true;
@@ -2184,7 +2184,7 @@ private:
Scalar Sw = decay<Scalar>(fs.saturation(waterPhaseIdx));
this->maxWaterSaturation_[compressedDofIdx] = std::max(this->maxWaterSaturation_[compressedDofIdx], Sw);
}
OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMayWaterSaturation() failed: ");
OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMayWaterSaturation() failed: ", vanguard.grid().comm());
return true;
}
@@ -2214,8 +2214,7 @@ private:
std::min(this->minOilPressure_[compressedDofIdx],
getValue(fs.pressure(oilPhaseIdx)));
}
OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMinPressure_() failed: ");
OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMinPressure_() failed: ", this->simulator().vanguard().grid().comm());
return true;
}
@@ -2658,7 +2657,7 @@ private:
const auto& intQuants = elemCtx.intensiveQuantities(/*spaceIdx=*/0, /*timeIdx=*/0);
materialLawManager_->updateHysteresis(intQuants.fluidState(), compressedDofIdx);
}
OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateHyteresis_(): ");
OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateHyteresis_(): ", vanguard.grid().comm());
return true;
}
@@ -2683,7 +2682,7 @@ private:
this->maxPolymerAdsorption_[compressedDofIdx] = std::max(this->maxPolymerAdsorption_[compressedDofIdx],
scalarValue(intQuants.polymerAdsorption()));
}
OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMaxPolymerAdsorption_(): ");
OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMaxPolymerAdsorption_(): ", vanguard.grid().comm());
}
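
OPM_END_PARALLEL_TRY_CATCH now takes the communicator on which the per-rank failure flag is reduced, so exactly the ranks involved agree on whether to abort, instead of implicitly reducing over MPI_COMM_WORLD. A hedged sketch of the idea (this is not the real macro, just the pattern it wraps):

```cpp
#include <stdexcept>
#include <string>

template <class Body, class Comm>
void runCollectively(Body&& body, const Comm& comm, const std::string& where)
{
    int failed = 0;
    try {
        body();                        // per-rank work that may throw
    } catch (const std::exception&) {
        failed = 1;
    }
    // Reduce the failure flag over *this* communicator, not MPI_COMM_WORLD,
    // so the participating ranks either all continue or all bail out together.
    if (comm.max(failed))
        throw std::runtime_error(where + " failed on at least one rank");
}
```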
struct PffDofData_

View File

@@ -420,7 +420,7 @@ public:
for (size_t wellIdx = 0; wellIdx < wellSize; ++wellIdx)
wells_[wellIdx]->beginIterationAccumulate(elemCtx, /*timeIdx=*/0);
}
OPM_END_PARALLEL_TRY_CATCH("EclWellManager::beginIteration() failed: ");
OPM_END_PARALLEL_TRY_CATCH("EclWellManager::beginIteration() failed: ", simulator_.vanguard().grid().comm());
// call the postprocessing routines
for (size_t wellIdx = 0; wellIdx < wellSize; ++wellIdx)

View File

@@ -393,7 +393,7 @@ private:
eclOutputModule_->processElement(elemCtx);
}
OPM_END_PARALLEL_TRY_CATCH("EclWriter::prepareLocalCellData() failed: ")
OPM_END_PARALLEL_TRY_CATCH("EclWriter::prepareLocalCellData() failed: ", simulator_.vanguard().grid().comm())
}
Simulator& simulator_;