Merge pull request #3571 from atgeirr/allow-non-world-comm

Replaces the use of MPI_COMM_WORLD with a variable communicator, so the simulator is no longer tied to the world communicator.
Merged by Markus Blatt on 2021-10-06 15:52:17 +02:00 (committed via GitHub).
51 changed files with 554 additions and 430 deletions
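Sketched below, as a minimal hypothetical example rather than code taken from the PR, is the pattern applied throughout the changed files: rank, size, and collective operations are queried on the communicator the grid (or vanguard) actually carries, instead of directly on MPI_COMM_WORLD.

```cpp
// Minimal sketch of the pattern, assuming a grid type whose comm() returns the
// communicator it was constructed with (as Dune::CpGrid does after this change).
#include <iostream>

template <class Grid>
void reportLayout(const Grid& grid)
{
    // Before: MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size);
    // After:  the communicator is whatever the grid was built with, which need
    //         not be the world communicator.
    const int rank = grid.comm().rank();
    const int size = grid.comm().size();
    if (rank == 0)
        std::cout << "grid is distributed over " << size << " processes\n";
}
```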


@@ -757,7 +757,7 @@ CollectDataToIORank(const Grid& grid, const EquilGrid* equilGrid,
const GridView& localGridView,
const Dune::CartesianIndexMapper<Grid>& cartMapper,
const Dune::CartesianIndexMapper<EquilGrid>* equilCartMapper)
- : toIORankComm_()
+ : toIORankComm_(grid.comm())
{
// index maps only have to be build when reordering is needed
if (!needsReordering && !isParallel())


@@ -58,7 +58,7 @@ template<class ElementMapper, class GridView, class Scalar>
EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::EclGenericCpGridVanguard()
{
#if HAVE_MPI
- MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
+ MPI_Comm_rank(EclGenericVanguard::comm(), &mpiRank);
#else
mpiRank = 0;
#endif
@@ -85,7 +85,7 @@ void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::doLoadBalance_(Dun
EclGenericVanguard::ParallelWellStruct& parallelWells)
{
int mpiSize = 1;
- MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+ MPI_Comm_size(grid_->comm(), &mpiSize);
if (mpiSize > 1) {
// the CpGrid's loadBalance() method likes to have the transmissibilities as
@@ -185,7 +185,7 @@ template<class ElementMapper, class GridView, class Scalar>
void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::distributeFieldProps_(EclipseState& eclState1)
{
int mpiSize = 1;
- MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+ MPI_Comm_size(grid_->comm(), &mpiSize);
if (mpiSize > 1) {
try
@@ -221,7 +221,12 @@ void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::doCreateGrids_(Ecl
OpmLog::info("\nProcessing grid");
}
- grid_.reset(new Dune::CpGrid());
+ #if HAVE_MPI
+ grid_.reset(new Dune::CpGrid(EclGenericVanguard::comm()));
+ #else
+ grid_.reset(new Dune::CpGrid());
+ #endif
const auto& removed_cells = grid_->processEclipseFormat(input_grid,
&eclState,
/*isPeriodic=*/false,
@@ -256,12 +261,12 @@ void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::doCreateGrids_(Ecl
{
const bool has_numerical_aquifer = eclState.aquifer().hasNumericalAquifer();
int mpiSize = 1;
- MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+ MPI_Comm_size(grid_->comm(), &mpiSize);
// when there is numerical aquifers, new NNC are generated during grid processing
// we need to pass the NNC from root process to other processes
if (has_numerical_aquifer && mpiSize > 1) {
auto nnc_input = eclState.getInputNNC();
- EclMpiSerializer ser(Dune::MPIHelper::getCollectiveCommunication());
+ EclMpiSerializer ser(grid_->comm());
ser.broadcast(nnc_input);
if (mpiRank > 0) {
eclState.setInputNNC(nnc_input);
@@ -306,7 +311,7 @@ void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::doFilterConnection
{
// Broadcast another time to remove inactive perforations on
// slave processors.
- eclScheduleBroadcast(schedule);
+ eclScheduleBroadcast(EclGenericVanguard::comm(), schedule);
}
catch(const std::exception& broadcast_error)
{
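Because the communicator is now stored in the vanguard and handed to the grid, a driver could in principle install a sub-communicator before any grid is created. The following is a hedged sketch only: setCommunication() and comm() are the static EclGenericVanguard accessors touched by this PR (see the header diff further down), while the MPI_Comm_split call, the even/odd color choice, and the function name are purely illustrative.

```cpp
// Hypothetical sketch: run the simulator on a communicator other than
// MPI_COMM_WORLD by installing it in the vanguard before grid creation.
// (The EclGenericVanguard and ParallelCommunication headers from
// opm-simulators are assumed to be included.)
#include <mpi.h>
#include <memory>

void installSubCommunicator()
{
    int worldRank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &worldRank);

    // Illustrative split: even and odd world ranks end up in separate groups.
    MPI_Comm subComm;
    MPI_Comm_split(MPI_COMM_WORLD, worldRank % 2, /*key=*/0, &subComm);

    Opm::EclGenericVanguard::setCommunication(
        std::make_unique<Opm::Parallel::Communication>(subComm));

    // doCreateGrids_() then constructs the grid on exactly this communicator:
    //   grid_.reset(new Dune::CpGrid(EclGenericVanguard::comm()));
}
```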


@@ -62,7 +62,7 @@ std::shared_ptr<Schedule> EclGenericVanguard::externalEclSchedule_;
std::shared_ptr<SummaryConfig> EclGenericVanguard::externalEclSummaryConfig_;
std::unique_ptr<UDQState> EclGenericVanguard::externalUDQState_;
std::unique_ptr<Action::State> EclGenericVanguard::externalActionState_;
- std::unique_ptr<EclGenericVanguard::CommunicationType> EclGenericVanguard::comm_;
+ std::unique_ptr<Parallel::Communication> EclGenericVanguard::comm_;
EclGenericVanguard::EclGenericVanguard()
: python(std::make_shared<Python>())
@@ -224,12 +224,6 @@ void EclGenericVanguard::updateOutputDir_(std::string outputDir,
void EclGenericVanguard::init()
{
- int myRank = 0;
- #if HAVE_MPI
- MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
- #endif
// Make proper case name.
{
if (fileName_.empty())
@@ -295,7 +289,7 @@ void EclGenericVanguard::init()
parseContext_ = createParseContext(ignoredKeywords_, eclStrictParsing_);
}
- readDeck(myRank, fileName_, deck_, eclState_, eclSchedule_, udqState_, actionState_,
+ readDeck(EclGenericVanguard::comm(), fileName_, deck_, eclState_, eclSchedule_, udqState_, actionState_,
eclSummaryConfig_, std::move(errorGuard), python,
std::move(parseContext_), /* initFromRestart = */ false,
/* checkDeck = */ enableExperiments_, outputInterval_);
@@ -326,10 +320,11 @@ void EclGenericVanguard::init()
if (enableDistributedWells() )
{
int hasMsWell = false;
+ const auto& comm = EclGenericVanguard::comm();
if (useMultisegmentWell_)
{
- if (myRank == 0)
+ if (comm.rank() == 0)
{
const auto& wells = this->schedule().getWellsatEnd();
for ( const auto& well: wells)
@@ -338,16 +333,12 @@ void EclGenericVanguard::init()
}
}
}
- #if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
- const auto& comm = Dune::MPIHelper::getCommunication();
- #else
- const auto& comm = Dune::MPIHelper::getCollectiveCommunication();
- #endif
hasMsWell = comm.max(hasMsWell);
if (hasMsWell)
{
- if (myRank == 0)
+ if (comm.rank() == 0)
{
std::string message =
std::string("Option --allow-distributed-wells=true is only allowed if model\n")


@@ -29,9 +29,8 @@
#include <opm/grid/common/GridEnums.hpp>
#include <dune/common/version.hh>
+ #include <opm/simulators/utils/ParallelCommunication.hpp>
- #include <dune/common/parallel/collectivecommunication.hh>
- #include <dune/common/parallel/mpihelper.hh>
#include <array>
#include <cassert>
@@ -44,6 +43,7 @@
#include <utility>
#include <vector>
namespace Opm {
namespace Action { class State; }
@@ -62,12 +62,6 @@ class EclGenericVanguard {
public:
using ParallelWellStruct = std::vector<std::pair<std::string,bool>>;
- #if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
- using CommunicationType = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
- #else
- using CommunicationType = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
- #endif
/*!
* \brief Constructor.
* \details Needs to be in compile unit.
@@ -277,11 +271,11 @@ public:
{ return parallelWells_; }
//! \brief Set global communication.
- static void setCommunication(std::unique_ptr<CommunicationType> comm)
+ static void setCommunication(std::unique_ptr<Opm::Parallel::Communication> comm)
{ comm_ = std::move(comm); }
//! \brief Obtain global communicator.
- static CommunicationType& comm()
+ static Parallel::Communication& comm()
{
assert(comm_);
return *comm_;
@@ -310,7 +304,7 @@ protected:
static bool externalDeckSet_;
static std::unique_ptr<UDQState> externalUDQState_;
static std::unique_ptr<Action::State> externalActionState_;
- static std::unique_ptr<CommunicationType> comm_;
+ static std::unique_ptr<Parallel::Communication> comm_;
std::string caseName_;
std::string fileName_;
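The deleted CommunicationType typedef is replaced by Opm::Parallel::Communication from opm/simulators/utils/ParallelCommunication.hpp. A plausible minimal shape of that alias, reconstructed from the removed lines above rather than copied from the repository, is:

```cpp
// Assumed content of the Parallel::Communication alias; the real header may differ.
#include <dune/common/version.hh>
#include <dune/common/parallel/mpihelper.hh>

namespace Opm {
namespace Parallel {

#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
using Communication = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
#else
using Communication = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
#endif

} // namespace Parallel
} // namespace Opm
```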


@@ -512,11 +512,11 @@ evalSummary(int reportStepNum,
if (collectToIORank_.isParallel()) {
#ifdef HAVE_MPI
unsigned long buffer_size = buffer.size();
- MPI_Bcast(&buffer_size, 1, MPI_UNSIGNED_LONG, collectToIORank_.ioRank, MPI_COMM_WORLD);
+ grid_.comm().broadcast(&buffer_size, 1, collectToIORank_.ioRank);
if (!collectToIORank_.isIORank())
buffer.resize( buffer_size );
- MPI_Bcast(buffer.data(), buffer_size, MPI_CHAR, collectToIORank_.ioRank, MPI_COMM_WORLD);
+ grid_.comm().broadcast(buffer.data(), buffer_size, collectToIORank_.ioRank);
if (!collectToIORank_.isIORank()) {
SummaryState& st = summaryState;
st.deserialize(buffer);
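The summary-buffer exchange above also shows why the member broadcast() is terser than the raw MPI call: the datatype is deduced from the pointer type, so only the buffer, the element count, and the root rank remain. A sketch with hypothetical variable names:

```cpp
// Broadcast a serialized buffer from the I/O rank over the grid's communicator.
std::vector<char> buffer;                  // filled on the I/O rank only
unsigned long bufferSize = buffer.size();
const int ioRank = 0;                      // illustrative I/O rank

// Before: MPI_Bcast(&bufferSize, 1, MPI_UNSIGNED_LONG, ioRank, MPI_COMM_WORLD);
grid.comm().broadcast(&bufferSize, 1, ioRank);

if (grid.comm().rank() != ioRank)
    buffer.resize(bufferSize);

// Before: MPI_Bcast(buffer.data(), bufferSize, MPI_CHAR, ioRank, MPI_COMM_WORLD);
grid.comm().broadcast(buffer.data(), bufferSize, ioRank);
```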


@@ -21,6 +21,7 @@
#ifndef ECL_MPI_SERIALIZER_HH
#define ECL_MPI_SERIALIZER_HH
#include <dune/common/version.hh>
#include <opm/simulators/utils/ParallelRestart.hpp>
#include <optional>
@@ -39,7 +40,7 @@ class EclMpiSerializer {
public:
//! \brief Constructor.
//! \param comm The global communicator to broadcast using
- explicit EclMpiSerializer(Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator> comm) :
+ explicit EclMpiSerializer(Opm::Parallel::Communication comm) :
m_comm(comm)
{}
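A correspondingly small usage sketch, mirroring the NNC broadcast call site changed earlier in this diff (object names are illustrative):

```cpp
// Broadcast any object the serializer supports over the grid's communicator
// instead of the world communicator.
EclMpiSerializer ser(grid.comm());
ser.broadcast(nnc_input);   // nnc_input: e.g. the NNC data read on the root rank
```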


@@ -2121,7 +2121,7 @@ private:
Scalar>(fs, iq.pvtRegionIndex());
}
}
- OPM_END_PARALLEL_TRY_CATCH("EclProblem::_updateCompositionLayers() failed: ");
+ OPM_END_PARALLEL_TRY_CATCH("EclProblem::_updateCompositionLayers() failed: ", this->simulator().vanguard().grid().comm());
}
bool updateMaxOilSaturation_()
@@ -2150,7 +2150,7 @@ private:
this->maxOilSaturation_[compressedDofIdx] = std::max(this->maxOilSaturation_[compressedDofIdx], So);
}
- OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMayOilSaturation() failed:");
+ OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMayOilSaturation() failed:", vanguard.grid().comm());
// we need to invalidate the intensive quantities cache here because the
// derivatives of Rs and Rv will most likely have changed
return true;
@@ -2184,7 +2184,7 @@ private:
Scalar Sw = decay<Scalar>(fs.saturation(waterPhaseIdx));
this->maxWaterSaturation_[compressedDofIdx] = std::max(this->maxWaterSaturation_[compressedDofIdx], Sw);
}
- OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMayWaterSaturation() failed: ");
+ OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMayWaterSaturation() failed: ", vanguard.grid().comm());
return true;
}
@@ -2214,8 +2214,7 @@ private:
std::min(this->minOilPressure_[compressedDofIdx],
getValue(fs.pressure(oilPhaseIdx)));
}
- OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMinPressure_() failed: ");
+ OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMinPressure_() failed: ", this->simulator().vanguard().grid().comm());
return true;
}
@@ -2658,7 +2657,7 @@ private:
const auto& intQuants = elemCtx.intensiveQuantities(/*spaceIdx=*/0, /*timeIdx=*/0);
materialLawManager_->updateHysteresis(intQuants.fluidState(), compressedDofIdx);
}
- OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateHyteresis_(): ");
+ OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateHyteresis_(): ", vanguard.grid().comm());
return true;
}
@@ -2683,7 +2682,7 @@ private:
this->maxPolymerAdsorption_[compressedDofIdx] = std::max(this->maxPolymerAdsorption_[compressedDofIdx],
scalarValue(intQuants.polymerAdsorption()));
}
- OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMaxPolymerAdsorption_(): ");
+ OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMaxPolymerAdsorption_(): ", vanguard.grid().comm());
}
struct PffDofData_
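With the extra argument, the parallel try/catch macro pair collects and rethrows exceptions over the communicator the grid actually lives on rather than over MPI_COMM_WORLD. A hedged usage sketch (loop body and names are illustrative):

```cpp
// Illustrative use of the macro pair with the new communicator argument.
OPM_BEGIN_PARALLEL_TRY_CATCH();
for (const auto& element : elements(gridView)) {
    // ... per-element work that may throw on some ranks ...
}
OPM_END_PARALLEL_TRY_CATCH("MyModule::update() failed: ", gridView.comm());
```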


@@ -420,7 +420,7 @@ public:
for (size_t wellIdx = 0; wellIdx < wellSize; ++wellIdx)
wells_[wellIdx]->beginIterationAccumulate(elemCtx, /*timeIdx=*/0);
}
- OPM_END_PARALLEL_TRY_CATCH("EclWellManager::beginIteration() failed: ");
+ OPM_END_PARALLEL_TRY_CATCH("EclWellManager::beginIteration() failed: ", simulator_.vanguard().grid().comm());
// call the postprocessing routines
for (size_t wellIdx = 0; wellIdx < wellSize; ++wellIdx)


@@ -393,7 +393,7 @@ private:
eclOutputModule_->processElement(elemCtx);
}
- OPM_END_PARALLEL_TRY_CATCH("EclWriter::prepareLocalCellData() failed: ")
+ OPM_END_PARALLEL_TRY_CATCH("EclWriter::prepareLocalCellData() failed: ", simulator_.vanguard().grid().comm())
}
Simulator& simulator_;