Replace uses of MPI_COMM_WORLD with a variable communicator, so the simulator can be run on a communicator other than the MPI world communicator.

Authored by Elyes Ahmed on 2021-05-25 12:57:11 +02:00; committed by Atgeirr Flø Rasmussen
parent 61ef539bf5
commit f53c597f90
48 changed files with 584 additions and 420 deletions


@ -757,7 +757,7 @@ CollectDataToIORank(const Grid& grid, const EquilGrid* equilGrid,
const GridView& localGridView, const GridView& localGridView,
const Dune::CartesianIndexMapper<Grid>& cartMapper, const Dune::CartesianIndexMapper<Grid>& cartMapper,
const Dune::CartesianIndexMapper<EquilGrid>* equilCartMapper) const Dune::CartesianIndexMapper<EquilGrid>* equilCartMapper)
: toIORankComm_() : toIORankComm_(grid.comm())
{ {
// index maps only have to be build when reordering is needed // index maps only have to be build when reordering is needed
if (!needsReordering && !isParallel()) if (!needsReordering && !isParallel())
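The pattern repeated throughout the commit is visible here: objects that used to default-construct their communicator (and therefore implicitly used MPI_COMM_WORLD) now receive one from the grid. A minimal sketch of the idea, with hypothetical names (IORankCollector is not a real class in the code base):

    #include <mpi.h>

    // Hypothetical helper illustrating the pattern: keep the communicator
    // handed in by the caller instead of hard-wiring MPI_COMM_WORLD.
    class IORankCollector
    {
    public:
        explicit IORankCollector(MPI_Comm comm)
            : comm_(comm)          // was effectively comm_(MPI_COMM_WORLD)
        {}

        int rank() const
        {
            int r = 0;
            MPI_Comm_rank(comm_, &r);
            return r;
        }

    private:
        MPI_Comm comm_;
    };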


@ -58,7 +58,7 @@ template<class ElementMapper, class GridView, class Scalar>
EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::EclGenericCpGridVanguard() EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::EclGenericCpGridVanguard()
{ {
#if HAVE_MPI #if HAVE_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_rank(EclGenericVanguard::comm(), &mpiRank);
#else #else
mpiRank = 0; mpiRank = 0;
#endif #endif
@ -85,7 +85,7 @@ void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::doLoadBalance_(Dun
EclGenericVanguard::ParallelWellStruct& parallelWells) EclGenericVanguard::ParallelWellStruct& parallelWells)
{ {
int mpiSize = 1; int mpiSize = 1;
MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); MPI_Comm_size(grid_->comm(), &mpiSize);
if (mpiSize > 1) { if (mpiSize > 1) {
// the CpGrid's loadBalance() method likes to have the transmissibilities as // the CpGrid's loadBalance() method likes to have the transmissibilities as
@ -188,7 +188,7 @@ template<class ElementMapper, class GridView, class Scalar>
void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::distributeFieldProps_(EclipseState& eclState1) void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::distributeFieldProps_(EclipseState& eclState1)
{ {
int mpiSize = 1; int mpiSize = 1;
MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); MPI_Comm_size(grid_->comm(), &mpiSize);
if (mpiSize > 1) { if (mpiSize > 1) {
try try
@ -230,7 +230,12 @@ void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::doCreateGrids_(Ecl
OpmLog::info("\nProcessing grid"); OpmLog::info("\nProcessing grid");
} }
#if HAVE_MPI
grid_.reset(new Dune::CpGrid(EclGenericVanguard::comm()));
#else
grid_.reset(new Dune::CpGrid()); grid_.reset(new Dune::CpGrid());
#endif
const auto& removed_cells = grid_->processEclipseFormat(input_grid, const auto& removed_cells = grid_->processEclipseFormat(input_grid,
&eclState, &eclState,
/*isPeriodic=*/false, /*isPeriodic=*/false,
@ -262,12 +267,13 @@ void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::doCreateGrids_(Ecl
{ {
const bool has_numerical_aquifer = eclState.aquifer().hasNumericalAquifer(); const bool has_numerical_aquifer = eclState.aquifer().hasNumericalAquifer();
int mpiSize = 1; int mpiSize = 1;
MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); MPI_Comm_size(grid_->comm(), &mpiSize);
// when there is numerical aquifers, new NNC are generated during grid processing // when there is numerical aquifers, new NNC are generated during grid processing
// we need to pass the NNC from root process to other processes // we need to pass the NNC from root process to other processes
if (has_numerical_aquifer && mpiSize > 1) { if (has_numerical_aquifer && mpiSize > 1) {
auto nnc_input = eclState.getInputNNC(); auto nnc_input = eclState.getInputNNC();
EclMpiSerializer ser(Dune::MPIHelper::getCollectiveCommunication()); const auto& comm_nnc = Opm::Parallel::Communication();
EclMpiSerializer ser(comm_nnc);
ser.broadcast(nnc_input); ser.broadcast(nnc_input);
if (mpiRank > 0) { if (mpiRank > 0) {
eclState.setInputNNC(nnc_input); eclState.setInputNNC(nnc_input);
@ -312,7 +318,7 @@ void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::doFilterConnection
{ {
// Broadcast another time to remove inactive peforations on // Broadcast another time to remove inactive peforations on
// slave processors. // slave processors.
eclScheduleBroadcast(schedule); eclScheduleBroadcast(EclGenericVanguard::comm(), schedule);
} }
catch(const std::exception& broadcast_error) catch(const std::exception& broadcast_error)
{ {
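One detail the calls above rely on (stated here as an assumption about dune-common, not something this diff changes): grid_->comm() returns a Dune communication wrapper rather than a raw MPI_Comm, and it is the wrapper's implicit conversion to MPI_Comm that lets it be passed straight to MPI_Comm_size. Sketch, using the Dune 2.7+ spelling:

    #include <dune/common/parallel/mpihelper.hh>
    #include <mpi.h>

    // Assumes MPI is initialized and that Dune::Communication<MPI_Comm>
    // converts implicitly to MPI_Comm.
    Dune::Communication<MPI_Comm> duneComm(MPI_COMM_WORLD);
    int mpiSize = 1;
    MPI_Comm_size(duneComm, &mpiSize);   // works via the implicit conversion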


@ -62,7 +62,7 @@ std::shared_ptr<Schedule> EclGenericVanguard::externalEclSchedule_;
std::shared_ptr<SummaryConfig> EclGenericVanguard::externalEclSummaryConfig_; std::shared_ptr<SummaryConfig> EclGenericVanguard::externalEclSummaryConfig_;
std::unique_ptr<UDQState> EclGenericVanguard::externalUDQState_; std::unique_ptr<UDQState> EclGenericVanguard::externalUDQState_;
std::unique_ptr<Action::State> EclGenericVanguard::externalActionState_; std::unique_ptr<Action::State> EclGenericVanguard::externalActionState_;
std::unique_ptr<EclGenericVanguard::CommunicationType> EclGenericVanguard::comm_; std::unique_ptr<Parallel::Communication> EclGenericVanguard::comm_;
EclGenericVanguard::EclGenericVanguard() EclGenericVanguard::EclGenericVanguard()
: python(std::make_shared<Python>()) : python(std::make_shared<Python>())
@ -295,7 +295,7 @@ void EclGenericVanguard::init()
parseContext_ = createParseContext(ignoredKeywords_, eclStrictParsing_); parseContext_ = createParseContext(ignoredKeywords_, eclStrictParsing_);
} }
readDeck(myRank, fileName_, deck_, eclState_, eclSchedule_, udqState_, actionState_, readDeck(EclGenericVanguard::comm(), fileName_, deck_, eclState_, eclSchedule_, udqState_, actionState_,
eclSummaryConfig_, std::move(errorGuard), python, eclSummaryConfig_, std::move(errorGuard), python,
std::move(parseContext_), /* initFromRestart = */ false, std::move(parseContext_), /* initFromRestart = */ false,
/* checkDeck = */ enableExperiments_, outputInterval_); /* checkDeck = */ enableExperiments_, outputInterval_);
@ -338,11 +338,8 @@ void EclGenericVanguard::init()
} }
} }
} }
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
const auto& comm = Dune::MPIHelper::getCommunication(); const auto& comm = Parallel::Communication();
#else
const auto& comm = Dune::MPIHelper::getCollectiveCommunication();
#endif
hasMsWell = comm.max(hasMsWell); hasMsWell = comm.max(hasMsWell);
if (hasMsWell) if (hasMsWell)
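The Parallel::Communication() temporaries (here and in doCreateGrids_ above) behave like the Dune::MPIHelper calls they replace because, assuming the usual dune-common default argument, a default-constructed communication object wraps MPI_COMM_WORLD:

    // Assumption about dune-common: the default constructor uses MPI_COMM_WORLD,
    // so these two objects refer to the same communicator.
    Opm::Parallel::Communication a;                  // defaults to the world communicator
    Opm::Parallel::Communication b(MPI_COMM_WORLD);  // explicit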


@ -44,6 +44,14 @@
#include <utility> #include <utility>
#include <vector> #include <vector>
namespace Opm::Parallel {
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
using Communication = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
#else
using Communication = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
#endif
}
namespace Opm { namespace Opm {
namespace Action { class State; } namespace Action { class State; }
@ -60,13 +68,8 @@ class UDQState;
class EclGenericVanguard { class EclGenericVanguard {
public: public:
using ParallelWellStruct = std::vector<std::pair<std::string,bool>>;
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7) using ParallelWellStruct = std::vector<std::pair<std::string,bool>>;
using CommunicationType = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
#else
using CommunicationType = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
#endif
/*! /*!
* \brief Constructor. * \brief Constructor.
@ -277,11 +280,11 @@ public:
{ return parallelWells_; } { return parallelWells_; }
//! \brief Set global communication. //! \brief Set global communication.
static void setCommunication(std::unique_ptr<CommunicationType> comm) static void setCommunication(std::unique_ptr<Opm::Parallel::Communication> comm)
{ comm_ = std::move(comm); } { comm_ = std::move(comm); }
//! \brief Obtain global communicator. //! \brief Obtain global communicator.
static CommunicationType& comm() static Parallel::Communication& comm()
{ {
assert(comm_); assert(comm_);
return *comm_; return *comm_;
@ -310,7 +313,7 @@ protected:
static bool externalDeckSet_; static bool externalDeckSet_;
static std::unique_ptr<UDQState> externalUDQState_; static std::unique_ptr<UDQState> externalUDQState_;
static std::unique_ptr<Action::State> externalActionState_; static std::unique_ptr<Action::State> externalActionState_;
static std::unique_ptr<CommunicationType> comm_; static std::unique_ptr<Parallel::Communication> comm_;
std::string caseName_; std::string caseName_;
std::string fileName_; std::string fileName_;
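With setCommunication() and comm() now working on the Parallel::Communication alias, an embedding application can install any communicator before the first simulator object is built. A sketch (assumes MPI is already initialized; the duplicated world communicator stands in for whatever sub-communicator the caller actually wants):

    #include <dune/common/parallel/mpihelper.hh>
    #include <mpi.h>
    #include <memory>

    MPI_Comm sub_comm;
    MPI_Comm_dup(MPI_COMM_WORLD, &sub_comm);

    // Install the communicator; every later EclGenericVanguard::comm()
    // call returns this one.
    Opm::EclGenericVanguard::setCommunication(
        std::make_unique<Opm::Parallel::Communication>(sub_comm));

    const auto& comm = Opm::EclGenericVanguard::comm();
    const int rank = comm.rank();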


@ -512,11 +512,11 @@ evalSummary(int reportStepNum,
if (collectToIORank_.isParallel()) { if (collectToIORank_.isParallel()) {
#ifdef HAVE_MPI #ifdef HAVE_MPI
unsigned long buffer_size = buffer.size(); unsigned long buffer_size = buffer.size();
MPI_Bcast(&buffer_size, 1, MPI_UNSIGNED_LONG, collectToIORank_.ioRank, MPI_COMM_WORLD); MPI_Bcast(&buffer_size, 1, MPI_UNSIGNED_LONG, collectToIORank_.ioRank, grid_.comm());
if (!collectToIORank_.isIORank()) if (!collectToIORank_.isIORank())
buffer.resize( buffer_size ); buffer.resize( buffer_size );
MPI_Bcast(buffer.data(), buffer_size, MPI_CHAR, collectToIORank_.ioRank, MPI_COMM_WORLD); MPI_Bcast(buffer.data(), buffer_size, MPI_CHAR, collectToIORank_.ioRank, grid_.comm());
if (!collectToIORank_.isIORank()) { if (!collectToIORank_.isIORank()) {
SummaryState& st = summaryState; SummaryState& st = summaryState;
st.deserialize(buffer); st.deserialize(buffer);


@ -21,6 +21,7 @@
#ifndef ECL_MPI_SERIALIZER_HH #ifndef ECL_MPI_SERIALIZER_HH
#define ECL_MPI_SERIALIZER_HH #define ECL_MPI_SERIALIZER_HH
#include <dune/common/version.hh>
#include <opm/simulators/utils/ParallelRestart.hpp> #include <opm/simulators/utils/ParallelRestart.hpp>
#include <optional> #include <optional>
@ -39,7 +40,7 @@ class EclMpiSerializer {
public: public:
//! \brief Constructor. //! \brief Constructor.
//! \param comm The global communicator to broadcast using //! \param comm The global communicator to broadcast using
explicit EclMpiSerializer(Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator> comm) : explicit EclMpiSerializer(Opm::Parallel::Communication comm) :
m_comm(comm) m_comm(comm)
{} {}
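Usage sketch for the new constructor, matching the broadcast of the numerical-aquifer NNC input shown in EclGenericCpGridVanguard above (nnc_input stands in for any object the serializer can broadcast):

    Opm::Parallel::Communication comm = Opm::EclGenericVanguard::comm();
    Opm::EclMpiSerializer ser(comm);
    ser.broadcast(nnc_input);   // root rank of 'comm' sends, the other ranks receive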


@ -2121,7 +2121,7 @@ private:
Scalar>(fs, iq.pvtRegionIndex()); Scalar>(fs, iq.pvtRegionIndex());
} }
} }
OPM_END_PARALLEL_TRY_CATCH("EclProblem::_updateCompositionLayers() failed: "); OPM_END_PARALLEL_TRY_CATCH("EclProblem::_updateCompositionLayers() failed: ", this->simulator().vanguard().grid().comm());
} }
bool updateMaxOilSaturation_() bool updateMaxOilSaturation_()
@ -2150,7 +2150,7 @@ private:
this->maxOilSaturation_[compressedDofIdx] = std::max(this->maxOilSaturation_[compressedDofIdx], So); this->maxOilSaturation_[compressedDofIdx] = std::max(this->maxOilSaturation_[compressedDofIdx], So);
} }
OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMayOilSaturation() failed:"); OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMayOilSaturation() failed:", vanguard.grid().comm());
// we need to invalidate the intensive quantities cache here because the // we need to invalidate the intensive quantities cache here because the
// derivatives of Rs and Rv will most likely have changed // derivatives of Rs and Rv will most likely have changed
return true; return true;
@ -2184,7 +2184,7 @@ private:
Scalar Sw = decay<Scalar>(fs.saturation(waterPhaseIdx)); Scalar Sw = decay<Scalar>(fs.saturation(waterPhaseIdx));
this->maxWaterSaturation_[compressedDofIdx] = std::max(this->maxWaterSaturation_[compressedDofIdx], Sw); this->maxWaterSaturation_[compressedDofIdx] = std::max(this->maxWaterSaturation_[compressedDofIdx], Sw);
} }
OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMayWaterSaturation() failed: "); OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMayWaterSaturation() failed: ", vanguard.grid().comm());
return true; return true;
} }
@ -2214,8 +2214,7 @@ private:
std::min(this->minOilPressure_[compressedDofIdx], std::min(this->minOilPressure_[compressedDofIdx],
getValue(fs.pressure(oilPhaseIdx))); getValue(fs.pressure(oilPhaseIdx)));
} }
OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMinPressure_() failed: ", this->simulator().vanguard().grid().comm());
OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMinPressure_() failed: ");
return true; return true;
} }
@ -2658,7 +2657,7 @@ private:
const auto& intQuants = elemCtx.intensiveQuantities(/*spaceIdx=*/0, /*timeIdx=*/0); const auto& intQuants = elemCtx.intensiveQuantities(/*spaceIdx=*/0, /*timeIdx=*/0);
materialLawManager_->updateHysteresis(intQuants.fluidState(), compressedDofIdx); materialLawManager_->updateHysteresis(intQuants.fluidState(), compressedDofIdx);
} }
OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateHyteresis_(): "); OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateHyteresis_(): ", vanguard.grid().comm());
return true; return true;
} }
@ -2683,7 +2682,7 @@ private:
this->maxPolymerAdsorption_[compressedDofIdx] = std::max(this->maxPolymerAdsorption_[compressedDofIdx], this->maxPolymerAdsorption_[compressedDofIdx] = std::max(this->maxPolymerAdsorption_[compressedDofIdx],
scalarValue(intQuants.polymerAdsorption())); scalarValue(intQuants.polymerAdsorption()));
} }
OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMaxPolymerAdsorption_(): "); OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateMaxPolymerAdsorption_(): ", vanguard.grid().comm());
} }
struct PffDofData_ struct PffDofData_
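All OPM_END_PARALLEL_TRY_CATCH call sites now name the communicator whose ranks must agree on whether an exception occurred. Usage sketch (the loop body, message text and the 'vanguard' object are placeholders):

    OPM_BEGIN_PARALLEL_TRY_CATCH();
    // ... per-element work that may throw on some ranks only ...
    OPM_END_PARALLEL_TRY_CATCH("EclProblem::updateSomething() failed: ",
                               vanguard.grid().comm());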


@ -420,7 +420,7 @@ public:
for (size_t wellIdx = 0; wellIdx < wellSize; ++wellIdx) for (size_t wellIdx = 0; wellIdx < wellSize; ++wellIdx)
wells_[wellIdx]->beginIterationAccumulate(elemCtx, /*timeIdx=*/0); wells_[wellIdx]->beginIterationAccumulate(elemCtx, /*timeIdx=*/0);
} }
OPM_END_PARALLEL_TRY_CATCH("EclWellManager::beginIteration() failed: "); OPM_END_PARALLEL_TRY_CATCH("EclWellManager::beginIteration() failed: ", simulator_.vanguard().grid().comm());
// call the postprocessing routines // call the postprocessing routines
for (size_t wellIdx = 0; wellIdx < wellSize; ++wellIdx) for (size_t wellIdx = 0; wellIdx < wellSize; ++wellIdx)


@ -393,7 +393,7 @@ private:
eclOutputModule_->processElement(elemCtx); eclOutputModule_->processElement(elemCtx);
} }
OPM_END_PARALLEL_TRY_CATCH("EclWriter::prepareLocalCellData() failed: ") OPM_END_PARALLEL_TRY_CATCH("EclWriter::prepareLocalCellData() failed: ", simulator_.vanguard().grid().comm())
} }
Simulator& simulator_; Simulator& simulator_;


@ -128,7 +128,8 @@ public:
const auto& iq = elemCtx.intensiveQuantities(0, 0); const auto& iq = elemCtx.intensiveQuantities(0, 0);
pressure_previous_[idx] = getValue(iq.fluidState().pressure(phaseIdx_())); pressure_previous_[idx] = getValue(iq.fluidState().pressure(phaseIdx_()));
} }
OPM_END_PARALLEL_TRY_CATCH("AquiferInterface::beginTimeStep() failed: ");
OPM_END_PARALLEL_TRY_CATCH("AquiferInterface::beginTimeStep() failed: ", ebos_simulator_.vanguard().grid().comm());
} }
template <class Context> template <class Context>


@ -228,8 +228,7 @@ private:
cell_pressure[idx] = water_pressure_reservoir; cell_pressure[idx] = water_pressure_reservoir;
} }
OPM_END_PARALLEL_TRY_CATCH("AquiferNumerical::calculateAquiferPressure() failed: ", this->ebos_simulator_.vanguard().grid().comm());
OPM_END_PARALLEL_TRY_CATCH("AquiferNumerical::calculateAquiferPressure() failed: ");
const auto& comm = this->ebos_simulator_.vanguard().grid().comm(); const auto& comm = this->ebos_simulator_.vanguard().grid().comm();
comm.sum(&sum_pressure_watervolume, 1); comm.sum(&sum_pressure_watervolume, 1);
comm.sum(&sum_watervolume, 1); comm.sum(&sum_watervolume, 1);


@ -727,7 +727,7 @@ namespace Opm {
} }
OPM_END_PARALLEL_TRY_CATCH("BlackoilModelEbos::localConvergenceData() failed: "); OPM_END_PARALLEL_TRY_CATCH("BlackoilModelEbos::localConvergenceData() failed: ", grid_.comm());
// compute local average in terms of global number of elements // compute local average in terms of global number of elements
const int bSize = B_avg.size(); const int bSize = B_avg.size();
@ -772,7 +772,7 @@ namespace Opm {
} }
} }
OPM_END_PARALLEL_TRY_CATCH("BlackoilModelEbos::ComputeCnvError() failed: "); OPM_END_PARALLEL_TRY_CATCH("BlackoilModelEbos::ComputeCnvError() failed: ", grid_.comm());
return grid_.comm().sum(errorPV); return grid_.comm().sum(errorPV);
} }


@ -25,6 +25,7 @@
#include <sys/utsname.h> #include <sys/utsname.h>
#include <dune/common/version.hh>
#include <opm/simulators/flow/SimulatorFullyImplicitBlackoilEbos.hpp> #include <opm/simulators/flow/SimulatorFullyImplicitBlackoilEbos.hpp>
#include <opm/simulators/utils/ParallelFileMerger.hpp> #include <opm/simulators/utils/ParallelFileMerger.hpp>
#include <opm/simulators/utils/moduleVersion.hpp> #include <opm/simulators/utils/moduleVersion.hpp>
@ -44,6 +45,14 @@
#include <dune/common/parallel/mpihelper.hh> #include <dune/common/parallel/mpihelper.hh>
#endif #endif
namespace Opm::Parallel {
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
using Communication = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
#else
using Communication = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
#endif
}
namespace Opm::Properties { namespace Opm::Properties {
template<class TypeTag, class MyTypeTag> template<class TypeTag, class MyTypeTag>
@ -104,7 +113,7 @@ namespace Opm
} }
// Read the command line parameters. Throws an exception if something goes wrong. // Read the command line parameters. Throws an exception if something goes wrong.
static int setupParameters_(int argc, char** argv) static int setupParameters_(int argc, char** argv, Parallel::Communication comm)
{ {
using ParamsMeta = GetProp<TypeTag, Properties::ParameterMetaData>; using ParamsMeta = GetProp<TypeTag, Properties::ParameterMetaData>;
if (!ParamsMeta::registrationOpen()) { if (!ParamsMeta::registrationOpen()) {
@ -214,10 +223,7 @@ namespace Opm
EWOMS_END_PARAM_REGISTRATION(TypeTag); EWOMS_END_PARAM_REGISTRATION(TypeTag);
int mpiRank = 0; int mpiRank = comm.rank();
#if HAVE_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
#endif
// read in the command line parameters // read in the command line parameters
int status = ::Opm::setupParameters_<TypeTag>(argc, const_cast<const char**>(argv), /*doRegistration=*/false, /*allowUnused=*/true, /*handleHelp=*/(mpiRank==0)); int status = ::Opm::setupParameters_<TypeTag>(argc, const_cast<const char**>(argv), /*doRegistration=*/false, /*allowUnused=*/true, /*handleHelp=*/(mpiRank==0));
@ -229,11 +235,8 @@ namespace Opm
if (mpiRank == 0) { if (mpiRank == 0) {
unknownKeyWords = Parameters::printUnused<TypeTag>(std::cerr); unknownKeyWords = Parameters::printUnused<TypeTag>(std::cerr);
} }
#if HAVE_MPI int globalUnknownKeyWords = comm.sum(unknownKeyWords);
int globalUnknownKeyWords;
MPI_Allreduce(&unknownKeyWords, &globalUnknownKeyWords, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
unknownKeyWords = globalUnknownKeyWords; unknownKeyWords = globalUnknownKeyWords;
#endif
if ( unknownKeyWords ) if ( unknownKeyWords )
{ {
if ( mpiRank == 0 ) if ( mpiRank == 0 )
@ -275,7 +278,7 @@ namespace Opm
return status; return status;
} }
static void printBanner() static void printBanner(Parallel::Communication comm)
{ {
const int lineLen = 70; const int lineLen = 70;
const std::string version = moduleVersionName(); const std::string version = moduleVersionName();
@ -293,7 +296,6 @@ namespace Opm
std::cout << "**********************************************************************\n\n"; std::cout << "**********************************************************************\n\n";
int threads = 1; int threads = 1;
int mpiSize = 1;
#ifdef _OPENMP #ifdef _OPENMP
// This function is called before the parallel OpenMP stuff gets initialized. // This function is called before the parallel OpenMP stuff gets initialized.
@ -310,9 +312,7 @@ namespace Opm
threads = std::min(input_threads, omp_get_max_threads()); threads = std::min(input_threads, omp_get_max_threads());
#endif #endif
#if HAVE_MPI int mpiSize = comm.size();
MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
#endif
std::cout << "Using "<< mpiSize << " MPI processes with "<< threads <<" OMP threads on each \n\n"; std::cout << "Using "<< mpiSize << " MPI processes with "<< threads <<" OMP threads on each \n\n";
} }
@ -402,7 +402,7 @@ namespace Opm
try { try {
// deal with some administrative boilerplate // deal with some administrative boilerplate
int status = setupParameters_(this->argc_, this->argv_); int status = setupParameters_(this->argc_, this->argv_, EclGenericVanguard::comm());
if (status) if (status)
return status; return status;
@ -450,13 +450,9 @@ namespace Opm
// determine the rank of the current process and the number of processes // determine the rank of the current process and the number of processes
// involved in the simulation. MPI must have already been initialized // involved in the simulation. MPI must have already been initialized
// here. (yes, the name of this method is misleading.) // here. (yes, the name of this method is misleading.)
#if HAVE_MPI auto comm = EclGenericVanguard::comm();
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank_); mpi_rank_ = comm.rank();
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size_); mpi_size_ = comm.size();
#else
mpi_rank_ = 0;
mpi_size_ = 1;
#endif
#if _OPENMP #if _OPENMP
// if openMP is available, default to 2 threads per process. // if openMP is available, default to 2 threads per process.
@ -502,7 +498,7 @@ namespace Opm
void setupEbosSimulator() void setupEbosSimulator()
{ {
ebosSimulator_.reset(new EbosSimulator(/*verbose=*/false)); ebosSimulator_.reset(new EbosSimulator(EclGenericVanguard::comm(), /*verbose=*/false));
ebosSimulator_->executionTimer().start(); ebosSimulator_->executionTimer().start();
ebosSimulator_->model().applyInitialSolution(); ebosSimulator_->model().applyInitialSolution();
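Taken together, the changes in this file replace raw MPI calls on MPI_COMM_WORLD with the collective interface of the installed communicator. The correspondence, sketched with the names used above:

    Opm::Parallel::Communication comm = Opm::EclGenericVanguard::comm();

    int mpiRank = comm.rank();   // was MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank)
    int mpiSize = comm.size();   // was MPI_Comm_size(MPI_COMM_WORLD, &mpiSize)

    int unknownKeyWords = 0;     // per-rank flag, as in setupParameters_()
    int globalUnknownKeyWords = comm.sum(unknownKeyWords);
    // was MPI_Allreduce(..., 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD)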


@ -149,8 +149,27 @@ namespace Opm
initMPI(); initMPI();
} }
#define DEMONSTRATE_RUN_WITH_NONWORLD_COMM 1
~Main() ~Main()
{ {
#if DEMONSTRATE_RUN_WITH_NONWORLD_COMM
#if HAVE_MPI
// Cannot use EclGenericVanguard::comm()
// to get world size here, as it may be
// a split communication at this point.
int world_size;
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
if (world_size > 1) {
MPI_Comm new_comm = EclGenericVanguard::comm();
int result;
MPI_Comm_compare(MPI_COMM_WORLD, new_comm, &result);
assert(result == MPI_UNEQUAL);
MPI_Comm_free(&new_comm);
}
#endif // HAVE_MPI
#endif // DEMONSTRATE_RUN_WITH_NONWORLD_COMM
EclGenericVanguard::setCommunication(nullptr); EclGenericVanguard::setCommunication(nullptr);
#if HAVE_MPI && !HAVE_DUNE_FEM #if HAVE_MPI && !HAVE_DUNE_FEM
@ -175,29 +194,44 @@ namespace Opm
#elif HAVE_MPI #elif HAVE_MPI
MPI_Init(&argc_, &argv_); MPI_Init(&argc_, &argv_);
#endif #endif
EclGenericVanguard::setCommunication(std::make_unique<EclGenericVanguard::CommunicationType>()); EclGenericVanguard::setCommunication(std::make_unique<Parallel::Communication>());
#if DEMONSTRATE_RUN_WITH_NONWORLD_COMM
#if HAVE_MPI
if (EclGenericVanguard::comm().size() > 1) {
int world_rank = EclGenericVanguard::comm().rank();
int color = (world_rank == 0);
MPI_Comm new_comm;
MPI_Comm_split(EclGenericVanguard::comm(), color, world_rank, &new_comm);
isSimulationRank_ = (world_rank > 0);
EclGenericVanguard::setCommunication(std::make_unique<Parallel::Communication>(new_comm));
}
#endif // HAVE_MPI
#endif // DEMONSTRATE_RUN_WITH_NONWORLD_COMM
} }
int runDynamic() int runDynamic()
{ {
int exitCode = EXIT_SUCCESS; int exitCode = EXIT_SUCCESS;
if (isSimulationRank_) {
if (initialize_<Properties::TTag::FlowEarlyBird>(exitCode)) { if (initialize_<Properties::TTag::FlowEarlyBird>(exitCode)) {
return dispatchDynamic_(); return dispatchDynamic_();
} else {
return exitCode;
} }
} }
return exitCode;
}
template <class TypeTag> template <class TypeTag>
int runStatic() int runStatic()
{ {
int exitCode = EXIT_SUCCESS; int exitCode = EXIT_SUCCESS;
if (isSimulationRank_) {
if (initialize_<TypeTag>(exitCode)) { if (initialize_<TypeTag>(exitCode)) {
return dispatchStatic_<TypeTag>(); return dispatchStatic_<TypeTag>();
} else {
return exitCode;
} }
} }
return exitCode;
}
// To be called from the Python interface code. Only do the // To be called from the Python interface code. Only do the
// initialization and then return a pointer to the FlowEbosMain // initialization and then return a pointer to the FlowEbosMain
@ -388,7 +422,7 @@ namespace Opm
using PreProblem = GetPropType<PreTypeTag, Properties::Problem>; using PreProblem = GetPropType<PreTypeTag, Properties::Problem>;
PreProblem::setBriefDescription("Flow, an advanced reservoir simulator for ECL-decks provided by the Open Porous Media project."); PreProblem::setBriefDescription("Flow, an advanced reservoir simulator for ECL-decks provided by the Open Porous Media project.");
int status = FlowMainEbos<PreTypeTag>::setupParameters_(argc_, argv_); int status = FlowMainEbos<PreTypeTag>::setupParameters_(argc_, argv_, EclGenericVanguard::comm());
if (status != 0) { if (status != 0) {
// if setupParameters_ returns a value smaller than 0, there was no error, but // if setupParameters_ returns a value smaller than 0, there was no error, but
// the program should abort. This is the case e.g. for the --help and the // the program should abort. This is the case e.g. for the --help and the
@ -433,7 +467,7 @@ namespace Opm
return false; return false;
} }
if (outputCout_) { if (outputCout_) {
FlowMainEbos<PreTypeTag>::printBanner(); FlowMainEbos<PreTypeTag>::printBanner(EclGenericVanguard::comm());
} }
// Create Deck and EclipseState. // Create Deck and EclipseState.
try { try {
@ -466,7 +500,7 @@ namespace Opm
if (output_param >= 0) if (output_param >= 0)
outputInterval = output_param; outputInterval = output_param;
readDeck(mpiRank, deckFilename, deck_, eclipseState_, schedule_, udqState_, actionState_, readDeck(EclGenericVanguard::comm(), deckFilename, deck_, eclipseState_, schedule_, udqState_, actionState_,
summaryConfig_, nullptr, python, std::move(parseContext), summaryConfig_, nullptr, python, std::move(parseContext),
init_from_restart_file, outputCout_, outputInterval); init_from_restart_file, outputCout_, outputInterval);
@ -552,6 +586,8 @@ namespace Opm
std::shared_ptr<EclipseState> eclipseState_; std::shared_ptr<EclipseState> eclipseState_;
std::shared_ptr<Schedule> schedule_; std::shared_ptr<Schedule> schedule_;
std::shared_ptr<SummaryConfig> summaryConfig_; std::shared_ptr<SummaryConfig> summaryConfig_;
// To demonstrate run with non_world_comm
bool isSimulationRank_ = true;
}; };
} // namespace Opm } // namespace Opm
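The DEMONSTRATE_RUN_WITH_NONWORLD_COMM block in initMPI() is the end-to-end exercise of the commit: world rank 0 is split off and skips the simulation, while the remaining ranks install the split communicator. The essential steps, extracted as a sketch:

    int world_rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

    int color = (world_rank == 0);          // rank 0 goes into its own group
    MPI_Comm new_comm;
    MPI_Comm_split(MPI_COMM_WORLD, color, world_rank, &new_comm);

    bool isSimulationRank = (world_rank > 0);
    Opm::EclGenericVanguard::setCommunication(
        std::make_unique<Opm::Parallel::Communication>(new_comm));
    // runDynamic()/runStatic() later dispatch only when isSimulationRank is true.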


@ -44,6 +44,17 @@
#include <dune/common/enumset.hh> #include <dune/common/enumset.hh>
#include <opm/common/utility/platform_dependent/reenable_warnings.h> #include <opm/common/utility/platform_dependent/reenable_warnings.h>
#include <dune/common/version.hh>
#include <dune/common/parallel/mpihelper.hh>
namespace Opm::Parallel {
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
using Communication = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
#else
using Communication = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
#endif
} // end namespace Communication
namespace Opm namespace Opm
{ {
namespace namespace
@ -109,7 +120,7 @@ public:
return remoteIndices_; return remoteIndices_;
} }
/// \brief Get the Collective MPI communicator that we use. /// \brief Get the Collective MPI communicator that we use.
Dune::CollectiveCommunication<MPI_Comm> communicator() const Parallel::Communication communicator() const
{ {
return communicator_; return communicator_;
} }


@ -128,7 +128,7 @@ namespace Amg
weights[index] = bweights; weights[index] = bweights;
++index; ++index;
} }
OPM_END_PARALLEL_TRY_CATCH("getTrueImpesWeights() failed: "); OPM_END_PARALLEL_TRY_CATCH("getTrueImpesWeights() failed: ", elemCtx.simulator().vanguard().grid().comm());
} }
} // namespace Amg } // namespace Amg


@ -33,57 +33,57 @@ namespace
void packReservoirFailure(const ConvergenceReport::ReservoirFailure& f, void packReservoirFailure(const ConvergenceReport::ReservoirFailure& f,
std::vector<char>& buf, std::vector<char>& buf,
int& offset) int& offset, MPI_Comm mpi_communicator)
{ {
int type = static_cast<int>(f.type()); int type = static_cast<int>(f.type());
int severity = static_cast<int>(f.severity()); int severity = static_cast<int>(f.severity());
int phase = f.phase(); int phase = f.phase();
MPI_Pack(&type, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD); MPI_Pack(&type, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
MPI_Pack(&severity, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD); MPI_Pack(&severity, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
MPI_Pack(&phase, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD); MPI_Pack(&phase, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
} }
void packWellFailure(const ConvergenceReport::WellFailure& f, void packWellFailure(const ConvergenceReport::WellFailure& f,
std::vector<char>& buf, std::vector<char>& buf,
int& offset) int& offset, MPI_Comm mpi_communicator)
{ {
int type = static_cast<int>(f.type()); int type = static_cast<int>(f.type());
int severity = static_cast<int>(f.severity()); int severity = static_cast<int>(f.severity());
int phase = f.phase(); int phase = f.phase();
MPI_Pack(&type, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD); MPI_Pack(&type, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
MPI_Pack(&severity, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD); MPI_Pack(&severity, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
MPI_Pack(&phase, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD); MPI_Pack(&phase, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
int name_length = f.wellName().size() + 1; // Adding 1 for the null terminator. int name_length = f.wellName().size() + 1; // Adding 1 for the null terminator.
MPI_Pack(&name_length, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD); MPI_Pack(&name_length, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
MPI_Pack(const_cast<char*>(f.wellName().c_str()), name_length, MPI_CHAR, buf.data(), buf.size(), &offset, MPI_COMM_WORLD); MPI_Pack(const_cast<char*>(f.wellName().c_str()), name_length, MPI_CHAR, buf.data(), buf.size(), &offset, mpi_communicator);
} }
void packConvergenceReport(const ConvergenceReport& local_report, void packConvergenceReport(const ConvergenceReport& local_report,
std::vector<char>& buf, std::vector<char>& buf,
int& offset) int& offset, MPI_Comm mpi_communicator)
{ {
// Pack the data. // Pack the data.
// Status will not be packed, it is possible to deduce from the other data. // Status will not be packed, it is possible to deduce from the other data.
// Reservoir failures. // Reservoir failures.
const auto rf = local_report.reservoirFailures(); const auto rf = local_report.reservoirFailures();
int num_rf = rf.size(); int num_rf = rf.size();
MPI_Pack(&num_rf, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD); MPI_Pack(&num_rf, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
for (const auto& f : rf) { for (const auto& f : rf) {
packReservoirFailure(f, buf, offset); packReservoirFailure(f, buf, offset, mpi_communicator);
} }
// Well failures. // Well failures.
const auto wf = local_report.wellFailures(); const auto wf = local_report.wellFailures();
int num_wf = wf.size(); int num_wf = wf.size();
MPI_Pack(&num_wf, 1, MPI_INT, buf.data(), buf.size(), &offset, MPI_COMM_WORLD); MPI_Pack(&num_wf, 1, MPI_INT, buf.data(), buf.size(), &offset, mpi_communicator);
for (const auto& f : wf) { for (const auto& f : wf) {
packWellFailure(f, buf, offset); packWellFailure(f, buf, offset, mpi_communicator);
} }
} }
int messageSize(const ConvergenceReport& local_report) int messageSize(const ConvergenceReport& local_report, MPI_Comm mpi_communicator)
{ {
int int_pack_size = 0; int int_pack_size = 0;
MPI_Pack_size(1, MPI_INT, MPI_COMM_WORLD, &int_pack_size); MPI_Pack_size(1, MPI_INT, mpi_communicator, &int_pack_size);
const int num_rf = local_report.reservoirFailures().size(); const int num_rf = local_report.reservoirFailures().size();
const int num_wf = local_report.wellFailures().size(); const int num_wf = local_report.wellFailures().size();
int wellnames_length = 0; int wellnames_length = 0;
@ -93,33 +93,33 @@ namespace
return (2 + 3*num_rf + 4*num_wf) * int_pack_size + wellnames_length; return (2 + 3*num_rf + 4*num_wf) * int_pack_size + wellnames_length;
} }
ConvergenceReport::ReservoirFailure unpackReservoirFailure(const std::vector<char>& recv_buffer, int& offset) ConvergenceReport::ReservoirFailure unpackReservoirFailure(const std::vector<char>& recv_buffer, int& offset, MPI_Comm mpi_communicator)
{ {
int type = -1; int type = -1;
int severity = -1; int severity = -1;
int phase = -1; int phase = -1;
auto* data = const_cast<char*>(recv_buffer.data()); auto* data = const_cast<char*>(recv_buffer.data());
MPI_Unpack(data, recv_buffer.size(), &offset, &type, 1, MPI_INT, MPI_COMM_WORLD); MPI_Unpack(data, recv_buffer.size(), &offset, &type, 1, MPI_INT, mpi_communicator);
MPI_Unpack(data, recv_buffer.size(), &offset, &severity, 1, MPI_INT, MPI_COMM_WORLD); MPI_Unpack(data, recv_buffer.size(), &offset, &severity, 1, MPI_INT, mpi_communicator);
MPI_Unpack(data, recv_buffer.size(), &offset, &phase, 1, MPI_INT, MPI_COMM_WORLD); MPI_Unpack(data, recv_buffer.size(), &offset, &phase, 1, MPI_INT, mpi_communicator);
return ConvergenceReport::ReservoirFailure(static_cast<ConvergenceReport::ReservoirFailure::Type>(type), return ConvergenceReport::ReservoirFailure(static_cast<ConvergenceReport::ReservoirFailure::Type>(type),
static_cast<ConvergenceReport::Severity>(severity), static_cast<ConvergenceReport::Severity>(severity),
phase); phase);
} }
ConvergenceReport::WellFailure unpackWellFailure(const std::vector<char>& recv_buffer, int& offset) ConvergenceReport::WellFailure unpackWellFailure(const std::vector<char>& recv_buffer, int& offset, MPI_Comm mpi_communicator)
{ {
int type = -1; int type = -1;
int severity = -1; int severity = -1;
int phase = -1; int phase = -1;
auto* data = const_cast<char*>(recv_buffer.data()); auto* data = const_cast<char*>(recv_buffer.data());
MPI_Unpack(data, recv_buffer.size(), &offset, &type, 1, MPI_INT, MPI_COMM_WORLD); MPI_Unpack(data, recv_buffer.size(), &offset, &type, 1, MPI_INT, mpi_communicator);
MPI_Unpack(data, recv_buffer.size(), &offset, &severity, 1, MPI_INT, MPI_COMM_WORLD); MPI_Unpack(data, recv_buffer.size(), &offset, &severity, 1, MPI_INT, mpi_communicator);
MPI_Unpack(data, recv_buffer.size(), &offset, &phase, 1, MPI_INT, MPI_COMM_WORLD); MPI_Unpack(data, recv_buffer.size(), &offset, &phase, 1, MPI_INT, mpi_communicator);
int name_length = -1; int name_length = -1;
MPI_Unpack(data, recv_buffer.size(), &offset, &name_length, 1, MPI_INT, MPI_COMM_WORLD); MPI_Unpack(data, recv_buffer.size(), &offset, &name_length, 1, MPI_INT, mpi_communicator);
std::vector<char> namechars(name_length); std::vector<char> namechars(name_length);
MPI_Unpack(data, recv_buffer.size(), &offset, namechars.data(), name_length, MPI_CHAR, MPI_COMM_WORLD); MPI_Unpack(data, recv_buffer.size(), &offset, namechars.data(), name_length, MPI_CHAR, mpi_communicator);
std::string name(namechars.data()); std::string name(namechars.data());
return ConvergenceReport::WellFailure(static_cast<ConvergenceReport::WellFailure::Type>(type), return ConvergenceReport::WellFailure(static_cast<ConvergenceReport::WellFailure::Type>(type),
static_cast<ConvergenceReport::Severity>(severity), static_cast<ConvergenceReport::Severity>(severity),
@ -127,33 +127,33 @@ namespace
name); name);
} }
ConvergenceReport unpackSingleConvergenceReport(const std::vector<char>& recv_buffer, int& offset) ConvergenceReport unpackSingleConvergenceReport(const std::vector<char>& recv_buffer, int& offset, MPI_Comm mpi_communicator)
{ {
ConvergenceReport cr; ConvergenceReport cr;
int num_rf = -1; int num_rf = -1;
auto* data = const_cast<char*>(recv_buffer.data()); auto* data = const_cast<char*>(recv_buffer.data());
MPI_Unpack(data, recv_buffer.size(), &offset, &num_rf, 1, MPI_INT, MPI_COMM_WORLD); MPI_Unpack(data, recv_buffer.size(), &offset, &num_rf, 1, MPI_INT, mpi_communicator);
for (int rf = 0; rf < num_rf; ++rf) { for (int rf = 0; rf < num_rf; ++rf) {
ConvergenceReport::ReservoirFailure f = unpackReservoirFailure(recv_buffer, offset); ConvergenceReport::ReservoirFailure f = unpackReservoirFailure(recv_buffer, offset, mpi_communicator);
cr.setReservoirFailed(f); cr.setReservoirFailed(f);
} }
int num_wf = -1; int num_wf = -1;
MPI_Unpack(data, recv_buffer.size(), &offset, &num_wf, 1, MPI_INT, MPI_COMM_WORLD); MPI_Unpack(data, recv_buffer.size(), &offset, &num_wf, 1, MPI_INT, mpi_communicator);
for (int wf = 0; wf < num_wf; ++wf) { for (int wf = 0; wf < num_wf; ++wf) {
ConvergenceReport::WellFailure f = unpackWellFailure(recv_buffer, offset); ConvergenceReport::WellFailure f = unpackWellFailure(recv_buffer, offset, mpi_communicator);
cr.setWellFailed(f); cr.setWellFailed(f);
} }
return cr; return cr;
} }
ConvergenceReport unpackConvergenceReports(const std::vector<char>& recv_buffer, ConvergenceReport unpackConvergenceReports(const std::vector<char>& recv_buffer,
const std::vector<int>& displ) const std::vector<int>& displ, MPI_Comm mpi_communicator)
{ {
ConvergenceReport cr; ConvergenceReport cr;
const int num_processes = displ.size() - 1; const int num_processes = displ.size() - 1;
for (int process = 0; process < num_processes; ++process) { for (int process = 0; process < num_processes; ++process) {
int offset = displ[process]; int offset = displ[process];
cr += unpackSingleConvergenceReport(recv_buffer, offset); cr += unpackSingleConvergenceReport(recv_buffer, offset, mpi_communicator);
assert(offset == displ[process + 1]); assert(offset == displ[process + 1]);
} }
return cr; return cr;
@ -167,20 +167,20 @@ namespace Opm
/// Create a global convergence report combining local /// Create a global convergence report combining local
/// (per-process) reports. /// (per-process) reports.
ConvergenceReport gatherConvergenceReport(const ConvergenceReport& local_report) ConvergenceReport gatherConvergenceReport(const ConvergenceReport& local_report, Parallel::Communication mpi_communicator)
{ {
// Pack local report. // Pack local report.
int message_size = messageSize(local_report); int message_size = messageSize(local_report, mpi_communicator);
std::vector<char> buffer(message_size); std::vector<char> buffer(message_size);
int offset = 0; int offset = 0;
packConvergenceReport(local_report, buffer, offset); packConvergenceReport(local_report, buffer, offset,mpi_communicator);
assert(offset == message_size); assert(offset == message_size);
// Get message sizes and create offset/displacement array for gathering. // Get message sizes and create offset/displacement array for gathering.
int num_processes = -1; int num_processes = -1;
MPI_Comm_size(MPI_COMM_WORLD, &num_processes); MPI_Comm_size(mpi_communicator, &num_processes);
std::vector<int> message_sizes(num_processes); std::vector<int> message_sizes(num_processes);
MPI_Allgather(&message_size, 1, MPI_INT, message_sizes.data(), 1, MPI_INT, MPI_COMM_WORLD); MPI_Allgather(&message_size, 1, MPI_INT, message_sizes.data(), 1, MPI_INT, mpi_communicator);
std::vector<int> displ(num_processes + 1, 0); std::vector<int> displ(num_processes + 1, 0);
std::partial_sum(message_sizes.begin(), message_sizes.end(), displ.begin() + 1); std::partial_sum(message_sizes.begin(), message_sizes.end(), displ.begin() + 1);
@ -189,10 +189,10 @@ namespace Opm
MPI_Allgatherv(buffer.data(), buffer.size(), MPI_PACKED, MPI_Allgatherv(buffer.data(), buffer.size(), MPI_PACKED,
const_cast<char*>(recv_buffer.data()), message_sizes.data(), const_cast<char*>(recv_buffer.data()), message_sizes.data(),
displ.data(), MPI_PACKED, displ.data(), MPI_PACKED,
MPI_COMM_WORLD); mpi_communicator);
// Unpack. // Unpack.
ConvergenceReport global_report = unpackConvergenceReports(recv_buffer, displ); ConvergenceReport global_report = unpackConvergenceReports(recv_buffer, displ, mpi_communicator);
return global_report; return global_report;
} }
@ -202,7 +202,7 @@ namespace Opm
namespace Opm namespace Opm
{ {
ConvergenceReport gatherConvergenceReport(const ConvergenceReport& local_report) ConvergenceReport gatherConvergenceReport(const ConvergenceReport& local_report, Parallel::Communication mpi_communicator)
{ {
return local_report; return local_report;
} }
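Every pack/unpack helper now threads the communicator through; MPI's packing functions take a communicator because the packed representation may depend on it. A minimal round trip on an arbitrary communicator (packRoundTrip is a made-up name, not part of the code base):

    #include <mpi.h>
    #include <vector>

    void packRoundTrip(MPI_Comm comm)
    {
        int value = 42;
        int pack_size = 0;
        MPI_Pack_size(1, MPI_INT, comm, &pack_size);

        std::vector<char> buf(pack_size);
        int offset = 0;
        MPI_Pack(&value, 1, MPI_INT, buf.data(), buf.size(), &offset, comm);

        int out = 0;
        int position = 0;
        MPI_Unpack(buf.data(), buf.size(), &position, &out, 1, MPI_INT, comm);
        // out == 42 on every rank that executes this
    }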


@ -21,14 +21,24 @@
#ifndef OPM_GATHERCONVERGENCEREPORT_HEADER_INCLUDED #ifndef OPM_GATHERCONVERGENCEREPORT_HEADER_INCLUDED
#define OPM_GATHERCONVERGENCEREPORT_HEADER_INCLUDED #define OPM_GATHERCONVERGENCEREPORT_HEADER_INCLUDED
#include <dune/common/version.hh>
#include <opm/simulators/timestepping/ConvergenceReport.hpp> #include <opm/simulators/timestepping/ConvergenceReport.hpp>
#include <dune/common/parallel/mpihelper.hh>
namespace Opm::Parallel {
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
using Communication = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
#else
using Communication = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
#endif
} // end namespace Communication
namespace Opm namespace Opm
{ {
/// Create a global convergence report combining local /// Create a global convergence report combining local
/// (per-process) reports. /// (per-process) reports.
ConvergenceReport gatherConvergenceReport(const ConvergenceReport& local_report); ConvergenceReport gatherConvergenceReport(const ConvergenceReport& local_report, Parallel::Communication communicator);
} // namespace Opm } // namespace Opm
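Call sites supply the communicator of the grid whose convergence is being gathered, e.g. (grid and the local-report helper are placeholders):

    const Opm::ConvergenceReport local = computeLocalConvergenceReport();  // hypothetical helper
    const Opm::ConvergenceReport global =
        Opm::gatherConvergenceReport(local, grid.comm());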


@ -21,9 +21,20 @@
#ifndef OPM_DEFERREDLOGGER_HEADER_INCLUDED #ifndef OPM_DEFERREDLOGGER_HEADER_INCLUDED
#define OPM_DEFERREDLOGGER_HEADER_INCLUDED #define OPM_DEFERREDLOGGER_HEADER_INCLUDED
#include <dune/common/version.hh>
#include <dune/common/parallel/mpihelper.hh>
#include <string> #include <string>
#include <vector> #include <vector>
namespace Opm::Parallel {
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
using Communication = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
#else
using Communication = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
#endif
}
namespace Opm namespace Opm
{ {
/** This class implements a deferred logger: /** This class implements a deferred logger:
@ -87,7 +98,8 @@ enum ExcEnum {
private: private:
std::vector<Message> messages_; std::vector<Message> messages_;
friend DeferredLogger gatherDeferredLogger(const DeferredLogger& local_deferredlogger); friend DeferredLogger gatherDeferredLogger(const DeferredLogger& local_deferredlogger,
Parallel::Communication mpi_communicator);
}; };
} // namespace Opm } // namespace Opm


@ -22,6 +22,7 @@
#define OPM_DEFERREDLOGGINGERRORHELPERS_HPP #define OPM_DEFERREDLOGGINGERRORHELPERS_HPP
#include <opm/simulators/utils/DeferredLogger.hpp> #include <opm/simulators/utils/DeferredLogger.hpp>
#include <opm/simulators/utils/gatherDeferredLogger.hpp>
#include <opm/material/common/Exceptions.hpp> #include <opm/material/common/Exceptions.hpp>
@ -53,9 +54,11 @@
namespace { namespace {
void _throw(Opm::ExceptionType::ExcEnum exc_type, const std::string& message) { void _throw(Opm::ExceptionType::ExcEnum exc_type,
const auto& cc = Dune::MPIHelper::getCollectiveCommunication(); const std::string& message,
auto global_exc = cc.max(exc_type); Opm::Parallel::Communication comm)
{
auto global_exc = comm.max(exc_type);
switch (global_exc) { switch (global_exc) {
case Opm::ExceptionType::NONE: case Opm::ExceptionType::NONE:
@ -78,18 +81,25 @@ void _throw(Opm::ExceptionType::ExcEnum exc_type, const std::string& message) {
} }
} }
} // anonymous namespace
inline void checkForExceptionsAndThrow(Opm::ExceptionType::ExcEnum exc_type,
const std::string& message,
Opm::Parallel::Communication comm)
{
_throw(exc_type, message, comm);
} }
inline void logAndCheckForExceptionsAndThrow(Opm::DeferredLogger& deferred_logger,
Opm::ExceptionType::ExcEnum exc_type,
inline void checkForExceptionsAndThrow(Opm::ExceptionType::ExcEnum exc_type, const std::string& message) const std::string& message,
const bool terminal_output,
Opm::Parallel::Communication comm)
{ {
_throw(exc_type, message); Opm::DeferredLogger global_deferredLogger = gatherDeferredLogger(deferred_logger, comm);
}
inline void logAndCheckForExceptionsAndThrow(Opm::DeferredLogger& deferred_logger, Opm::ExceptionType::ExcEnum exc_type , const std::string& message, const bool terminal_output)
{
Opm::DeferredLogger global_deferredLogger = gatherDeferredLogger(deferred_logger);
if (terminal_output) { if (terminal_output) {
global_deferredLogger.logMessages(); global_deferredLogger.logMessages();
} }
@ -97,7 +107,7 @@ inline void logAndCheckForExceptionsAndThrow(Opm::DeferredLogger& deferred_logge
// cleared from the global logger, but we must also clear them // cleared from the global logger, but we must also clear them
// from the local logger. // from the local logger.
deferred_logger.clearMessages(); deferred_logger.clearMessages();
_throw(exc_type, message); _throw(exc_type, message, comm);
} }
@ -139,21 +149,19 @@ catch (const Opm::NumericalIssue& e){ \
/// ///
/// Assumes that OPM_BEGIN_PARALLEL_TRY_CATCH() was called to initiate /// Assumes that OPM_BEGIN_PARALLEL_TRY_CATCH() was called to initiate
/// the try-catch clause /// the try-catch clause
#define OPM_END_PARALLEL_TRY_CATCH(prefix) \ #define OPM_END_PARALLEL_TRY_CATCH(prefix, comm) \
} \ } \
OPM_PARALLEL_CATCH_CLAUSE(obptc_exc_type, obptc_exc_msg);\ OPM_PARALLEL_CATCH_CLAUSE(obptc_exc_type, obptc_exc_msg);\
checkForExceptionsAndThrow(obptc_exc_type, \ checkForExceptionsAndThrow(obptc_exc_type, \
prefix + obptc_exc_msg); prefix + obptc_exc_msg, comm);
/// \brief Catch exception, log, and throw in a parallel try-catch clause /// \brief Catch exception, log, and throw in a parallel try-catch clause
/// ///
/// Assumes that OPM_BEGIN_PARALLEL_TRY_CATCH() was called to initiate /// Assumes that OPM_BEGIN_PARALLEL_TRY_CATCH() was called to initiate
/// the try-catch clause /// the try-catch clause
#define OPM_END_PARALLEL_TRY_CATCH_LOG(obptc_logger, \ #define OPM_END_PARALLEL_TRY_CATCH_LOG(obptc_logger, obptc_prefix, obptc_output, comm)\
obptc_prefix, \
obptc_output) \
} \ } \
OPM_PARALLEL_CATCH_CLAUSE(obptc_exc_type, obptc_exc_msg); \ OPM_PARALLEL_CATCH_CLAUSE(obptc_exc_type, obptc_exc_msg); \
logAndCheckForExceptionsAndThrow(obptc_logger, obptc_exc_type, \ logAndCheckForExceptionsAndThrow(obptc_logger, obptc_exc_type, \
obptc_prefix + obptc_exc_msg, obptc_output); obptc_prefix + obptc_exc_msg, obptc_output, comm);
#endif // OPM_DEFERREDLOGGINGERRORHELPERS_HPP #endif // OPM_DEFERREDLOGGINGERRORHELPERS_HPP
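Usage sketch for the logging variant with its new communicator argument (the logger contents, message, terminal_output_ flag and grid object are placeholders):

    Opm::DeferredLogger local_deferredLogger;
    OPM_BEGIN_PARALLEL_TRY_CATCH();
    // ... work that logs into local_deferredLogger and may throw ...
    OPM_END_PARALLEL_TRY_CATCH_LOG(local_deferredLogger,
                                   "assemble() failed: ",
                                   terminal_output_,
                                   grid.comm());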


@ -27,7 +27,14 @@ namespace Opm {
ParallelFieldPropsManager::ParallelFieldPropsManager(FieldPropsManager& manager) ParallelFieldPropsManager::ParallelFieldPropsManager(FieldPropsManager& manager)
: m_manager(manager) : m_manager(manager)
, m_comm(Dune::MPIHelper::getCollectiveCommunication()) , m_comm(Parallel::Communication())
{
}
// EXPERIMENTAL FUNCTION TO ADD COMM AS INPUT
ParallelFieldPropsManager::ParallelFieldPropsManager(FieldPropsManager& manager, Parallel::Communication comm)
: m_manager(manager)
, m_comm(comm)
{ {
} }
@ -208,8 +215,9 @@ bool ParallelFieldPropsManager::has_double(const std::string& keyword) const
} }
ParallelEclipseState::ParallelEclipseState() ParallelEclipseState::ParallelEclipseState(Parallel::Communication comm)
: m_fieldProps(field_props) : m_fieldProps(field_props, comm)
, m_comm(comm)
{ {
} }
@ -220,13 +228,19 @@ ParallelEclipseState::ParallelEclipseState(const Deck& deck)
{ {
} }
ParallelEclipseState::ParallelEclipseState(const Deck& deck, Parallel::Communication comm)
: EclipseState(deck)
, m_fieldProps(field_props, comm)
, m_comm(comm)
{
}
const FieldPropsManager& ParallelEclipseState::fieldProps() const const FieldPropsManager& ParallelEclipseState::fieldProps() const
{ {
if (!m_parProps && Dune::MPIHelper::getCollectiveCommunication().rank() != 0) if (!m_parProps && m_comm.rank() != 0)
OPM_THROW(std::runtime_error, "Attempt to access field properties on no-root process before switch to parallel properties"); OPM_THROW(std::runtime_error, "Attempt to access field properties on no-root process before switch to parallel properties");
if (!m_parProps || Dune::MPIHelper::getCollectiveCommunication().size() == 1) if (!m_parProps || m_comm.size() == 1)
return this->EclipseState::fieldProps(); return this->EclipseState::fieldProps();
return m_fieldProps; return m_fieldProps;
@ -235,7 +249,7 @@ const FieldPropsManager& ParallelEclipseState::fieldProps() const
const FieldPropsManager& ParallelEclipseState::globalFieldProps() const const FieldPropsManager& ParallelEclipseState::globalFieldProps() const
{ {
if (Dune::MPIHelper::getCollectiveCommunication().rank() != 0) if (m_comm.rank() != 0)
OPM_THROW(std::runtime_error, "Attempt to access global field properties on non-root process"); OPM_THROW(std::runtime_error, "Attempt to access global field properties on non-root process");
return this->EclipseState::globalFieldProps(); return this->EclipseState::globalFieldProps();
} }
@ -243,7 +257,7 @@ const FieldPropsManager& ParallelEclipseState::globalFieldProps() const
const EclipseGrid& ParallelEclipseState::getInputGrid() const const EclipseGrid& ParallelEclipseState::getInputGrid() const
{ {
if (Dune::MPIHelper::getCollectiveCommunication().rank() != 0) if (m_comm.rank() != 0)
OPM_THROW(std::runtime_error, "Attempt to access eclipse grid on non-root process"); OPM_THROW(std::runtime_error, "Attempt to access eclipse grid on non-root process");
return this->EclipseState::getInputGrid(); return this->EclipseState::getInputGrid();
} }
@ -257,8 +271,7 @@ void ParallelEclipseState::switchToGlobalProps()
void ParallelEclipseState::switchToDistributedProps() void ParallelEclipseState::switchToDistributedProps()
{ {
const auto& comm = Dune::MPIHelper::getCollectiveCommunication(); if (m_comm.size() == 1) // No need for the parallel frontend
if (comm.size() == 1) // No need for the parallel frontend
return; return;
m_parProps = true; m_parProps = true;


@ -19,12 +19,21 @@
#ifndef PARALLEL_ECLIPSE_STATE_HPP #ifndef PARALLEL_ECLIPSE_STATE_HPP
#define PARALLEL_ECLIPSE_STATE_HPP #define PARALLEL_ECLIPSE_STATE_HPP
#include <dune/common/version.hh>
#include <opm/parser/eclipse/EclipseState/EclipseState.hpp> #include <opm/parser/eclipse/EclipseState/EclipseState.hpp>
#include <opm/parser/eclipse/EclipseState/Grid/TranCalculator.hpp> #include <opm/parser/eclipse/EclipseState/Grid/TranCalculator.hpp>
#include <dune/common/parallel/mpihelper.hh> #include <dune/common/parallel/mpihelper.hh>
#include <functional> #include <functional>
namespace Opm::Parallel {
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
using Communication = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
#else
using Communication = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
#endif
} // end namespace Communication
namespace Opm { namespace Opm {
@ -47,6 +56,10 @@ public:
//! \param manager The field property manager to wrap. //! \param manager The field property manager to wrap.
ParallelFieldPropsManager(FieldPropsManager& manager); ParallelFieldPropsManager(FieldPropsManager& manager);
//! \brief Constructor.
//! \param manager The field property manager to wrap.
ParallelFieldPropsManager(FieldPropsManager& manager, Parallel::Communication comm);
//! \brief Returns actnum vector. //! \brief Returns actnum vector.
//! \details If called on non-root process an empty vector is returned //! \details If called on non-root process an empty vector is returned
std::vector<int> actnum() const override; std::vector<int> actnum() const override;
@ -106,7 +119,7 @@ protected:
std::map<std::string, Fieldprops::FieldData<int>> m_intProps; //!< Map of integer properties in process-local compressed indices. std::map<std::string, Fieldprops::FieldData<int>> m_intProps; //!< Map of integer properties in process-local compressed indices.
std::map<std::string, Fieldprops::FieldData<double>> m_doubleProps; //!< Map of double properties in process-local compressed indices. std::map<std::string, Fieldprops::FieldData<double>> m_doubleProps; //!< Map of double properties in process-local compressed indices.
FieldPropsManager& m_manager; //!< Underlying field property manager (only used on root process). FieldPropsManager& m_manager; //!< Underlying field property manager (only used on root process).
Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator> m_comm; //!< Collective communication handler. Parallel::Communication m_comm; //!< Collective communication handler.
std::function<int(void)> m_activeSize; //!< active size function of the grid std::function<int(void)> m_activeSize; //!< active size function of the grid
std::function<int(const int)> m_local2Global; //!< mapping from local to global cartesian indices std::function<int(const int)> m_local2Global; //!< mapping from local to global cartesian indices
std::unordered_map<std::string, Fieldprops::TranCalculator> m_tran; //!< calculators map std::unordered_map<std::string, Fieldprops::TranCalculator> m_tran; //!< calculators map
@ -130,13 +143,19 @@ class ParallelEclipseState : public EclipseState {
friend class PropsCentroidsDataHandle; friend class PropsCentroidsDataHandle;
public: public:
//! \brief Default constructor. //! \brief Default constructor.
ParallelEclipseState(); ParallelEclipseState(Parallel::Communication comm);
//! \brief Construct from a deck instance. //! \brief Construct from a deck instance.
//! \param deck The deck to construct from //! \param deck The deck to construct from
//! \details Only called on root process //! \details Only called on root process
ParallelEclipseState(const Deck& deck); ParallelEclipseState(const Deck& deck);
//! EXPERIMENTAL FUNCTION TO ADD COMM AS INPUT.
//! \brief Construct from a deck instance.
//! \param deck The deck to construct from
//! \details Only called on root process
ParallelEclipseState(const Deck& deck, Parallel::Communication comm);
//! \brief Switch to global field properties. //! \brief Switch to global field properties.
//! \details Called on root process to use the global field properties //! \details Called on root process to use the global field properties
void switchToGlobalProps(); void switchToGlobalProps();
@ -170,6 +189,7 @@ public:
private: private:
bool m_parProps = false; //! True to use distributed properties on root process bool m_parProps = false; //! True to use distributed properties on root process
ParallelFieldPropsManager m_fieldProps; //!< The parallel field properties ParallelFieldPropsManager m_fieldProps; //!< The parallel field properties
Parallel::Communication m_comm; //!< Collective communication handler.
}; };
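Construction sketch for the communicator-taking overloads (deck is assumed to exist; as documented above, the deck overload is only meaningful on the root rank):

    auto comm = Opm::EclGenericVanguard::comm();

    // Root rank: wrap the globally read deck.
    Opm::ParallelEclipseState rootState(deck, comm);

    // Other ranks: start empty and receive distributed properties later.
    Opm::ParallelEclipseState state(comm);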


@ -39,17 +39,17 @@
#include <opm/parser/eclipse/EclipseState/Schedule/SummaryState.hpp> #include <opm/parser/eclipse/EclipseState/Schedule/SummaryState.hpp>
#define HANDLE_AS_POD(T) \ #define HANDLE_AS_POD(T) \
std::size_t packSize(const T& data, Dune::MPIHelper::MPICommunicator comm) \ std::size_t packSize(const T& data, Opm::Parallel::MPIComm comm) \
{ \ { \
return packSize(data, comm, std::integral_constant<bool,true>()); \ return packSize(data, comm, std::integral_constant<bool,true>()); \
} \ } \
void pack(const T& data, std::vector<char>& buffer, int& position, \ void pack(const T& data, std::vector<char>& buffer, int& position, \
Dune::MPIHelper::MPICommunicator comm) \ Opm::Parallel::MPIComm comm) \
{ \ { \
pack(data, buffer, position, comm, std::integral_constant<bool,true>()); \ pack(data, buffer, position, comm, std::integral_constant<bool,true>()); \
} \ } \
void unpack(T& data, std::vector<char>& buffer, int& position, \ void unpack(T& data, std::vector<char>& buffer, int& position, \
Dune::MPIHelper::MPICommunicator comm) \ Opm::Parallel::MPIComm comm) \
{ \ { \
unpack(data, buffer, position, comm, std::integral_constant<bool,true>()); \ unpack(data, buffer, position, comm, std::integral_constant<bool,true>()); \
} }
@ -59,14 +59,14 @@ namespace Opm
namespace Mpi namespace Mpi
{ {
template<class T> template<class T>
std::size_t packSize(const T*, std::size_t, Dune::MPIHelper::MPICommunicator, std::size_t packSize(const T*, std::size_t, Opm::Parallel::MPIComm,
std::integral_constant<bool, false>) std::integral_constant<bool, false>)
{ {
OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type."); OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type.");
} }
template<class T> template<class T>
std::size_t packSize(const T*, std::size_t l, Dune::MPIHelper::MPICommunicator comm, std::size_t packSize(const T*, std::size_t l, Opm::Parallel::MPIComm comm,
std::integral_constant<bool, true>) std::integral_constant<bool, true>)
{ {
#if HAVE_MPI #if HAVE_MPI
@ -82,19 +82,19 @@ std::size_t packSize(const T*, std::size_t l, Dune::MPIHelper::MPICommunicator c
} }
template<class T> template<class T>
std::size_t packSize(const T* data, std::size_t l, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const T* data, std::size_t l, Opm::Parallel::MPIComm comm)
{ {
return packSize(data, l, comm, typename std::is_pod<T>::type()); return packSize(data, l, comm, typename std::is_pod<T>::type());
} }
template<class T1, class T2> template<class T1, class T2>
std::size_t packSize(const std::pair<T1,T2>& data, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const std::pair<T1,T2>& data, Opm::Parallel::MPIComm comm)
{ {
return packSize(data.first, comm) + packSize(data.second, comm); return packSize(data.first, comm) + packSize(data.second, comm);
} }
template<class T> template<class T>
std::size_t packSize(const std::optional<T>& data, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const std::optional<T>& data, Opm::Parallel::MPIComm comm)
{ {
bool has_value = data.has_value(); bool has_value = data.has_value();
std::size_t pack_size = packSize(has_value, comm); std::size_t pack_size = packSize(has_value, comm);
@ -105,7 +105,7 @@ std::size_t packSize(const std::optional<T>& data, Dune::MPIHelper::MPICommunica
template<class T, class A> template<class T, class A>
std::size_t packSize(const std::vector<T,A>& data, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const std::vector<T,A>& data, Opm::Parallel::MPIComm comm)
{ {
if (std::is_pod<T>::value) if (std::is_pod<T>::value)
// size written automatically // size written automatically
@ -120,7 +120,7 @@ std::size_t packSize(const std::vector<T,A>& data, Dune::MPIHelper::MPICommunica
} }
template<class A> template<class A>
std::size_t packSize(const std::vector<bool,A>& data, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const std::vector<bool,A>& data, Opm::Parallel::MPIComm comm)
{ {
bool entry; bool entry;
return packSize(data.size(), comm) + data.size()*packSize(entry,comm); return packSize(data.size(), comm) + data.size()*packSize(entry,comm);
@ -128,27 +128,27 @@ std::size_t packSize(const std::vector<bool,A>& data, Dune::MPIHelper::MPICommun
template<std::size_t I = 0, typename Tuple> template<std::size_t I = 0, typename Tuple>
typename std::enable_if<I == std::tuple_size<Tuple>::value, std::size_t>::type typename std::enable_if<I == std::tuple_size<Tuple>::value, std::size_t>::type
pack_size_tuple_entry(const Tuple&, Dune::MPIHelper::MPICommunicator) pack_size_tuple_entry(const Tuple&, Opm::Parallel::MPIComm)
{ {
return 0; return 0;
} }
template<std::size_t I = 0, typename Tuple> template<std::size_t I = 0, typename Tuple>
typename std::enable_if<I != std::tuple_size<Tuple>::value, std::size_t>::type typename std::enable_if<I != std::tuple_size<Tuple>::value, std::size_t>::type
pack_size_tuple_entry(const Tuple& tuple, Dune::MPIHelper::MPICommunicator comm) pack_size_tuple_entry(const Tuple& tuple, Opm::Parallel::MPIComm comm)
{ {
return packSize(std::get<I>(tuple), comm) + pack_size_tuple_entry<I+1>(tuple, comm); return packSize(std::get<I>(tuple), comm) + pack_size_tuple_entry<I+1>(tuple, comm);
} }
template<class... Ts> template<class... Ts>
std::size_t packSize(const std::tuple<Ts...>& data, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const std::tuple<Ts...>& data, Opm::Parallel::MPIComm comm)
{ {
return pack_size_tuple_entry(data, comm); return pack_size_tuple_entry(data, comm);
} }
template<class T, class H, class KE, class A> template<class T, class H, class KE, class A>
std::size_t packSize(const std::unordered_set<T,H,KE,A>& data, std::size_t packSize(const std::unordered_set<T,H,KE,A>& data,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
std::size_t totalSize = packSize(data.size(), comm); std::size_t totalSize = packSize(data.size(), comm);
for (const auto& entry : data) for (const auto& entry : data)
@ -160,7 +160,7 @@ std::size_t packSize(const std::unordered_set<T,H,KE,A>& data,
template<class K, class C, class A> template<class K, class C, class A>
std::size_t packSize(const std::set<K,C,A>& data, std::size_t packSize(const std::set<K,C,A>& data,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
std::size_t totalSize = packSize(data.size(), comm); std::size_t totalSize = packSize(data.size(), comm);
for (const auto& entry : data) for (const auto& entry : data)
@ -170,7 +170,7 @@ std::size_t packSize(const std::set<K,C,A>& data,
return totalSize; return totalSize;
} }
std::size_t packSize(const char* str, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const char* str, Opm::Parallel::MPIComm comm)
{ {
#if HAVE_MPI #if HAVE_MPI
int size; int size;
@ -185,13 +185,13 @@ std::size_t packSize(const char* str, Dune::MPIHelper::MPICommunicator comm)
#endif #endif
} }
std::size_t packSize(const std::string& str, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const std::string& str, Opm::Parallel::MPIComm comm)
{ {
return packSize(str.c_str(), comm); return packSize(str.c_str(), comm);
} }
template<class T1, class T2, class C, class A> template<class T1, class T2, class C, class A>
std::size_t packSize(const std::map<T1,T2,C,A>& data, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const std::map<T1,T2,C,A>& data, Opm::Parallel::MPIComm comm)
{ {
std::size_t totalSize = packSize(data.size(), comm); std::size_t totalSize = packSize(data.size(), comm);
for (const auto& entry: data) for (const auto& entry: data)
@ -202,7 +202,7 @@ std::size_t packSize(const std::map<T1,T2,C,A>& data, Dune::MPIHelper::MPICommun
} }
template<class T1, class T2, class H, class P, class A> template<class T1, class T2, class H, class P, class A>
std::size_t packSize(const std::unordered_map<T1,T2,H,P,A>& data, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const std::unordered_map<T1,T2,H,P,A>& data, Opm::Parallel::MPIComm comm)
{ {
std::size_t totalSize = packSize(data.size(), comm); std::size_t totalSize = packSize(data.size(), comm);
for (const auto& entry: data) for (const auto& entry: data)
@ -213,7 +213,7 @@ std::size_t packSize(const std::unordered_map<T1,T2,H,P,A>& data, Dune::MPIHelpe
} }
template<class T, std::size_t N> template<class T, std::size_t N>
std::size_t packSize(const std::array<T,N>& data, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const std::array<T,N>& data, Opm::Parallel::MPIComm comm)
{ {
return N*packSize(data[0], comm); return N*packSize(data[0], comm);
} }
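Taken together, these overloads support the usual three-step round trip: size the buffer with packSize, serialize with pack, then rewind the position and unpack. A minimal sketch (function name invented), assuming a type this file instantiates such as std::vector<double>:

void roundTrip(const std::vector<double>& in, std::vector<double>& out,
               Opm::Parallel::MPIComm comm)
{
    std::vector<char> buffer(Opm::Mpi::packSize(in, comm));
    int position = 0;
    Opm::Mpi::pack(in, buffer, position, comm);   // advances position
    position = 0;                                 // rewind before reading back
    Opm::Mpi::unpack(out, buffer, position, comm);
}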
@ -227,12 +227,12 @@ HANDLE_AS_POD(data::NodeData)
HANDLE_AS_POD(data::Rates) HANDLE_AS_POD(data::Rates)
HANDLE_AS_POD(data::Segment) HANDLE_AS_POD(data::Segment)
std::size_t packSize(const data::NumericAquiferData& data, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const data::NumericAquiferData& data, Opm::Parallel::MPIComm comm)
{ {
return packSize(data.initPressure, comm); return packSize(data.initPressure, comm);
} }
std::size_t packSize(const data::AquiferData& data, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const data::AquiferData& data, Opm::Parallel::MPIComm comm)
{ {
const auto type = 0ull; const auto type = 0ull;
@ -263,7 +263,7 @@ std::size_t packSize(const data::AquiferData& data, Dune::MPIHelper::MPICommunic
return base; return base;
} }
std::size_t packSize(const data::GuideRateValue&, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const data::GuideRateValue&, Opm::Parallel::MPIComm comm)
{ {
const auto nItem = static_cast<std::size_t>(data::GuideRateValue::Item::NumItems); const auto nItem = static_cast<std::size_t>(data::GuideRateValue::Item::NumItems);
@ -271,19 +271,19 @@ std::size_t packSize(const data::GuideRateValue&, Dune::MPIHelper::MPICommunicat
+ packSize(std::array<double, nItem>{}, comm); + packSize(std::array<double, nItem>{}, comm);
} }
std::size_t packSize(const data::GroupGuideRates& data, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const data::GroupGuideRates& data, Opm::Parallel::MPIComm comm)
{ {
return packSize(data.production, comm) return packSize(data.production, comm)
+ packSize(data.injection, comm); + packSize(data.injection, comm);
} }
std::size_t packSize(const data::GroupData& data, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const data::GroupData& data, Opm::Parallel::MPIComm comm)
{ {
return packSize(data.currentControl, comm) return packSize(data.currentControl, comm)
+ packSize(data.guideRates, comm); + packSize(data.guideRates, comm);
} }
std::size_t packSize(const data::Well& data, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const data::Well& data, Opm::Parallel::MPIComm comm)
{ {
std::size_t size = packSize(data.rates, comm); std::size_t size = packSize(data.rates, comm);
size += packSize(data.bhp, comm) + packSize(data.thp, comm); size += packSize(data.bhp, comm) + packSize(data.thp, comm);
@ -296,37 +296,37 @@ std::size_t packSize(const data::Well& data, Dune::MPIHelper::MPICommunicator co
return size; return size;
} }
std::size_t packSize(const data::CellData& data, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const data::CellData& data, Opm::Parallel::MPIComm comm)
{ {
return packSize(data.dim, comm) + packSize(data.data, comm) + packSize(data.target, comm); return packSize(data.dim, comm) + packSize(data.data, comm) + packSize(data.target, comm);
} }
std::size_t packSize(const RestartKey& data, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const RestartKey& data, Opm::Parallel::MPIComm comm)
{ {
return packSize(data.key, comm) + packSize(data.dim, comm) + packSize(data.required, comm); return packSize(data.key, comm) + packSize(data.dim, comm) + packSize(data.required, comm);
} }
std::size_t packSize(const data::Solution& data, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const data::Solution& data, Opm::Parallel::MPIComm comm)
{ {
// Needs explicit conversion to a supported base type holding the data // Needs explicit conversion to a supported base type holding the data
// to prevent throwing. // to prevent throwing.
return packSize(static_cast<const std::map< std::string, data::CellData>&>(data), comm); return packSize(static_cast<const std::map< std::string, data::CellData>&>(data), comm);
} }
std::size_t packSize(const data::GroupAndNetworkValues& data, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const data::GroupAndNetworkValues& data, Opm::Parallel::MPIComm comm)
{ {
return packSize(data.groupData, comm) return packSize(data.groupData, comm)
+ packSize(data.nodeData, comm); + packSize(data.nodeData, comm);
} }
std::size_t packSize(const data::Wells& data, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const data::Wells& data, Opm::Parallel::MPIComm comm)
{ {
// Needs explicit conversion to a supported base type holding the data // Needs explicit conversion to a supported base type holding the data
// to prevent throwing. // to prevent throwing.
return packSize(static_cast<const std::map< std::string, data::Well>&>(data), comm); return packSize(static_cast<const std::map< std::string, data::Well>&>(data), comm);
} }
std::size_t packSize(const RestartValue& data, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const RestartValue& data, Opm::Parallel::MPIComm comm)
{ {
return packSize(data.solution, comm) return packSize(data.solution, comm)
+ packSize(data.wells, comm) + packSize(data.wells, comm)
@ -335,7 +335,7 @@ std::size_t packSize(const RestartValue& data, Dune::MPIHelper::MPICommunicator
+ packSize(data.extra, comm); + packSize(data.extra, comm);
} }
std::size_t packSize(const Opm::time_point&, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const Opm::time_point&, Opm::Parallel::MPIComm comm)
{ {
std::time_t tp; std::time_t tp;
return packSize(tp, comm); return packSize(tp, comm);
@ -346,14 +346,14 @@ std::size_t packSize(const Opm::time_point&, Dune::MPIHelper::MPICommunicator co
template<class T> template<class T>
void pack(const T*, std::size_t, std::vector<char>&, int&, void pack(const T*, std::size_t, std::vector<char>&, int&,
Dune::MPIHelper::MPICommunicator, std::integral_constant<bool, false>) Opm::Parallel::MPIComm, std::integral_constant<bool, false>)
{ {
OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type."); OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type.");
} }
template<class T> template<class T>
void pack(const T* data, std::size_t l, std::vector<char>& buffer, int& position, void pack(const T* data, std::size_t l, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm, Opm::Parallel::MPIComm comm,
std::integral_constant<bool, true>) std::integral_constant<bool, true>)
{ {
#if HAVE_MPI #if HAVE_MPI
@ -372,14 +372,14 @@ void pack(const T* data, std::size_t l, std::vector<char>& buffer, int& position
template<class T> template<class T>
void pack(const T* data, std::size_t l, std::vector<char>& buffer, int& position, void pack(const T* data, std::size_t l, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
pack(data, l, buffer, position, comm, typename std::is_pod<T>::type()); pack(data, l, buffer, position, comm, typename std::is_pod<T>::type());
} }
template<class T1, class T2> template<class T1, class T2>
void pack(const std::pair<T1,T2>& data, std::vector<char>& buffer, int& position, void pack(const std::pair<T1,T2>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
pack(data.first, buffer, position, comm); pack(data.first, buffer, position, comm);
pack(data.second, buffer, position, comm); pack(data.second, buffer, position, comm);
@ -387,7 +387,7 @@ void pack(const std::pair<T1,T2>& data, std::vector<char>& buffer, int& position
template<class T> template<class T>
void pack(const std::optional<T>& data, std::vector<char>& buffer, int& position, void pack(const std::optional<T>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
bool has_value = data.has_value(); bool has_value = data.has_value();
pack(has_value, buffer, position, comm); pack(has_value, buffer, position, comm);
@ -398,7 +398,7 @@ void pack(const std::optional<T>& data, std::vector<char>& buffer, int& position
template<class T, class A> template<class T, class A>
void pack(const std::vector<T, A>& data, std::vector<char>& buffer, int& position, void pack(const std::vector<T, A>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
if (std::is_pod<T>::value) if (std::is_pod<T>::value)
{ {
@ -416,7 +416,7 @@ void pack(const std::vector<T, A>& data, std::vector<char>& buffer, int& positio
template<class K, class C, class A> template<class K, class C, class A>
void pack(const std::set<K,C,A>& data, void pack(const std::set<K,C,A>& data,
std::vector<char>& buffer, int& position, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
pack(data.size(), buffer, position, comm); pack(data.size(), buffer, position, comm);
@ -429,7 +429,7 @@ void pack(const std::set<K,C,A>& data,
template<class T, class H, class KE, class A> template<class T, class H, class KE, class A>
void pack(const std::unordered_set<T,H,KE,A>& data, void pack(const std::unordered_set<T,H,KE,A>& data,
std::vector<char>& buffer, int& position, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
pack(data.size(), buffer, position, comm); pack(data.size(), buffer, position, comm);
@ -441,7 +441,7 @@ void pack(const std::unordered_set<T,H,KE,A>& data,
template<class T, size_t N> template<class T, size_t N>
void pack(const std::array<T,N>& data, std::vector<char>& buffer, int& position, void pack(const std::array<T,N>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
for (const T& entry : data) for (const T& entry : data)
pack(entry, buffer, position, comm); pack(entry, buffer, position, comm);
@ -449,7 +449,7 @@ void pack(const std::array<T,N>& data, std::vector<char>& buffer, int& position,
template<class A> template<class A>
void pack(const std::vector<bool,A>& data, std::vector<char>& buffer, int& position, void pack(const std::vector<bool,A>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
pack(data.size(), buffer, position, comm); pack(data.size(), buffer, position, comm);
for (const auto entry : data) { // Not a reference: vector<bool> range for (const auto entry : data) { // Not a reference: vector<bool> range
@ -461,14 +461,14 @@ void pack(const std::vector<bool,A>& data, std::vector<char>& buffer, int& posit
template<std::size_t I = 0, typename Tuple> template<std::size_t I = 0, typename Tuple>
typename std::enable_if<I == std::tuple_size<Tuple>::value, void>::type typename std::enable_if<I == std::tuple_size<Tuple>::value, void>::type
pack_tuple_entry(const Tuple&, std::vector<char>&, int&, pack_tuple_entry(const Tuple&, std::vector<char>&, int&,
Dune::MPIHelper::MPICommunicator) Opm::Parallel::MPIComm)
{ {
} }
template<std::size_t I = 0, typename Tuple> template<std::size_t I = 0, typename Tuple>
typename std::enable_if<I != std::tuple_size<Tuple>::value, void>::type typename std::enable_if<I != std::tuple_size<Tuple>::value, void>::type
pack_tuple_entry(const Tuple& tuple, std::vector<char>& buffer, pack_tuple_entry(const Tuple& tuple, std::vector<char>& buffer,
int& position, Dune::MPIHelper::MPICommunicator comm) int& position, Opm::Parallel::MPIComm comm)
{ {
pack(std::get<I>(tuple), buffer, position, comm); pack(std::get<I>(tuple), buffer, position, comm);
pack_tuple_entry<I+1>(tuple, buffer, position, comm); pack_tuple_entry<I+1>(tuple, buffer, position, comm);
@ -476,13 +476,13 @@ pack_tuple_entry(const Tuple& tuple, std::vector<char>& buffer,
template<class... Ts> template<class... Ts>
void pack(const std::tuple<Ts...>& data, std::vector<char>& buffer, void pack(const std::tuple<Ts...>& data, std::vector<char>& buffer,
int& position, Dune::MPIHelper::MPICommunicator comm) int& position, Opm::Parallel::MPIComm comm)
{ {
pack_tuple_entry(data, buffer, position, comm); pack_tuple_entry(data, buffer, position, comm);
} }
void pack(const char* str, std::vector<char>& buffer, int& position, void pack(const char* str, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
#if HAVE_MPI #if HAVE_MPI
std::size_t length = strlen(str)+1; std::size_t length = strlen(str)+1;
@ -499,14 +499,14 @@ void pack(const char* str, std::vector<char>& buffer, int& position,
} }
void pack(const std::string& str, std::vector<char>& buffer, int& position, void pack(const std::string& str, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
pack(str.c_str(), buffer, position, comm); pack(str.c_str(), buffer, position, comm);
} }
template<class T1, class T2, class C, class A> template<class T1, class T2, class C, class A>
void pack(const std::map<T1,T2,C,A>& data, std::vector<char>& buffer, int& position, void pack(const std::map<T1,T2,C,A>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
pack(data.size(), buffer, position, comm); pack(data.size(), buffer, position, comm);
@ -518,7 +518,7 @@ void pack(const std::map<T1,T2,C,A>& data, std::vector<char>& buffer, int& posit
template<class T1, class T2, class H, class P, class A> template<class T1, class T2, class H, class P, class A>
void pack(const std::unordered_map<T1,T2,H,P,A>& data, std::vector<char>& buffer, int& position, void pack(const std::unordered_map<T1,T2,H,P,A>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
pack(data.size(), buffer, position, comm); pack(data.size(), buffer, position, comm);
@ -529,13 +529,13 @@ void pack(const std::unordered_map<T1,T2,H,P,A>& data, std::vector<char>& buffer
} }
void pack(const data::NumericAquiferData& data, std::vector<char>& buffer, int& position, void pack(const data::NumericAquiferData& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
pack(data.initPressure, buffer, position, comm); pack(data.initPressure, buffer, position, comm);
} }
void pack(const data::AquiferData& data, std::vector<char>& buffer, int& position, void pack(const data::AquiferData& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
const auto type = const auto type =
(data.typeData.is<data::AquiferType::Fetkovich>() * (1ull << 0)) (data.typeData.is<data::AquiferType::Fetkovich>() * (1ull << 0))
@ -568,7 +568,7 @@ void pack(const data::AquiferData& data, std::vector<char>& buffer, int& positio
} }
void pack(const data::GuideRateValue& data, std::vector<char>& buffer, int& position, void pack(const data::GuideRateValue& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
using Item = data::GuideRateValue::Item; using Item = data::GuideRateValue::Item;
const auto nItem = static_cast<std::size_t>(Item::NumItems); const auto nItem = static_cast<std::size_t>(Item::NumItems);
@ -590,21 +590,21 @@ void pack(const data::GuideRateValue& data, std::vector<char>& buffer, int& posi
} }
void pack(const data::GroupGuideRates& data, std::vector<char>& buffer, int& position, void pack(const data::GroupGuideRates& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
pack(data.production, buffer, position, comm); pack(data.production, buffer, position, comm);
pack(data.injection, buffer, position, comm); pack(data.injection, buffer, position, comm);
} }
void pack(const data::GroupData& data, std::vector<char>& buffer, int& position, void pack(const data::GroupData& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
pack(data.currentControl, buffer, position, comm); pack(data.currentControl, buffer, position, comm);
pack(data.guideRates, buffer, position, comm); pack(data.guideRates, buffer, position, comm);
} }
void pack(const data::Well& data, std::vector<char>& buffer, int& position, void pack(const data::Well& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
pack(data.rates, buffer, position, comm); pack(data.rates, buffer, position, comm);
pack(data.bhp, buffer, position, comm); pack(data.bhp, buffer, position, comm);
@ -618,7 +618,7 @@ void pack(const data::Well& data, std::vector<char>& buffer, int& position,
} }
void pack(const RestartKey& data, std::vector<char>& buffer, int& position, void pack(const RestartKey& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
pack(data.key, buffer, position, comm); pack(data.key, buffer, position, comm);
pack(data.dim, buffer, position, comm); pack(data.dim, buffer, position, comm);
@ -626,7 +626,7 @@ void pack(const RestartKey& data, std::vector<char>& buffer, int& position,
} }
void pack(const data::CellData& data, std::vector<char>& buffer, int& position, void pack(const data::CellData& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
pack(data.dim, buffer, position, comm); pack(data.dim, buffer, position, comm);
pack(data.data, buffer, position, comm); pack(data.data, buffer, position, comm);
@ -634,7 +634,7 @@ void pack(const data::CellData& data, std::vector<char>& buffer, int& position,
} }
void pack(const data::Solution& data, std::vector<char>& buffer, int& position, void pack(const data::Solution& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
// Needs explicit conversion to a supported base type holding the data // Needs explicit conversion to a supported base type holding the data
// to prevent throwing. // to prevent throwing.
@ -643,7 +643,7 @@ void pack(const data::Solution& data, std::vector<char>& buffer, int& position,
} }
void pack(const data::Wells& data, std::vector<char>& buffer, int& position, void pack(const data::Wells& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
// Needs explicit conversion to a supported base type holding the data // Needs explicit conversion to a supported base type holding the data
// to prevent throwing. // to prevent throwing.
@ -652,14 +652,14 @@ void pack(const data::Wells& data, std::vector<char>& buffer, int& position,
} }
void pack(const data::GroupAndNetworkValues& data, std::vector<char>& buffer, int& position, void pack(const data::GroupAndNetworkValues& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
pack(data.groupData, buffer, position, comm); pack(data.groupData, buffer, position, comm);
pack(data.nodeData, buffer, position, comm); pack(data.nodeData, buffer, position, comm);
} }
void pack(const RestartValue& data, std::vector<char>& buffer, int& position, void pack(const RestartValue& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
pack(data.solution, buffer, position, comm); pack(data.solution, buffer, position, comm);
pack(data.wells, buffer, position, comm); pack(data.wells, buffer, position, comm);
@ -669,7 +669,7 @@ void pack(const RestartValue& data, std::vector<char>& buffer, int& position,
} }
void pack(const Opm::time_point& data, std::vector<char>& buffer, int& position, void pack(const Opm::time_point& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
pack(Opm::TimeService::to_time_t(data), buffer, position, comm); pack(Opm::TimeService::to_time_t(data), buffer, position, comm);
} }
@ -679,14 +679,14 @@ void pack(const Opm::time_point& data, std::vector<char>& buffer, int& position,
template<class T> template<class T>
void unpack(T*, const std::size_t&, std::vector<char>&, int&, void unpack(T*, const std::size_t&, std::vector<char>&, int&,
Dune::MPIHelper::MPICommunicator, std::integral_constant<bool, false>) Opm::Parallel::MPIComm, std::integral_constant<bool, false>)
{ {
OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type."); OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type.");
} }
template<class T> template<class T>
void unpack(T* data, const std::size_t& l, std::vector<char>& buffer, int& position, void unpack(T* data, const std::size_t& l, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm, Opm::Parallel::MPIComm comm,
std::integral_constant<bool, true>) std::integral_constant<bool, true>)
{ {
#if HAVE_MPI #if HAVE_MPI
@ -703,14 +703,14 @@ void unpack(T* data, const std::size_t& l, std::vector<char>& buffer, int& posit
template<class T> template<class T>
void unpack(T* data, const std::size_t& l, std::vector<char>& buffer, int& position, void unpack(T* data, const std::size_t& l, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
unpack(data, l, buffer, position, comm, typename std::is_pod<T>::type()); unpack(data, l, buffer, position, comm, typename std::is_pod<T>::type());
} }
template<class T1, class T2> template<class T1, class T2>
void unpack(std::pair<T1,T2>& data, std::vector<char>& buffer, int& position, void unpack(std::pair<T1,T2>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
unpack(data.first, buffer, position, comm); unpack(data.first, buffer, position, comm);
unpack(data.second, buffer, position, comm); unpack(data.second, buffer, position, comm);
@ -718,7 +718,7 @@ void unpack(std::pair<T1,T2>& data, std::vector<char>& buffer, int& position,
template<class T> template<class T>
void unpack(std::optional<T>&data, std::vector<char>& buffer, int& position, void unpack(std::optional<T>&data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
bool has_value; bool has_value;
unpack(has_value, buffer, position, comm); unpack(has_value, buffer, position, comm);
@ -733,7 +733,7 @@ void unpack(std::optional<T>&data, std::vector<char>& buffer, int& position,
template<class T, class A> template<class T, class A>
void unpack(std::vector<T,A>& data, std::vector<char>& buffer, int& position, void unpack(std::vector<T,A>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
std::size_t length = 0; std::size_t length = 0;
unpack(length, buffer, position, comm); unpack(length, buffer, position, comm);
@ -751,7 +751,7 @@ void unpack(std::vector<T,A>& data, std::vector<char>& buffer, int& position,
template<class A> template<class A>
void unpack(std::vector<bool,A>& data, std::vector<char>& buffer, int& position, void unpack(std::vector<bool,A>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
size_t size; size_t size;
unpack(size, buffer, position, comm); unpack(size, buffer, position, comm);
@ -767,14 +767,14 @@ void unpack(std::vector<bool,A>& data, std::vector<char>& buffer, int& position,
template<std::size_t I = 0, typename Tuple> template<std::size_t I = 0, typename Tuple>
typename std::enable_if<I == std::tuple_size<Tuple>::value, void>::type typename std::enable_if<I == std::tuple_size<Tuple>::value, void>::type
unpack_tuple_entry(Tuple&, std::vector<char>&, int&, unpack_tuple_entry(Tuple&, std::vector<char>&, int&,
Dune::MPIHelper::MPICommunicator) Opm::Parallel::MPIComm)
{ {
} }
template<std::size_t I = 0, typename Tuple> template<std::size_t I = 0, typename Tuple>
typename std::enable_if<I != std::tuple_size<Tuple>::value, void>::type typename std::enable_if<I != std::tuple_size<Tuple>::value, void>::type
unpack_tuple_entry(Tuple& tuple, std::vector<char>& buffer, unpack_tuple_entry(Tuple& tuple, std::vector<char>& buffer,
int& position, Dune::MPIHelper::MPICommunicator comm) int& position, Opm::Parallel::MPIComm comm)
{ {
unpack(std::get<I>(tuple), buffer, position, comm); unpack(std::get<I>(tuple), buffer, position, comm);
unpack_tuple_entry<I+1>(tuple, buffer, position, comm); unpack_tuple_entry<I+1>(tuple, buffer, position, comm);
@ -782,7 +782,7 @@ unpack_tuple_entry(Tuple& tuple, std::vector<char>& buffer,
template<class... Ts> template<class... Ts>
void unpack(std::tuple<Ts...>& data, std::vector<char>& buffer, void unpack(std::tuple<Ts...>& data, std::vector<char>& buffer,
int& position, Dune::MPIHelper::MPICommunicator comm) int& position, Opm::Parallel::MPIComm comm)
{ {
unpack_tuple_entry(data, buffer, position, comm); unpack_tuple_entry(data, buffer, position, comm);
} }
@ -790,7 +790,7 @@ void unpack(std::tuple<Ts...>& data, std::vector<char>& buffer,
template<class K, class C, class A> template<class K, class C, class A>
void unpack(std::set<K,C,A>& data, void unpack(std::set<K,C,A>& data,
std::vector<char>& buffer, int& position, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
std::size_t size = 0; std::size_t size = 0;
unpack(size, buffer, position, comm); unpack(size, buffer, position, comm);
@ -806,7 +806,7 @@ void unpack(std::set<K,C,A>& data,
template<class T, class H, class KE, class A> template<class T, class H, class KE, class A>
void unpack(std::unordered_set<T,H,KE,A>& data, void unpack(std::unordered_set<T,H,KE,A>& data,
std::vector<char>& buffer, int& position, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
std::size_t size=0; std::size_t size=0;
unpack(size, buffer, position, comm); unpack(size, buffer, position, comm);
@ -821,14 +821,14 @@ void unpack(std::unordered_set<T,H,KE,A>& data,
template<class T, size_t N> template<class T, size_t N>
void unpack(std::array<T,N>& data, std::vector<char>& buffer, int& position, void unpack(std::array<T,N>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
for (T& entry : data) for (T& entry : data)
unpack(entry, buffer, position, comm); unpack(entry, buffer, position, comm);
} }
void unpack(char* str, std::size_t length, std::vector<char>& buffer, int& position, void unpack(char* str, std::size_t length, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
#if HAVE_MPI #if HAVE_MPI
MPI_Unpack(buffer.data(), buffer.size(), &position, const_cast<char*>(str), length, MPI_CHAR, comm); MPI_Unpack(buffer.data(), buffer.size(), &position, const_cast<char*>(str), length, MPI_CHAR, comm);
@ -842,7 +842,7 @@ void unpack(char* str, std::size_t length, std::vector<char>& buffer, int& posit
} }
void unpack(std::string& str, std::vector<char>& buffer, int& position, void unpack(std::string& str, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
std::size_t length=0; std::size_t length=0;
unpack(length, buffer, position, comm); unpack(length, buffer, position, comm);
@ -854,7 +854,7 @@ void unpack(std::string& str, std::vector<char>& buffer, int& position,
template<class T1, class T2, class C, class A> template<class T1, class T2, class C, class A>
void unpack(std::map<T1,T2,C,A>& data, std::vector<char>& buffer, int& position, void unpack(std::map<T1,T2,C,A>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
std::size_t size=0; std::size_t size=0;
unpack(size, buffer, position, comm); unpack(size, buffer, position, comm);
@ -869,7 +869,7 @@ void unpack(std::map<T1,T2,C,A>& data, std::vector<char>& buffer, int& position,
template<class T1, class T2, class H, class P, class A> template<class T1, class T2, class H, class P, class A>
void unpack(std::unordered_map<T1,T2,H,P,A>& data, std::vector<char>& buffer, int& position, void unpack(std::unordered_map<T1,T2,H,P,A>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
std::size_t size=0; std::size_t size=0;
unpack(size, buffer, position, comm); unpack(size, buffer, position, comm);
@ -883,7 +883,7 @@ void unpack(std::unordered_map<T1,T2,H,P,A>& data, std::vector<char>& buffer, in
} }
void unpack(data::Well& data, std::vector<char>& buffer, int& position, void unpack(data::Well& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
unpack(data.rates, buffer, position, comm); unpack(data.rates, buffer, position, comm);
unpack(data.bhp, buffer, position, comm); unpack(data.bhp, buffer, position, comm);
@ -897,13 +897,13 @@ void unpack(data::Well& data, std::vector<char>& buffer, int& position,
} }
void unpack(data::NumericAquiferData& data, std::vector<char>& buffer, int& position, void unpack(data::NumericAquiferData& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
unpack(data.initPressure, buffer, position, comm); unpack(data.initPressure, buffer, position, comm);
} }
void unpack(data::AquiferData& data, std::vector<char>& buffer, int& position, void unpack(data::AquiferData& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
auto type = 0ull; auto type = 0ull;
@ -930,7 +930,7 @@ void unpack(data::AquiferData& data, std::vector<char>& buffer, int& position,
} }
void unpack(data::GuideRateValue& data, std::vector<char>& buffer, int& position, void unpack(data::GuideRateValue& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
using Item = data::GuideRateValue::Item; using Item = data::GuideRateValue::Item;
const auto nItem = static_cast<std::size_t>(Item::NumItems); const auto nItem = static_cast<std::size_t>(Item::NumItems);
@ -949,21 +949,21 @@ void unpack(data::GuideRateValue& data, std::vector<char>& buffer, int& position
} }
void unpack(data::GroupGuideRates& data, std::vector<char>& buffer, int& position, void unpack(data::GroupGuideRates& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
unpack(data.production, buffer, position, comm); unpack(data.production, buffer, position, comm);
unpack(data.injection, buffer, position, comm); unpack(data.injection, buffer, position, comm);
} }
void unpack(data::GroupData& data, std::vector<char>& buffer, int& position, void unpack(data::GroupData& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
unpack(data.currentControl, buffer, position, comm); unpack(data.currentControl, buffer, position, comm);
unpack(data.guideRates, buffer, position, comm); unpack(data.guideRates, buffer, position, comm);
} }
void unpack(RestartKey& data, std::vector<char>& buffer, int& position, void unpack(RestartKey& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
unpack(data.key, buffer, position, comm); unpack(data.key, buffer, position, comm);
unpack(data.dim, buffer, position, comm); unpack(data.dim, buffer, position, comm);
@ -971,7 +971,7 @@ void unpack(RestartKey& data, std::vector<char>& buffer, int& position,
} }
void unpack(data::CellData& data, std::vector<char>& buffer, int& position, void unpack(data::CellData& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
unpack(data.dim, buffer, position, comm); unpack(data.dim, buffer, position, comm);
unpack(data.data, buffer, position, comm); unpack(data.data, buffer, position, comm);
@ -979,7 +979,7 @@ void unpack(data::CellData& data, std::vector<char>& buffer, int& position,
} }
void unpack(data::Solution& data, std::vector<char>& buffer, int& position, void unpack(data::Solution& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
// Needs explicit conversion to a supported base type holding the data // Needs explicit conversion to a supported base type holding the data
// to prevent throwing. // to prevent throwing.
@ -988,7 +988,7 @@ void unpack(data::Solution& data, std::vector<char>& buffer, int& position,
} }
void unpack(data::Wells& data, std::vector<char>& buffer, int& position, void unpack(data::Wells& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
// Needs explicit conversion to a supported base type holding the data // Needs explicit conversion to a supported base type holding the data
// to prevent throwing. // to prevent throwing.
@ -997,14 +997,14 @@ void unpack(data::Wells& data, std::vector<char>& buffer, int& position,
} }
void unpack(data::GroupAndNetworkValues& data, std::vector<char>& buffer, int& position, void unpack(data::GroupAndNetworkValues& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
unpack(data.groupData, buffer, position, comm); unpack(data.groupData, buffer, position, comm);
unpack(data.nodeData, buffer, position, comm); unpack(data.nodeData, buffer, position, comm);
} }
void unpack(RestartValue& data, std::vector<char>& buffer, int& position, void unpack(RestartValue& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
unpack(data.solution, buffer, position, comm); unpack(data.solution, buffer, position, comm);
unpack(data.wells, buffer, position, comm); unpack(data.wells, buffer, position, comm);
@ -1014,7 +1014,7 @@ void unpack(RestartValue& data, std::vector<char>& buffer, int& position,
} }
void unpack([[maybe_unused]] Opm::time_point& data, std::vector<char>& buffer, int& position, void unpack([[maybe_unused]] Opm::time_point& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Opm::Parallel::MPIComm comm)
{ {
std::time_t tp; std::time_t tp;
unpack(tp, buffer, position, comm); unpack(tp, buffer, position, comm);
@ -1026,13 +1026,13 @@ void unpack([[maybe_unused]] Opm::time_point& data, std::vector<char>& buffer, i
#define INSTANTIATE_PACK_VECTOR(...) \ #define INSTANTIATE_PACK_VECTOR(...) \
template std::size_t packSize(const std::vector<__VA_ARGS__>& data, \ template std::size_t packSize(const std::vector<__VA_ARGS__>& data, \
Dune::MPIHelper::MPICommunicator comm); \ Opm::Parallel::MPIComm comm); \
template void pack(const std::vector<__VA_ARGS__>& data, \ template void pack(const std::vector<__VA_ARGS__>& data, \
std::vector<char>& buffer, int& position, \ std::vector<char>& buffer, int& position, \
Dune::MPIHelper::MPICommunicator comm); \ Opm::Parallel::MPIComm comm); \
template void unpack(std::vector<__VA_ARGS__>& data, \ template void unpack(std::vector<__VA_ARGS__>& data, \
std::vector<char>& buffer, int& position, \ std::vector<char>& buffer, int& position, \
Dune::MPIHelper::MPICommunicator comm); Opm::Parallel::MPIComm comm);
INSTANTIATE_PACK_VECTOR(float) INSTANTIATE_PACK_VECTOR(float)
INSTANTIATE_PACK_VECTOR(double) INSTANTIATE_PACK_VECTOR(double)
@ -1059,13 +1059,13 @@ INSTANTIATE_PACK_VECTOR(std::string)
#define INSTANTIATE_PACK(...) \ #define INSTANTIATE_PACK(...) \
template std::size_t packSize(const __VA_ARGS__& data, \ template std::size_t packSize(const __VA_ARGS__& data, \
Dune::MPIHelper::MPICommunicator comm); \ Opm::Parallel::MPIComm comm); \
template void pack(const __VA_ARGS__& data, \ template void pack(const __VA_ARGS__& data, \
std::vector<char>& buffer, int& position, \ std::vector<char>& buffer, int& position, \
Dune::MPIHelper::MPICommunicator comm); \ Opm::Parallel::MPIComm comm); \
template void unpack(__VA_ARGS__& data, \ template void unpack(__VA_ARGS__& data, \
std::vector<char>& buffer, int& position, \ std::vector<char>& buffer, int& position, \
Dune::MPIHelper::MPICommunicator comm); Opm::Parallel::MPIComm comm);
INSTANTIATE_PACK(float) INSTANTIATE_PACK(float)
INSTANTIATE_PACK(double) INSTANTIATE_PACK(double)
@ -1101,7 +1101,7 @@ INSTANTIATE_PACK(std::set<std::string>)
RestartValue loadParallelRestart(const EclipseIO* eclIO, Action::State& actionState, SummaryState& summaryState, RestartValue loadParallelRestart(const EclipseIO* eclIO, Action::State& actionState, SummaryState& summaryState,
const std::vector<Opm::RestartKey>& solutionKeys, const std::vector<Opm::RestartKey>& solutionKeys,
const std::vector<Opm::RestartKey>& extraKeys, const std::vector<Opm::RestartKey>& extraKeys,
Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator> comm) Parallel::Communication comm)
{ {
#if HAVE_MPI #if HAVE_MPI
RestartValue restartValues{}; RestartValue restartValues{};
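A hedged call-site sketch for the updated signature; the surrounding objects (eclIO, the state objects and the key lists) are placeholders whose construction lies outside this diff, and namespace qualification is omitted:

// Hypothetical call site: hand the simulator's communicator to the restart
// loader instead of a hard-wired world communicator.
RestartValue restart = loadParallelRestart(eclIO,          // const EclipseIO*
                                           actionState,    // Action::State&
                                           summaryState,   // SummaryState&
                                           solutionKeys,   // std::vector<Opm::RestartKey>
                                           extraKeys,      // std::vector<Opm::RestartKey>
                                           comm);          // Parallel::Communication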

View File

@ -26,6 +26,7 @@
#include <opm/common/ErrorMacros.hpp> #include <opm/common/ErrorMacros.hpp>
#include <opm/common/utility/TimeService.hpp> #include <opm/common/utility/TimeService.hpp>
#include <dune/common/version.hh>
#include <dune/common/parallel/mpihelper.hh> #include <dune/common/parallel/mpihelper.hh>
#include <chrono> #include <chrono>
@ -39,6 +40,15 @@
#include <unordered_set> #include <unordered_set>
#include <vector> #include <vector>
namespace Opm::Parallel {
using MPIComm = typename Dune::MPIHelper::MPICommunicator;
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
using Communication = Dune::Communication<MPIComm>;
#else
using Communication = Dune::CollectiveCommunication<MPIComm>;
#endif
} // end namespace Opm::Parallel
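A small sketch of what the alias buys: callers spell the communicator type once and stay source-compatible across DUNE releases (variable names are illustrative):

// With MPI enabled, MPIComm is MPI_Comm; Communication resolves to
// Dune::Communication<MPI_Comm> on DUNE >= 2.7 and to
// Dune::CollectiveCommunication<MPI_Comm> on older releases, with the same interface.
Opm::Parallel::MPIComm mpiComm = Dune::MPIHelper::getCommunicator();
Opm::Parallel::Communication comm(mpiComm);
if (comm.rank() == 0) {
    // root-only work, e.g. reading the deck
}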
namespace Opm namespace Opm
{ {
@ -77,18 +87,18 @@ class State;
namespace Mpi namespace Mpi
{ {
template<class T> template<class T>
std::size_t packSize(const T*, std::size_t, Dune::MPIHelper::MPICommunicator, std::size_t packSize(const T*, std::size_t, Parallel::MPIComm,
std::integral_constant<bool, false>); std::integral_constant<bool, false>);
template<class T> template<class T>
std::size_t packSize(const T*, std::size_t l, Dune::MPIHelper::MPICommunicator comm, std::size_t packSize(const T*, std::size_t l, Parallel::MPIComm comm,
std::integral_constant<bool, true>); std::integral_constant<bool, true>);
template<class T> template<class T>
std::size_t packSize(const T* data, std::size_t l, Dune::MPIHelper::MPICommunicator comm); std::size_t packSize(const T* data, std::size_t l, Parallel::MPIComm comm);
template<class T> template<class T>
std::size_t packSize(const T&, Dune::MPIHelper::MPICommunicator, std::size_t packSize(const T&, Parallel::MPIComm,
std::integral_constant<bool, false>) std::integral_constant<bool, false>)
{ {
std::string msg = std::string{"Packing not (yet) supported for non-pod type: "} + typeid(T).name(); std::string msg = std::string{"Packing not (yet) supported for non-pod type: "} + typeid(T).name();
@ -96,7 +106,7 @@ std::size_t packSize(const T&, Dune::MPIHelper::MPICommunicator,
} }
template<class T> template<class T>
std::size_t packSize(const T&, Dune::MPIHelper::MPICommunicator comm, std::size_t packSize(const T&, Parallel::MPIComm comm,
std::integral_constant<bool, true>) std::integral_constant<bool, true>)
{ {
#if HAVE_MPI #if HAVE_MPI
@ -110,69 +120,69 @@ std::size_t packSize(const T&, Dune::MPIHelper::MPICommunicator comm,
} }
template<class T> template<class T>
std::size_t packSize(const T& data, Dune::MPIHelper::MPICommunicator comm) std::size_t packSize(const T& data, Parallel::MPIComm comm)
{ {
return packSize(data, comm, typename std::is_pod<T>::type()); return packSize(data, comm, typename std::is_pod<T>::type());
} }
template<class T1, class T2> template<class T1, class T2>
std::size_t packSize(const std::pair<T1,T2>& data, Dune::MPIHelper::MPICommunicator comm); std::size_t packSize(const std::pair<T1,T2>& data, Parallel::MPIComm comm);
template<class T> template<class T>
std::size_t packSize(const std::optional<T>& data, Dune::MPIHelper::MPICommunicator comm); std::size_t packSize(const std::optional<T>& data, Parallel::MPIComm comm);
template<class T, class A> template<class T, class A>
std::size_t packSize(const std::vector<T,A>& data, Dune::MPIHelper::MPICommunicator comm); std::size_t packSize(const std::vector<T,A>& data, Parallel::MPIComm comm);
template<class K, class C, class A> template<class K, class C, class A>
std::size_t packSize(const std::set<K,C,A>& data, std::size_t packSize(const std::set<K,C,A>& data,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
template<class T, class H, class KE, class A> template<class T, class H, class KE, class A>
std::size_t packSize(const std::unordered_set<T,H,KE,A>& data, std::size_t packSize(const std::unordered_set<T,H,KE,A>& data,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
template<class A> template<class A>
std::size_t packSize(const std::vector<bool,A>& data, Dune::MPIHelper::MPICommunicator comm); std::size_t packSize(const std::vector<bool,A>& data, Parallel::MPIComm comm);
template<class... Ts> template<class... Ts>
std::size_t packSize(const std::tuple<Ts...>& data, Dune::MPIHelper::MPICommunicator comm); std::size_t packSize(const std::tuple<Ts...>& data, Parallel::MPIComm comm);
template<class T, std::size_t N> template<class T, std::size_t N>
std::size_t packSize(const std::array<T,N>& data, Dune::MPIHelper::MPICommunicator comm); std::size_t packSize(const std::array<T,N>& data, Parallel::MPIComm comm);
std::size_t packSize(const char* str, Dune::MPIHelper::MPICommunicator comm); std::size_t packSize(const char* str, Parallel::MPIComm comm);
template<class T1, class T2, class C, class A> template<class T1, class T2, class C, class A>
std::size_t packSize(const std::map<T1,T2,C,A>& data, Dune::MPIHelper::MPICommunicator comm); std::size_t packSize(const std::map<T1,T2,C,A>& data, Parallel::MPIComm comm);
template<class T1, class T2, class H, class P, class A> template<class T1, class T2, class H, class P, class A>
std::size_t packSize(const std::unordered_map<T1,T2,H,P,A>& data, Dune::MPIHelper::MPICommunicator comm); std::size_t packSize(const std::unordered_map<T1,T2,H,P,A>& data, Parallel::MPIComm comm);
////// pack routines ////// pack routines
template<class T> template<class T>
void pack(const T*, std::size_t, std::vector<char>&, int&, void pack(const T*, std::size_t, std::vector<char>&, int&,
Dune::MPIHelper::MPICommunicator, std::integral_constant<bool, false>); Parallel::MPIComm, std::integral_constant<bool, false>);
template<class T> template<class T>
void pack(const T* data, std::size_t l, std::vector<char>& buffer, int& position, void pack(const T* data, std::size_t l, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm, std::integral_constant<bool, true>); Parallel::MPIComm comm, std::integral_constant<bool, true>);
template<class T> template<class T>
void pack(const T* data, std::size_t l, std::vector<char>& buffer, int& position, void pack(const T* data, std::size_t l, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
template<class T> template<class T>
void pack(const T&, std::vector<char>&, int&, void pack(const T&, std::vector<char>&, int&,
Dune::MPIHelper::MPICommunicator, std::integral_constant<bool, false>) Parallel::MPIComm, std::integral_constant<bool, false>)
{ {
OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type."); OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type.");
} }
template<class T> template<class T>
void pack(const T& data, std::vector<char>& buffer, int& position, void pack(const T& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm, std::integral_constant<bool, true>) Parallel::MPIComm comm, std::integral_constant<bool, true>)
{ {
#if HAVE_MPI #if HAVE_MPI
MPI_Pack(&data, 1, Dune::MPITraits<T>::getType(), buffer.data(), MPI_Pack(&data, 1, Dune::MPITraits<T>::getType(), buffer.data(),
@ -187,81 +197,81 @@ void pack(const T& data, std::vector<char>& buffer, int& position,
template<class T> template<class T>
void pack(const T& data, std::vector<char>& buffer, int& position, void pack(const T& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Parallel::MPIComm comm)
{ {
pack(data, buffer, position, comm, typename std::is_pod<T>::type()); pack(data, buffer, position, comm, typename std::is_pod<T>::type());
} }
template<class T1, class T2> template<class T1, class T2>
void pack(const std::pair<T1,T2>& data, std::vector<char>& buffer, int& position, void pack(const std::pair<T1,T2>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
template<class T> template<class T>
void pack(const std::optional<T>& data, std::vector<char>& buffer, int& position, void pack(const std::optional<T>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
template<class T, class A> template<class T, class A>
void pack(const std::vector<T,A>& data, std::vector<char>& buffer, int& position, void pack(const std::vector<T,A>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
template<class A> template<class A>
void pack(const std::vector<bool,A>& data, std::vector<char>& buffer, int& position, void pack(const std::vector<bool,A>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
template<class... Ts> template<class... Ts>
void pack(const std::tuple<Ts...>& data, std::vector<char>& buffer, void pack(const std::tuple<Ts...>& data, std::vector<char>& buffer,
int& position, Dune::MPIHelper::MPICommunicator comm); int& position, Parallel::MPIComm comm);
template<class K, class C, class A> template<class K, class C, class A>
void pack(const std::set<K,C,A>& data, void pack(const std::set<K,C,A>& data,
std::vector<char>& buffer, int& position, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
template<class T, class H, class KE, class A> template<class T, class H, class KE, class A>
void pack(const std::unordered_set<T,H,KE,A>& data, void pack(const std::unordered_set<T,H,KE,A>& data,
std::vector<char>& buffer, int& position, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
template<class T, size_t N> template<class T, size_t N>
void pack(const std::array<T,N>& data, std::vector<char>& buffer, int& position, void pack(const std::array<T,N>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
template<class T1, class T2, class C, class A> template<class T1, class T2, class C, class A>
void pack(const std::map<T1,T2,C,A>& data, std::vector<char>& buffer, int& position, void pack(const std::map<T1,T2,C,A>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
template<class T1, class T2, class H, class P, class A> template<class T1, class T2, class H, class P, class A>
void pack(const std::unordered_map<T1,T2,H,P,A>& data, std::vector<char>& buffer, int& position, void pack(const std::unordered_map<T1,T2,H,P,A>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
void pack(const char* str, std::vector<char>& buffer, int& position, void pack(const char* str, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
/// unpack routines /// unpack routines
template<class T> template<class T>
void unpack(T*, const std::size_t&, std::vector<char>&, int&, void unpack(T*, const std::size_t&, std::vector<char>&, int&,
Dune::MPIHelper::MPICommunicator, std::integral_constant<bool, false>); Parallel::MPIComm, std::integral_constant<bool, false>);
template<class T> template<class T>
void unpack(T* data, const std::size_t& l, std::vector<char>& buffer, int& position, void unpack(T* data, const std::size_t& l, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm, Parallel::MPIComm comm,
std::integral_constant<bool, true>); std::integral_constant<bool, true>);
template<class T> template<class T>
void unpack(T* data, const std::size_t& l, std::vector<char>& buffer, int& position, void unpack(T* data, const std::size_t& l, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
template<class T> template<class T>
void unpack(T&, std::vector<char>&, int&, void unpack(T&, std::vector<char>&, int&,
Dune::MPIHelper::MPICommunicator, std::integral_constant<bool, false>) Parallel::MPIComm, std::integral_constant<bool, false>)
{ {
OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type."); OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type.");
} }
template<class T> template<class T>
void unpack(T& data, std::vector<char>& buffer, int& position, void unpack(T& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm, std::integral_constant<bool, true>) Parallel::MPIComm comm, std::integral_constant<bool, true>)
{ {
#if HAVE_MPI #if HAVE_MPI
MPI_Unpack(buffer.data(), buffer.size(), &position, &data, 1, MPI_Unpack(buffer.data(), buffer.size(), &position, &data, 1,
@ -276,64 +286,64 @@ void unpack(T& data, std::vector<char>& buffer, int& position,
template<class T> template<class T>
void unpack(T& data, std::vector<char>& buffer, int& position, void unpack(T& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm) Parallel::MPIComm comm)
{ {
unpack(data, buffer, position, comm, typename std::is_pod<T>::type()); unpack(data, buffer, position, comm, typename std::is_pod<T>::type());
} }
template<class T1, class T2> template<class T1, class T2>
void unpack(std::pair<T1,T2>& data, std::vector<char>& buffer, int& position, void unpack(std::pair<T1,T2>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
template<class T> template<class T>
void unpack(std::optional<T>& data, std::vector<char>& buffer, int& position, void unpack(std::optional<T>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
template<class T, class A> template<class T, class A>
void unpack(std::vector<T,A>& data, std::vector<char>& buffer, int& position, void unpack(std::vector<T,A>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
template<class A> template<class A>
void unpack(std::vector<bool,A>& data, std::vector<char>& buffer, int& position, void unpack(std::vector<bool,A>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
template<class... Ts> template<class... Ts>
void unpack(std::tuple<Ts...>& data, std::vector<char>& buffer, void unpack(std::tuple<Ts...>& data, std::vector<char>& buffer,
int& position, Dune::MPIHelper::MPICommunicator comm); int& position, Parallel::MPIComm comm);
template<class K, class C, class A> template<class K, class C, class A>
void unpack(std::set<K,C,A>& data, void unpack(std::set<K,C,A>& data,
std::vector<char>& buffer, int& position, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
template<class T, class H, class KE, class A> template<class T, class H, class KE, class A>
void unpack(std::unordered_set<T,H,KE,A>& data, void unpack(std::unordered_set<T,H,KE,A>& data,
std::vector<char>& buffer, int& position, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
template<class T, size_t N> template<class T, size_t N>
void unpack(std::array<T,N>& data, std::vector<char>& buffer, int& position, void unpack(std::array<T,N>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
template<class T1, class T2, class C, class A> template<class T1, class T2, class C, class A>
void unpack(std::map<T1,T2,C,A>& data, std::vector<char>& buffer, int& position, void unpack(std::map<T1,T2,C,A>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
template<class T1, class T2, class H, class P, class A> template<class T1, class T2, class H, class P, class A>
void unpack(std::unordered_map<T1,T2,H,P,A>& data, std::vector<char>& buffer, int& position, void unpack(std::unordered_map<T1,T2,H,P,A>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
void unpack(char* str, std::size_t length, std::vector<char>& buffer, int& position, void unpack(char* str, std::size_t length, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
/// prototypes for complex types /// prototypes for complex types
#define ADD_PACK_PROTOTYPES(T) \ #define ADD_PACK_PROTOTYPES(T) \
std::size_t packSize(const T& data, Dune::MPIHelper::MPICommunicator comm); \ std::size_t packSize(const T& data, Parallel::MPIComm comm); \
void pack(const T& data, std::vector<char>& buffer, int& position, \ void pack(const T& data, std::vector<char>& buffer, int& position, \
Dune::MPIHelper::MPICommunicator comm); \ Parallel::MPIComm comm); \
void unpack(T& data, std::vector<char>& buffer, int& position, \ void unpack(T& data, std::vector<char>& buffer, int& position, \
Dune::MPIHelper::MPICommunicator comm); Parallel::MPIComm comm);
ADD_PACK_PROTOTYPES(data::AquiferData) ADD_PACK_PROTOTYPES(data::AquiferData)
ADD_PACK_PROTOTYPES(data::CarterTracyData) ADD_PACK_PROTOTYPES(data::CarterTracyData)
@ -363,7 +373,7 @@ ADD_PACK_PROTOTYPES(time_point)
RestartValue loadParallelRestart(const EclipseIO* eclIO, Action::State& actionState, SummaryState& summaryState, RestartValue loadParallelRestart(const EclipseIO* eclIO, Action::State& actionState, SummaryState& summaryState,
const std::vector<RestartKey>& solutionKeys, const std::vector<RestartKey>& solutionKeys,
const std::vector<RestartKey>& extraKeys, const std::vector<RestartKey>& extraKeys,
Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator> comm); Parallel::Communication comm);
} // end namespace Opm } // end namespace Opm
#endif // PARALLEL_RESTART_HPP #endif // PARALLEL_RESTART_HPP
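For orientation, here is a minimal usage sketch of the buffer-based helpers declared above, assuming the declarations are in scope and that a matching packSize overload exists for T (as ADD_PACK_PROTOTYPES suggests); the helper name roundTrip and the unqualified calls are illustrative, not part of the commit.

    // Illustrative only: measure, pack, and unpack a value, passing the
    // communicator explicitly instead of relying on MPI_COMM_WORLD.
    template <class T>
    void roundTrip(const T& in, T& out, Parallel::MPIComm comm)
    {
        std::vector<char> buffer(packSize(in, comm)); // query the packed size first
        int position = 0;
        pack(in, buffer, position, comm);             // serialize into the buffer
        position = 0;
        unpack(out, buffer, position, comm);          // read it back
    }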
View File
@ -40,12 +40,13 @@
namespace Opm { namespace Opm {
void eclStateBroadcast(EclipseState& eclState, Schedule& schedule,
void eclStateBroadcast(Parallel::Communication comm, EclipseState& eclState, Schedule& schedule,
SummaryConfig& summaryConfig, SummaryConfig& summaryConfig,
UDQState& udqState, UDQState& udqState,
Action::State& actionState) Action::State& actionState)
{ {
Opm::EclMpiSerializer ser(Dune::MPIHelper::getCollectiveCommunication()); Opm::EclMpiSerializer ser(comm);
ser.broadcast(eclState); ser.broadcast(eclState);
ser.broadcast(schedule); ser.broadcast(schedule);
ser.broadcast(summaryConfig); ser.broadcast(summaryConfig);
@ -53,9 +54,9 @@ void eclStateBroadcast(EclipseState& eclState, Schedule& schedule,
ser.broadcast(actionState); ser.broadcast(actionState);
} }
void eclScheduleBroadcast(Schedule& schedule) void eclScheduleBroadcast(Parallel::Communication comm, Schedule& schedule)
{ {
Opm::EclMpiSerializer ser(Dune::MPIHelper::getCollectiveCommunication()); Opm::EclMpiSerializer ser(comm);
ser.broadcast(schedule); ser.broadcast(schedule);
} }
} }
View File
@ -19,6 +19,17 @@
#ifndef PARALLEL_SERIALIZATION_HPP #ifndef PARALLEL_SERIALIZATION_HPP
#define PARALLEL_SERIALIZATION_HPP #define PARALLEL_SERIALIZATION_HPP
#include <dune/common/version.hh>
#include <dune/common/parallel/mpihelper.hh>
namespace Opm::Parallel {
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
using Communication = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
#else
using Communication = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
#endif
}
namespace Opm { namespace Opm {
class EclipseState; class EclipseState;
@ -36,13 +47,13 @@ class State;
*! \param schedule Schedule to broadcast *! \param schedule Schedule to broadcast
*! \param summaryConfig SummaryConfig to broadcast *! \param summaryConfig SummaryConfig to broadcast
*/ */
void eclStateBroadcast(EclipseState& eclState, Schedule& schedule, void eclStateBroadcast(Parallel::Communication comm, EclipseState& eclState, Schedule& schedule,
SummaryConfig& summaryConfig, SummaryConfig& summaryConfig,
UDQState& udqState, UDQState& udqState,
Action::State& actionState); Action::State& actionState);
/// \brief Broadcasts a schedule from the root node in parallel runs. /// \brief Broadcasts a schedule from the root node in parallel runs.
void eclScheduleBroadcast(Schedule& schedule); void eclScheduleBroadcast(Parallel::Communication comm, Schedule& schedule);
} // end namespace Opm } // end namespace Opm
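A caller-side sketch of the new interface: the communicator is obtained once (here from Dune::MPIHelper, as the updated tests in this commit do) and passed into the broadcast routines instead of being looked up globally inside them; the state objects are placeholders whose construction is omitted.

    // Illustrative only:
    Opm::Parallel::Communication comm = Dune::MPIHelper::getCollectiveCommunication();
    Opm::eclStateBroadcast(comm, eclState, schedule, summaryConfig, udqState, actionState);
    // or, when only the schedule needs to be synchronized from the root rank:
    Opm::eclScheduleBroadcast(comm, schedule);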
View File
@ -69,7 +69,7 @@ public:
m_centroids(centroids) m_centroids(centroids)
{ {
// Scatter the keys // Scatter the keys
const auto& comm = Dune::MPIHelper::getCollectiveCommunication(); const Parallel::Communication comm = m_grid.comm();
if (comm.rank() == 0) if (comm.rank() == 0)
{ {
const auto& globalProps = eclState.globalFieldProps(); const auto& globalProps = eclState.globalFieldProps();
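The change above is the pattern applied throughout the commit: take the communicator from the object that owns the distributed data (here the grid) instead of the process-global helper. Schematically:

    // Sketch of the pattern; m_grid and the root-only body are as in the code above.
    const Opm::Parallel::Communication comm = m_grid.comm();
    if (comm.rank() == 0) {
        // root-only work, e.g. accessing the global field properties
    }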
View File
@ -32,55 +32,55 @@
namespace namespace
{ {
void packMessages(const std::vector<Opm::DeferredLogger::Message>& local_messages, std::vector<char>& buf, int& offset) void packMessages(const std::vector<Opm::DeferredLogger::Message>& local_messages, std::vector<char>& buf, int& offset, const Opm::Parallel::Communication mpi_communicator)
{ {
int messagesize = local_messages.size(); int messagesize = local_messages.size();
MPI_Pack(&messagesize, 1, MPI_UNSIGNED, buf.data(), buf.size(), &offset, MPI_COMM_WORLD); MPI_Pack(&messagesize, 1, MPI_UNSIGNED, buf.data(), buf.size(), &offset, mpi_communicator);
for (const auto& lm : local_messages) { for (const auto& lm : local_messages) {
MPI_Pack(static_cast<void*>(const_cast<std::int64_t*>(&lm.flag)), 1, MPI_INT64_T, buf.data(), buf.size(), &offset, MPI_COMM_WORLD); MPI_Pack(static_cast<void*>(const_cast<std::int64_t*>(&lm.flag)), 1, MPI_INT64_T, buf.data(), buf.size(), &offset, mpi_communicator);
int tagsize = lm.tag.size(); int tagsize = lm.tag.size();
MPI_Pack(&tagsize, 1, MPI_UNSIGNED, buf.data(), buf.size(), &offset, MPI_COMM_WORLD); MPI_Pack(&tagsize, 1, MPI_UNSIGNED, buf.data(), buf.size(), &offset, mpi_communicator);
if (tagsize>0) { if (tagsize>0) {
MPI_Pack(const_cast<char*>(lm.tag.c_str()), lm.tag.size(), MPI_CHAR, buf.data(), buf.size(), &offset, MPI_COMM_WORLD); MPI_Pack(const_cast<char*>(lm.tag.c_str()), lm.tag.size(), MPI_CHAR, buf.data(), buf.size(), &offset, mpi_communicator);
} }
int textsize = lm.text.size(); int textsize = lm.text.size();
MPI_Pack(&textsize, 1, MPI_UNSIGNED, buf.data(), buf.size(), &offset, MPI_COMM_WORLD); MPI_Pack(&textsize, 1, MPI_UNSIGNED, buf.data(), buf.size(), &offset, mpi_communicator);
if (textsize>0) { if (textsize>0) {
MPI_Pack(const_cast<char*>(lm.text.c_str()), lm.text.size(), MPI_CHAR, buf.data(), buf.size(), &offset, MPI_COMM_WORLD); MPI_Pack(const_cast<char*>(lm.text.c_str()), lm.text.size(), MPI_CHAR, buf.data(), buf.size(), &offset, mpi_communicator);
} }
} }
} }
Opm::DeferredLogger::Message unpackSingleMessage(const std::vector<char>& recv_buffer, int& offset) Opm::DeferredLogger::Message unpackSingleMessage(const std::vector<char>& recv_buffer, int& offset, const MPI_Comm mpi_communicator)
{ {
int64_t flag; int64_t flag;
auto* data = const_cast<char*>(recv_buffer.data()); auto* data = const_cast<char*>(recv_buffer.data());
MPI_Unpack(data, recv_buffer.size(), &offset, &flag, 1, MPI_INT64_T, MPI_COMM_WORLD); MPI_Unpack(data, recv_buffer.size(), &offset, &flag, 1, MPI_INT64_T, mpi_communicator);
// unpack tag // unpack tag
unsigned int tagsize; unsigned int tagsize;
MPI_Unpack(data, recv_buffer.size(), &offset, &tagsize, 1, MPI_UNSIGNED, MPI_COMM_WORLD); MPI_Unpack(data, recv_buffer.size(), &offset, &tagsize, 1, MPI_UNSIGNED, mpi_communicator);
std::string tag; std::string tag;
if (tagsize>0) { if (tagsize>0) {
std::vector<char> tagchars(tagsize); std::vector<char> tagchars(tagsize);
MPI_Unpack(data, recv_buffer.size(), &offset, tagchars.data(), tagsize, MPI_CHAR, MPI_COMM_WORLD); MPI_Unpack(data, recv_buffer.size(), &offset, tagchars.data(), tagsize, MPI_CHAR, mpi_communicator);
tag = std::string(tagchars.data(), tagsize); tag = std::string(tagchars.data(), tagsize);
} }
// unpack text // unpack text
unsigned int textsize; unsigned int textsize;
MPI_Unpack(data, recv_buffer.size(), &offset, &textsize, 1, MPI_UNSIGNED, MPI_COMM_WORLD); MPI_Unpack(data, recv_buffer.size(), &offset, &textsize, 1, MPI_UNSIGNED, mpi_communicator);
std::string text; std::string text;
if (textsize>0) { if (textsize>0) {
std::vector<char> textchars(textsize); std::vector<char> textchars(textsize);
MPI_Unpack(data, recv_buffer.size(), &offset, textchars.data(), textsize, MPI_CHAR, MPI_COMM_WORLD); MPI_Unpack(data, recv_buffer.size(), &offset, textchars.data(), textsize, MPI_CHAR, mpi_communicator);
text = std::string (textchars.data(), textsize); text = std::string (textchars.data(), textsize);
} }
return Opm::DeferredLogger::Message({flag, tag, text}); return Opm::DeferredLogger::Message({flag, tag, text});
} }
std::vector<Opm::DeferredLogger::Message> unpackMessages(const std::vector<char>& recv_buffer, const std::vector<int>& displ) std::vector<Opm::DeferredLogger::Message> unpackMessages(const std::vector<char>& recv_buffer, const std::vector<int>& displ, const MPI_Comm mpi_communicator)
{ {
std::vector<Opm::DeferredLogger::Message> messages; std::vector<Opm::DeferredLogger::Message> messages;
const int num_processes = displ.size() - 1; const int num_processes = displ.size() - 1;
@ -89,9 +89,9 @@ namespace
int offset = displ[process]; int offset = displ[process];
// unpack number of messages // unpack number of messages
unsigned int messagesize; unsigned int messagesize;
MPI_Unpack(data, recv_buffer.size(), &offset, &messagesize, 1, MPI_UNSIGNED, MPI_COMM_WORLD); MPI_Unpack(data, recv_buffer.size(), &offset, &messagesize, 1, MPI_UNSIGNED, mpi_communicator);
for (unsigned int i=0; i<messagesize; i++) { for (unsigned int i=0; i<messagesize; i++) {
messages.push_back(unpackSingleMessage(recv_buffer, offset)); messages.push_back(unpackSingleMessage(recv_buffer, offset, mpi_communicator));
} }
assert(offset == displ[process + 1]); assert(offset == displ[process + 1]);
} }
@ -105,15 +105,16 @@ namespace Opm
{ {
/// combine (per-process) messages /// combine (per-process) messages
Opm::DeferredLogger gatherDeferredLogger(const Opm::DeferredLogger& local_deferredlogger) Opm::DeferredLogger gatherDeferredLogger(const Opm::DeferredLogger& local_deferredlogger,
Opm::Parallel::Communication mpi_communicator)
{ {
int num_messages = local_deferredlogger.messages_.size(); int num_messages = local_deferredlogger.messages_.size();
int int64_mpi_pack_size; int int64_mpi_pack_size;
MPI_Pack_size(1, MPI_INT64_T, MPI_COMM_WORLD, &int64_mpi_pack_size); MPI_Pack_size(1, MPI_INT64_T, mpi_communicator, &int64_mpi_pack_size);
int unsigned_int_mpi_pack_size; int unsigned_int_mpi_pack_size;
MPI_Pack_size(1, MPI_UNSIGNED, MPI_COMM_WORLD, &unsigned_int_mpi_pack_size); MPI_Pack_size(1, MPI_UNSIGNED, mpi_communicator, &unsigned_int_mpi_pack_size);
// store number of messages; // store number of messages;
int message_size = unsigned_int_mpi_pack_size; int message_size = unsigned_int_mpi_pack_size;
@ -124,9 +125,9 @@ namespace Opm
for (const auto& lm : local_deferredlogger.messages_) { for (const auto& lm : local_deferredlogger.messages_) {
int string_mpi_pack_size; int string_mpi_pack_size;
MPI_Pack_size(lm.tag.size(), MPI_CHAR, MPI_COMM_WORLD, &string_mpi_pack_size); MPI_Pack_size(lm.tag.size(), MPI_CHAR, mpi_communicator, &string_mpi_pack_size);
message_size += string_mpi_pack_size; message_size += string_mpi_pack_size;
MPI_Pack_size(lm.text.size(), MPI_CHAR, MPI_COMM_WORLD, &string_mpi_pack_size); MPI_Pack_size(lm.text.size(), MPI_CHAR, mpi_communicator, &string_mpi_pack_size);
message_size += string_mpi_pack_size; message_size += string_mpi_pack_size;
} }
@ -134,14 +135,14 @@ namespace Opm
std::vector<char> buffer(message_size); std::vector<char> buffer(message_size);
int offset = 0; int offset = 0;
packMessages(local_deferredlogger.messages_, buffer, offset); packMessages(local_deferredlogger.messages_, buffer, offset, mpi_communicator);
assert(offset == message_size); assert(offset == message_size);
// Get message sizes and create offset/displacement array for gathering. // Get message sizes and create offset/displacement array for gathering.
int num_processes = -1; int num_processes = -1;
MPI_Comm_size(MPI_COMM_WORLD, &num_processes); MPI_Comm_size(mpi_communicator, &num_processes);
std::vector<int> message_sizes(num_processes); std::vector<int> message_sizes(num_processes);
MPI_Allgather(&message_size, 1, MPI_INT, message_sizes.data(), 1, MPI_INT, MPI_COMM_WORLD); MPI_Allgather(&message_size, 1, MPI_INT, message_sizes.data(), 1, MPI_INT, mpi_communicator);
std::vector<int> displ(num_processes + 1, 0); std::vector<int> displ(num_processes + 1, 0);
std::partial_sum(message_sizes.begin(), message_sizes.end(), displ.begin() + 1); std::partial_sum(message_sizes.begin(), message_sizes.end(), displ.begin() + 1);
@ -150,11 +151,11 @@ namespace Opm
MPI_Allgatherv(buffer.data(), buffer.size(), MPI_PACKED, MPI_Allgatherv(buffer.data(), buffer.size(), MPI_PACKED,
const_cast<char*>(recv_buffer.data()), message_sizes.data(), const_cast<char*>(recv_buffer.data()), message_sizes.data(),
displ.data(), MPI_PACKED, displ.data(), MPI_PACKED,
MPI_COMM_WORLD); mpi_communicator);
// Unpack. // Unpack.
Opm::DeferredLogger global_deferredlogger; Opm::DeferredLogger global_deferredlogger;
global_deferredlogger.messages_ = unpackMessages(recv_buffer, displ); global_deferredlogger.messages_ = unpackMessages(recv_buffer, displ, mpi_communicator);
return global_deferredlogger; return global_deferredlogger;
} }
@ -164,7 +165,8 @@ namespace Opm
namespace Opm namespace Opm
{ {
Opm::DeferredLogger gatherDeferredLogger(const Opm::DeferredLogger& local_deferredlogger) Opm::DeferredLogger gatherDeferredLogger(const Opm::DeferredLogger& local_deferredlogger,
Opm::Parallel::Communication /* dummy communicator */)
{ {
return local_deferredlogger; return local_deferredlogger;
} }
View File
@ -21,13 +21,15 @@
#ifndef OPM_GATHERDEFERREDLOGGER_HEADER_INCLUDED #ifndef OPM_GATHERDEFERREDLOGGER_HEADER_INCLUDED
#define OPM_GATHERDEFERREDLOGGER_HEADER_INCLUDED #define OPM_GATHERDEFERREDLOGGER_HEADER_INCLUDED
#include <dune/common/parallel/mpihelper.hh>
#include <opm/simulators/utils/DeferredLogger.hpp> #include <opm/simulators/utils/DeferredLogger.hpp>
#include <dune/common/version.hh>
namespace Opm namespace Opm
{ {
/// Create a global log combining local logs /// Create a global log combining local logs
Opm::DeferredLogger gatherDeferredLogger(const Opm::DeferredLogger& local_deferredlogger); Opm::DeferredLogger gatherDeferredLogger(const Opm::DeferredLogger& local_deferredlogger, Parallel::Communication communicator);
} // namespace Opm } // namespace Opm
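A usage sketch that mirrors the unit tests later in this commit: every rank collects messages in a local DeferredLogger, the loggers are gathered over an explicit communicator, and only rank 0 prints the combined log. Taking the communicator from Dune::MPIHelper here is an assumption for illustration; inside the simulator it comes from the grid.

    // Illustrative only:
    const Opm::Parallel::Communication comm = Dune::MPIHelper::getCollectiveCommunication();
    Opm::DeferredLogger local_log;
    local_log.info("info from rank " + std::to_string(comm.rank()));
    Opm::DeferredLogger global_log = Opm::gatherDeferredLogger(local_log, comm);
    if (comm.rank() == 0)
        global_log.logMessages();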
View File
@ -191,7 +191,7 @@ void setupMessageLimiter(const Opm::MessageLimits msgLimits, const std::string&
} }
void readDeck(int rank, std::string& deckFilename, std::shared_ptr<Opm::Deck>& deck, std::shared_ptr<Opm::EclipseState>& eclipseState, void readDeck(Opm::Parallel::Communication comm, std::string& deckFilename, std::shared_ptr<Opm::Deck>& deck, std::shared_ptr<Opm::EclipseState>& eclipseState,
std::shared_ptr<Opm::Schedule>& schedule, std::unique_ptr<UDQState>& udqState, std::unique_ptr<Action::State>& actionState, std::shared_ptr<Opm::SummaryConfig>& summaryConfig, std::shared_ptr<Opm::Schedule>& schedule, std::unique_ptr<UDQState>& udqState, std::unique_ptr<Action::State>& actionState, std::shared_ptr<Opm::SummaryConfig>& summaryConfig,
std::unique_ptr<ErrorGuard> errorGuard, std::shared_ptr<Opm::Python>& python, std::unique_ptr<ParseContext> parseContext, std::unique_ptr<ErrorGuard> errorGuard, std::shared_ptr<Opm::Python>& python, std::unique_ptr<ParseContext> parseContext,
bool initFromRestart, bool checkDeck, const std::optional<int>& outputInterval) bool initFromRestart, bool checkDeck, const std::optional<int>& outputInterval)
@ -203,6 +203,7 @@ void readDeck(int rank, std::string& deckFilename, std::shared_ptr<Opm::Deck>& d
int parseSuccess = 1; // > 0 is success int parseSuccess = 1; // > 0 is success
std::string failureMessage; std::string failureMessage;
int rank = comm.rank();
if (rank==0) { if (rank==0) {
try try
@ -216,6 +217,7 @@ void readDeck(int rank, std::string& deckFilename, std::shared_ptr<Opm::Deck>& d
if (!deck) if (!deck)
{ {
deck = std::make_unique<Opm::Deck>( parser.parseFile(deckFilename , *parseContext, *errorGuard)); deck = std::make_unique<Opm::Deck>( parser.parseFile(deckFilename , *parseContext, *errorGuard));
Opm::KeywordValidation::KeywordValidator keyword_validator( Opm::KeywordValidation::KeywordValidator keyword_validator(
@ -233,7 +235,7 @@ void readDeck(int rank, std::string& deckFilename, std::shared_ptr<Opm::Deck>& d
if (!eclipseState) { if (!eclipseState) {
#if HAVE_MPI #if HAVE_MPI
eclipseState = std::make_unique<Opm::ParallelEclipseState>(*deck); eclipseState = std::make_unique<Opm::ParallelEclipseState>(*deck, comm);
#else #else
eclipseState = std::make_unique<Opm::EclipseState>(*deck); eclipseState = std::make_unique<Opm::EclipseState>(*deck);
#endif #endif
@ -324,7 +326,7 @@ void readDeck(int rank, std::string& deckFilename, std::shared_ptr<Opm::Deck>& d
if (!schedule) if (!schedule)
schedule = std::make_unique<Opm::Schedule>(python); schedule = std::make_unique<Opm::Schedule>(python);
if (!eclipseState) if (!eclipseState)
eclipseState = std::make_unique<Opm::ParallelEclipseState>(); eclipseState = std::make_unique<Opm::ParallelEclipseState>(comm);
if (!udqState) if (!udqState)
udqState = std::make_unique<UDQState>(0); udqState = std::make_unique<UDQState>(0);
if (!actionState) if (!actionState)
@ -334,13 +336,12 @@ void readDeck(int rank, std::string& deckFilename, std::shared_ptr<Opm::Deck>& d
// In case of parse errors eclipseState/schedule might be null // In case of parse errors eclipseState/schedule might be null
// and trigger segmentation faults in parallel during broadcast // and trigger segmentation faults in parallel during broadcast
// (e.g. when serializing the non-existent TableManager) // (e.g. when serializing the non-existent TableManager)
auto comm = Dune::MPIHelper::getCollectiveCommunication();
parseSuccess = comm.min(parseSuccess); parseSuccess = comm.min(parseSuccess);
try try
{ {
if (parseSuccess) if (parseSuccess)
{ {
Opm::eclStateBroadcast(*eclipseState, *schedule, *summaryConfig, *udqState, *actionState); Opm::eclStateBroadcast(comm, *eclipseState, *schedule, *summaryConfig, *udqState, *actionState);
} }
} }
catch(const std::exception& broadcast_error) catch(const std::exception& broadcast_error)
@ -359,7 +360,7 @@ void readDeck(int rank, std::string& deckFilename, std::shared_ptr<Opm::Deck>& d
errorGuard->clear(); errorGuard->clear();
} }
parseSuccess = Dune::MPIHelper::getCollectiveCommunication().min(parseSuccess); parseSuccess = comm.min(parseSuccess);
if (!parseSuccess) if (!parseSuccess)
{ {
View File
@ -22,6 +22,8 @@
#ifndef OPM_READDECK_HEADER_INCLUDED #ifndef OPM_READDECK_HEADER_INCLUDED
#define OPM_READDECK_HEADER_INCLUDED #define OPM_READDECK_HEADER_INCLUDED
#include <dune/common/version.hh>
#include <dune/common/parallel/mpihelper.hh>
#include <memory> #include <memory>
#include <optional> #include <optional>
#include <string> #include <string>
@ -42,6 +44,14 @@ namespace Action {
class State; class State;
} }
namespace Parallel {
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
using Communication = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
#else
using Communication = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
#endif
}
enum class FileOutputMode { enum class FileOutputMode {
//! \brief No output to files. //! \brief No output to files.
OUTPUT_NONE = 0, OUTPUT_NONE = 0,
@ -57,7 +67,7 @@ FileOutputMode setupLogging(int mpi_rank_, const std::string& deck_filename, con
/// \brief Reads the deck and creates all necessary objects if needed /// \brief Reads the deck and creates all necessary objects if needed
/// ///
/// If the pointers already contain objects then they are used, otherwise they are created and can be used outside later. /// If the pointers already contain objects then they are used, otherwise they are created and can be used outside later.
void readDeck(int rank, std::string& deckFilename, std::shared_ptr<Deck>& deck, std::shared_ptr<EclipseState>& eclipseState, void readDeck(Parallel::Communication comm, std::string& deckFilename, std::shared_ptr<Deck>& deck, std::shared_ptr<EclipseState>& eclipseState,
std::shared_ptr<Schedule>& schedule, std::unique_ptr<UDQState>& udqState, std::unique_ptr<Action::State>& actionState, std::shared_ptr<SummaryConfig>& summaryConfig, std::shared_ptr<Schedule>& schedule, std::unique_ptr<UDQState>& udqState, std::unique_ptr<Action::State>& actionState, std::shared_ptr<SummaryConfig>& summaryConfig,
std::unique_ptr<ErrorGuard> errorGuard, std::shared_ptr<Python>& python, std::unique_ptr<ParseContext> parseContext, std::unique_ptr<ErrorGuard> errorGuard, std::shared_ptr<Python>& python, std::unique_ptr<ParseContext> parseContext,
bool initFromRestart, bool checkDeck, const std::optional<int>& outputInterval); bool initFromRestart, bool checkDeck, const std::optional<int>& outputInterval);
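A caller-side sketch of the revised readDeck entry point: the communicator replaces the bare rank argument, and the rank is recovered inside via comm.rank(). All other arguments are placeholders; their construction is omitted and the flag values are arbitrary.

    // Illustrative only; deck, eclipseState, schedule, etc. are default-constructed
    // smart pointers that readDeck fills in.
    Opm::Parallel::Communication comm = Dune::MPIHelper::getCollectiveCommunication();
    Opm::readDeck(comm, deckFilename, deck, eclipseState, schedule,
                  udqState, actionState, summaryConfig,
                  std::move(errorGuard), python, std::move(parseContext),
                  /*initFromRestart=*/false, /*checkDeck=*/true, outputInterval);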
View File
@ -49,7 +49,7 @@ BlackoilWellModelGeneric(Schedule& schedule,
const SummaryState& summaryState, const SummaryState& summaryState,
const EclipseState& eclState, const EclipseState& eclState,
const PhaseUsage& phase_usage, const PhaseUsage& phase_usage,
const Comm& comm) const Parallel::Communication& comm)
: schedule_(schedule) : schedule_(schedule)
, summaryState_(summaryState) , summaryState_(summaryState)
, eclState_(eclState) , eclState_(eclState)
@ -1028,7 +1028,7 @@ actionOnBrokenConstraints(const Group& group,
throw("Invalid procedure for maximum rate limit selected for group" + group.name()); throw("Invalid procedure for maximum rate limit selected for group" + group.name());
} }
auto cc = Dune::MPIHelper::getCollectiveCommunication(); Parallel::Communication cc = comm_;
if (!ss.str().empty() && cc.rank() == 0) if (!ss.str().empty() && cc.rank() == 0)
deferred_logger.info(ss.str()); deferred_logger.info(ss.str());
} }
@ -1050,7 +1050,8 @@ actionOnBrokenConstraints(const Group& group,
<< " to " << Group::InjectionCMode2String(newControl); << " to " << Group::InjectionCMode2String(newControl);
this->groupState().injection_control(group.name(), controlPhase, newControl); this->groupState().injection_control(group.name(), controlPhase, newControl);
} }
auto cc = Dune::MPIHelper::getCollectiveCommunication();
Parallel::Communication cc = comm_;
if (!ss.str().empty() && cc.rank() == 0) if (!ss.str().empty() && cc.rank() == 0)
deferred_logger.info(ss.str()); deferred_logger.info(ss.str());
} }
@ -1819,7 +1820,7 @@ updateWellPotentials(const int reportStepIdx,
} }
logAndCheckForExceptionsAndThrow(deferred_logger, exc_type, logAndCheckForExceptionsAndThrow(deferred_logger, exc_type,
"computeWellPotentials() failed: " + exc_msg, "computeWellPotentials() failed: " + exc_msg,
terminal_output_); terminal_output_, comm_);
} }
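Reduced to its essentials, the logging change above looks like this: messages are assembled on every rank, but only rank 0 of the stored communicator emits them, so the output follows whatever communicator the well model was constructed with rather than MPI_COMM_WORLD. Sketch only; the message text is a placeholder.

    std::ostringstream ss;
    ss << "Switching control mode for group ...";   // assembled per rank
    Parallel::Communication cc = comm_;             // member set at construction
    if (!ss.str().empty() && cc.rank() == 0)
        deferred_logger.info(ss.str());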
View File
@ -32,6 +32,7 @@
#include <unordered_set> #include <unordered_set>
#include <vector> #include <vector>
#include <dune/common/version.hh>
#include <opm/output/data/GuideRateValue.hpp> #include <opm/output/data/GuideRateValue.hpp>
#include <opm/parser/eclipse/EclipseState/Schedule/Well/WellTestState.hpp> #include <opm/parser/eclipse/EclipseState/Schedule/Well/WellTestState.hpp>
#include <opm/parser/eclipse/EclipseState/Schedule/Group/GuideRate.hpp> #include <opm/parser/eclipse/EclipseState/Schedule/Group/GuideRate.hpp>
@ -68,7 +69,6 @@ class BlackoilWellModelGeneric
{ {
public: public:
// --------- Types --------- // --------- Types ---------
using Comm = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
using GLiftOptWells = std::map<std::string,std::unique_ptr<GasLiftSingleWellGeneric>>; using GLiftOptWells = std::map<std::string,std::unique_ptr<GasLiftSingleWellGeneric>>;
using GLiftProdWells = std::map<std::string,const WellInterfaceGeneric*>; using GLiftProdWells = std::map<std::string,const WellInterfaceGeneric*>;
using GLiftWellStateMap = std::map<std::string,std::unique_ptr<GasLiftWellState>>; using GLiftWellStateMap = std::map<std::string,std::unique_ptr<GasLiftWellState>>;
@ -77,7 +77,7 @@ public:
const SummaryState& summaryState, const SummaryState& summaryState,
const EclipseState& eclState, const EclipseState& eclState,
const PhaseUsage& phase_usage, const PhaseUsage& phase_usage,
const Comm& comm); const Parallel::Communication& comm);
virtual ~BlackoilWellModelGeneric() = default; virtual ~BlackoilWellModelGeneric() = default;
@ -376,7 +376,7 @@ protected:
Schedule& schedule_; Schedule& schedule_;
const SummaryState& summaryState_; const SummaryState& summaryState_;
const EclipseState& eclState_; const EclipseState& eclState_;
const Comm& comm_; const Parallel::Communication& comm_;
PhaseUsage phase_usage_; PhaseUsage phase_usage_;
bool terminal_output_{false}; bool terminal_output_{false};
View File
@ -59,8 +59,10 @@ namespace Opm {
cartDims[0] * cartDims[1] * cartDims[2]); cartDims[0] * cartDims[1] * cartDims[2]);
auto& parallel_wells = ebosSimulator.vanguard().parallelWells(); auto& parallel_wells = ebosSimulator.vanguard().parallelWells();
this->parallel_well_info_.assign(parallel_wells.begin(),
parallel_wells.end()); for (const auto& wellinfo : parallel_wells) {
this->parallel_well_info_.emplace_back(wellinfo, grid.comm());
}
} }
this->alternative_well_rate_init_ = this->alternative_well_rate_init_ =
@ -232,7 +234,7 @@ namespace Opm {
} }
} }
OPM_END_PARALLEL_TRY_CATCH_LOG(local_deferredLogger, "beginReportStep() failed: ", OPM_END_PARALLEL_TRY_CATCH_LOG(local_deferredLogger, "beginReportStep() failed: ",
terminal_output_); terminal_output_, grid.comm());
// Store the current well state, to be able to recover in the case of failed iterations // Store the current well state, to be able to recover in the case of failed iterations
this->commitWGState(); this->commitWGState();
} }
@ -246,7 +248,6 @@ namespace Opm {
{ {
updatePerforationIntensiveQuantities(); updatePerforationIntensiveQuantities();
updateAverageFormationFactor(); updateAverageFormationFactor();
DeferredLogger local_deferredLogger; DeferredLogger local_deferredLogger;
this->resetWGState(); this->resetWGState();
@ -283,9 +284,10 @@ namespace Opm {
setRepRadiusPerfLength(); setRepRadiusPerfLength();
} }
} }
} }
OPM_END_PARALLEL_TRY_CATCH_LOG(local_deferredLogger, "beginTimeStep() failed: ", OPM_END_PARALLEL_TRY_CATCH_LOG(local_deferredLogger, "beginTimeStep() failed: ",
terminal_output_); terminal_output_, ebosSimulator_.vanguard().grid().comm());
for (auto& well : well_container_) { for (auto& well : well_container_) {
well->setVFPProperties(vfp_properties_.get()); well->setVFPProperties(vfp_properties_.get());
@ -370,7 +372,7 @@ namespace Opm {
} }
logAndCheckForExceptionsAndThrow(local_deferredLogger, logAndCheckForExceptionsAndThrow(local_deferredLogger,
exc_type, "beginTimeStep() failed: " + exc_msg, terminal_output_); exc_type, "beginTimeStep() failed: " + exc_msg, terminal_output_, comm);
} }
@ -480,7 +482,8 @@ namespace Opm {
this->commitWGState(); this->commitWGState();
DeferredLogger global_deferredLogger = gatherDeferredLogger(local_deferredLogger); const Opm::Parallel::Communication& comm = grid().comm();
DeferredLogger global_deferredLogger = gatherDeferredLogger(local_deferredLogger, comm);
if (terminal_output_) { if (terminal_output_) {
global_deferredLogger.logMessages(); global_deferredLogger.logMessages();
} }
@ -546,7 +549,7 @@ namespace Opm {
perf_pressure = fs.pressure(FluidSystem::gasPhaseIdx).value(); perf_pressure = fs.pressure(FluidSystem::gasPhaseIdx).value();
} }
} }
OPM_END_PARALLEL_TRY_CATCH("BlackoilWellModel::initializeWellState() failed: "); OPM_END_PARALLEL_TRY_CATCH("BlackoilWellModel::initializeWellState() failed: ", ebosSimulator_.vanguard().grid().comm());
this->wellState().init(cellPressures, schedule(), wells_ecl_, local_parallel_well_info_, timeStepIdx, this->wellState().init(cellPressures, schedule(), wells_ecl_, local_parallel_well_info_, timeStepIdx,
&this->prevWellState(), well_perf_data_, &this->prevWellState(), well_perf_data_,
@ -691,7 +694,9 @@ namespace Opm {
} }
// Collect log messages and print. // Collect log messages and print.
DeferredLogger global_deferredLogger = gatherDeferredLogger(local_deferredLogger);
const Opm::Parallel::Communication& comm = grid().comm();
DeferredLogger global_deferredLogger = gatherDeferredLogger(local_deferredLogger, comm);
if (terminal_output_) { if (terminal_output_) {
global_deferredLogger.logMessages(); global_deferredLogger.logMessages();
} }
@ -818,7 +823,7 @@ namespace Opm {
prepareTimeStep(local_deferredLogger); prepareTimeStep(local_deferredLogger);
} }
OPM_END_PARALLEL_TRY_CATCH_LOG(local_deferredLogger, "assemble() failed (It=0): ", OPM_END_PARALLEL_TRY_CATCH_LOG(local_deferredLogger, "assemble() failed (It=0): ",
terminal_output_); terminal_output_, grid().comm());
} }
updateWellControls(local_deferredLogger, /* check group controls */ true); updateWellControls(local_deferredLogger, /* check group controls */ true);
@ -831,7 +836,7 @@ namespace Opm {
assembleWellEq(dt, local_deferredLogger); assembleWellEq(dt, local_deferredLogger);
} }
OPM_END_PARALLEL_TRY_CATCH_LOG(local_deferredLogger, "assemble() failed: ", OPM_END_PARALLEL_TRY_CATCH_LOG(local_deferredLogger, "assemble() failed: ",
terminal_output_); terminal_output_, grid().comm());
last_report_.converged = true; last_report_.converged = true;
last_report_.assemble_time_well += perfTimer.stop(); last_report_.assemble_time_well += perfTimer.stop();
} }
@ -1094,6 +1099,7 @@ namespace Opm {
BlackoilWellModel<TypeTag>:: BlackoilWellModel<TypeTag>::
recoverWellSolutionAndUpdateWellState(const BVector& x) recoverWellSolutionAndUpdateWellState(const BVector& x)
{ {
DeferredLogger local_deferredLogger; DeferredLogger local_deferredLogger;
OPM_BEGIN_PARALLEL_TRY_CATCH(); OPM_BEGIN_PARALLEL_TRY_CATCH();
{ {
@ -1102,10 +1108,12 @@ namespace Opm {
well->recoverWellSolutionAndUpdateWellState(x, this->wellState(), local_deferredLogger); well->recoverWellSolutionAndUpdateWellState(x, this->wellState(), local_deferredLogger);
} }
} }
} }
OPM_END_PARALLEL_TRY_CATCH_LOG(local_deferredLogger, OPM_END_PARALLEL_TRY_CATCH_LOG(local_deferredLogger,
"recoverWellSolutionAndUpdateWellState() failed: ", "recoverWellSolutionAndUpdateWellState() failed: ",
terminal_output_); terminal_output_, ebosSimulator_.vanguard().grid().comm());
} }
@ -1141,12 +1149,14 @@ namespace Opm {
local_report += well->getWellConvergence(this->wellState(), B_avg, local_deferredLogger, iterationIdx > param_.strict_outer_iter_wells_ ); local_report += well->getWellConvergence(this->wellState(), B_avg, local_deferredLogger, iterationIdx > param_.strict_outer_iter_wells_ );
} }
} }
DeferredLogger global_deferredLogger = gatherDeferredLogger(local_deferredLogger);
const Opm::Parallel::Communication comm = grid().comm();
DeferredLogger global_deferredLogger = gatherDeferredLogger(local_deferredLogger, comm);
if (terminal_output_) { if (terminal_output_) {
global_deferredLogger.logMessages(); global_deferredLogger.logMessages();
} }
ConvergenceReport report = gatherConvergenceReport(local_report); ConvergenceReport report = gatherConvergenceReport(local_report, comm);
// Log debug messages for NaN or too large residuals. // Log debug messages for NaN or too large residuals.
if (terminal_output_) { if (terminal_output_) {
@ -1294,7 +1304,8 @@ namespace Opm {
} }
} }
DeferredLogger global_deferredLogger = gatherDeferredLogger(local_deferredLogger); const Opm::Parallel::Communication comm = grid().comm();
DeferredLogger global_deferredLogger = gatherDeferredLogger(local_deferredLogger, comm);
if (terminal_output_) { if (terminal_output_) {
global_deferredLogger.logMessages(); global_deferredLogger.logMessages();
} }
@ -1475,7 +1486,7 @@ namespace Opm {
B += 1 / intQuants.solventInverseFormationVolumeFactor().value(); B += 1 / intQuants.solventInverseFormationVolumeFactor().value();
} }
} }
OPM_END_PARALLEL_TRY_CATCH("BlackoilWellModel::updateAverageFormationFactor() failed: ") OPM_END_PARALLEL_TRY_CATCH("BlackoilWellModel::updateAverageFormationFactor() failed: ", grid.comm())
// compute global average // compute global average
grid.comm().sum(B_avg.data(), B_avg.size()); grid.comm().sum(B_avg.data(), B_avg.size());
@ -1565,7 +1576,7 @@ namespace Opm {
} }
elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0); elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0);
} }
OPM_END_PARALLEL_TRY_CATCH("BlackoilWellModel::updatePerforationIntensiveQuantities() failed: "); OPM_END_PARALLEL_TRY_CATCH("BlackoilWellModel::updatePerforationIntensiveQuantities() failed: ", ebosSimulator_.vanguard().grid().comm());
} }
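Throughout this file the parallel try/catch macros now receive the simulation grid's communicator, so the collective exception check involves exactly the ranks that own part of the grid. The general shape, with a placeholder body and message:

    Opm::DeferredLogger local_deferredLogger;
    OPM_BEGIN_PARALLEL_TRY_CATCH();
    {
        // per-rank work that may throw
    }
    OPM_END_PARALLEL_TRY_CATCH_LOG(local_deferredLogger,
                                   "someStep() failed: ",
                                   terminal_output_,
                                   ebosSimulator_.vanguard().grid().comm());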
View File
@ -55,11 +55,10 @@ class GasLiftGroupInfo
using GroupRateMap = using GroupRateMap =
std::map<std::string, GroupRates>; std::map<std::string, GroupRates>;
using GroupIdxMap = std::map<std::string, int>; using GroupIdxMap = std::map<std::string, int>;
using MPIComm = typename Dune::MPIHelper::MPICommunicator;
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7) #if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
using Communication = Dune::Communication<MPIComm>; using Communication = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
#else #else
using Communication = Dune::CollectiveCommunication<MPIComm>; using Communication = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
#endif #endif
// TODO: same definition with WellInterface, and // TODO: same definition with WellInterface, and
@ -78,7 +77,7 @@ public:
const PhaseUsage& phase_usage, const PhaseUsage& phase_usage,
DeferredLogger& deferred_logger, DeferredLogger& deferred_logger,
WellState& well_state, WellState& well_state,
const Communication& comm); const Parallel::Communication& comm);
std::vector<std::pair<std::string,double>>& getWellGroups( std::vector<std::pair<std::string,double>>& getWellGroups(
const std::string& well_name); const std::string& well_name);
@ -163,7 +162,7 @@ private:
const PhaseUsage &phase_usage_; const PhaseUsage &phase_usage_;
DeferredLogger &deferred_logger_; DeferredLogger &deferred_logger_;
WellState &well_state_; WellState &well_state_;
const Communication &comm_; const Parallel::Communication &comm_;
const GasLiftOpt& glo_; const GasLiftOpt& glo_;
GroupRateMap group_rate_map_; GroupRateMap group_rate_map_;
Well2GroupMap well_group_map_; Well2GroupMap well_group_map_;
View File
@ -39,7 +39,7 @@ namespace Opm {
GasLiftStage2::GasLiftStage2( GasLiftStage2::GasLiftStage2(
const int report_step_idx, const int report_step_idx,
const Communication& comm, const Parallel::Communication& comm,
const Schedule& schedule, const Schedule& schedule,
const SummaryState& summary_state, const SummaryState& summary_state,
DeferredLogger &deferred_logger, DeferredLogger &deferred_logger,
View File
@ -55,18 +55,13 @@ class GasLiftStage2 {
using GradInfo = typename GasLiftSingleWellGeneric::GradInfo; using GradInfo = typename GasLiftSingleWellGeneric::GradInfo;
using GradMap = std::map<std::string, GradInfo>; using GradMap = std::map<std::string, GradInfo>;
using MPIComm = typename Dune::MPIHelper::MPICommunicator; using MPIComm = typename Dune::MPIHelper::MPICommunicator;
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
using Communication = Dune::Communication<MPIComm>;
#else
using Communication = Dune::CollectiveCommunication<MPIComm>;
#endif
static const int Water = BlackoilPhases::Aqua; static const int Water = BlackoilPhases::Aqua;
static const int Oil = BlackoilPhases::Liquid; static const int Oil = BlackoilPhases::Liquid;
static const int Gas = BlackoilPhases::Vapour; static const int Gas = BlackoilPhases::Vapour;
public: public:
GasLiftStage2( GasLiftStage2(
const int report_step_idx, const int report_step_idx,
const Communication& comm, const Parallel::Communication& comm,
const Schedule& schedule, const Schedule& schedule,
const SummaryState& summary_state, const SummaryState& summary_state,
DeferredLogger& deferred_logger, DeferredLogger& deferred_logger,
@ -136,7 +131,7 @@ private:
const SummaryState& summary_state_; const SummaryState& summary_state_;
const Schedule& schedule_; const Schedule& schedule_;
const GasLiftOpt& glo_; const GasLiftOpt& glo_;
const Communication& comm_; const Parallel::Communication& comm_;
GradMap inc_grads_; GradMap inc_grads_;
GradMap dec_grads_; GradMap dec_grads_;
bool debug_; bool debug_;
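The gas-lift classes follow the same storage pattern as the well model: the communicator arrives as a constructor argument and is kept as a const reference member, replacing on-the-fly Dune::MPIHelper lookups. A hypothetical, stripped-down illustration of that pattern (class and member names are invented):

    class CommHolderExample {
    public:
        explicit CommHolderExample(const Opm::Parallel::Communication& comm)
            : comm_(comm)   // the referenced communicator must outlive this object
        {}
        bool isIORank() const { return comm_.rank() == 0; }
    private:
        const Opm::Parallel::Communication& comm_;
    };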
View File
@ -357,6 +357,7 @@ ParallelWellInfo::ParallelWellInfo(const std::string& name,
commAboveBelow_(new CommunicateAboveBelow(*comm_)) commAboveBelow_(new CommunicateAboveBelow(*comm_))
{} {}
ParallelWellInfo::ParallelWellInfo(const std::pair<std::string, bool>& well_info, ParallelWellInfo::ParallelWellInfo(const std::pair<std::string, bool>& well_info,
[[maybe_unused]] Communication allComm) [[maybe_unused]] Communication allComm)
: name_(well_info.first), hasLocalCells_(well_info.second), : name_(well_info.first), hasLocalCells_(well_info.second),
@ -374,6 +375,7 @@ ParallelWellInfo::ParallelWellInfo(const std::pair<std::string,bool>& well_info,
isOwner_ = (comm_->rank() == 0); isOwner_ = (comm_->rank() == 0);
} }
void ParallelWellInfo::communicateFirstPerforation(bool hasFirst) void ParallelWellInfo::communicateFirstPerforation(bool hasFirst)
{ {
int first = hasFirst; int first = hasFirst;
View File
@ -255,7 +255,6 @@ public:
#else #else
using Communication = Dune::CollectiveCommunication<MPIComm>; using Communication = Dune::CollectiveCommunication<MPIComm>;
#endif #endif
static constexpr int INVALID_ECL_INDEX = -1; static constexpr int INVALID_ECL_INDEX = -1;
/// \brief Constructs object using MPI_COMM_SELF /// \brief Constructs object using MPI_COMM_SELF
@ -269,7 +268,7 @@ public:
/// \param allComm The communication object with all MPI ranks active in the simulation. /// \param allComm The communication object with all MPI ranks active in the simulation.
/// Default is the one with all ranks available. /// Default is the one with all ranks available.
ParallelWellInfo(const std::pair<std::string,bool>& well_info, ParallelWellInfo(const std::pair<std::string,bool>& well_info,
Communication allComm = Communication()); Communication allComm);
const Communication& communication() const const Communication& communication() const
{ {
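Since the default argument is gone, callers now construct ParallelWellInfo with an explicit communicator, as the updated unit tests below do:

    auto comm = Opm::ParallelWellInfo::Communication(Dune::MPIHelper::getCommunicator());
    Opm::ParallelWellInfo pw({"WELL1", true}, comm);
    pw.communicateFirstPerforation(false);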
View File
@ -196,7 +196,7 @@ namespace Opm {
} }
} }
OPM_END_PARALLEL_TRY_CATCH("SurfaceToReservoirVoidage::defineState() failed: "); OPM_END_PARALLEL_TRY_CATCH("SurfaceToReservoirVoidage::defineState() failed: ", simulator.vanguard().grid().comm());
for (const auto& reg : rmap_.activeRegions()) { for (const auto& reg : rmap_.activeRegions()) {
auto& ra = attr_.attributes(reg); auto& ra = attr_.attributes(reg);
View File
@ -175,7 +175,7 @@ namespace Opm {
} }
} }
} }
OPM_END_PARALLEL_TRY_CATCH("AverageRegionalPressure::defineState(): "); OPM_END_PARALLEL_TRY_CATCH("AverageRegionalPressure::defineState(): ", simulator.vanguard().grid().comm());
for (int reg = 1; reg <= numRegions ; ++ reg) { for (int reg = 1; reg <= numRegions ; ++ reg) {
auto& ra = attr_.attributes(reg); auto& ra = attr_.attributes(reg);
View File
@ -85,7 +85,7 @@ protected:
static const bool gasEnabled = Indices::gasEnabled; static const bool gasEnabled = Indices::gasEnabled;
static const bool oilEnabled = Indices::oilEnabled; static const bool oilEnabled = Indices::oilEnabled;
static constexpr bool has_wfrac_variable = Indices::waterEnabled && Indices::oilEnabled;; static constexpr bool has_wfrac_variable = Indices::waterEnabled && Indices::oilEnabled;
static constexpr bool has_gfrac_variable = Indices::gasEnabled && Indices::numPhases > 1; static constexpr bool has_gfrac_variable = Indices::gasEnabled && Indices::numPhases > 1;
static constexpr int WFrac = has_wfrac_variable ? 1 : -1000; static constexpr int WFrac = has_wfrac_variable ? 1 : -1000;
static constexpr int GFrac = has_gfrac_variable ? has_wfrac_variable + 1 : -1000; static constexpr int GFrac = has_gfrac_variable ? has_wfrac_variable + 1 : -1000;
View File
@ -24,6 +24,8 @@
#include <opm/simulators/wells/GroupState.hpp> #include <opm/simulators/wells/GroupState.hpp>
#include <opm/simulators/wells/TargetCalculator.hpp> #include <opm/simulators/wells/TargetCalculator.hpp>
#include <dune/common/version.hh>
namespace Opm namespace Opm
{ {
@ -201,8 +203,7 @@ namespace Opm
changed = this->checkConstraints(well_state, group_state, schedule, summaryState, deferred_logger); changed = this->checkConstraints(well_state, group_state, schedule, summaryState, deferred_logger);
} }
auto cc = Dune::MPIHelper::getCollectiveCommunication(); Parallel::Communication cc = ebos_simulator.vanguard().grid().comm();
// checking whether control changed // checking whether control changed
if (changed) { if (changed) {
std::string to; std::string to;
View File
@ -34,6 +34,9 @@
#include <opm/parser/eclipse/EclipseState/Schedule/Events.hpp> #include <opm/parser/eclipse/EclipseState/Schedule/Events.hpp>
#include <opm/parser/eclipse/EclipseState/Schedule/Well/Well.hpp> #include <opm/parser/eclipse/EclipseState/Schedule/Well/Well.hpp>
#include <dune/common/version.hh>
#include <dune/common/parallel/mpihelper.hh>
#include <functional> #include <functional>
#include <map> #include <map>
#include <optional> #include <optional>
View File
@ -25,6 +25,7 @@
#include <boost/test/unit_test.hpp> #include <boost/test/unit_test.hpp>
#include <dune/common/version.hh>
#include <opm/simulators/timestepping/gatherConvergenceReport.hpp> #include <opm/simulators/timestepping/gatherConvergenceReport.hpp>
#include <dune/common/parallel/mpihelper.hh> #include <dune/common/parallel/mpihelper.hh>
@ -71,7 +72,7 @@ BOOST_AUTO_TEST_CASE(AllHaveFailure)
using CR = Opm::ConvergenceReport; using CR = Opm::ConvergenceReport;
CR cr; CR cr;
cr.setWellFailed({CR::WellFailure::Type::ControlBHP, CR::Severity::Normal, -1, name.str()}); cr.setWellFailed({CR::WellFailure::Type::ControlBHP, CR::Severity::Normal, -1, name.str()});
CR global_cr = gatherConvergenceReport(cr); CR global_cr = gatherConvergenceReport(cr, cc);
BOOST_CHECK(global_cr.wellFailures().size() == std::size_t(cc.size())); BOOST_CHECK(global_cr.wellFailures().size() == std::size_t(cc.size()));
BOOST_CHECK(global_cr.wellFailures()[cc.rank()] == cr.wellFailures()[0]); BOOST_CHECK(global_cr.wellFailures()[cc.rank()] == cr.wellFailures()[0]);
// Extra output for debugging. // Extra output for debugging.
@ -92,7 +93,7 @@ BOOST_AUTO_TEST_CASE(EvenHaveFailure)
name << "WellRank" << cc.rank() << std::flush; name << "WellRank" << cc.rank() << std::flush;
cr.setWellFailed({CR::WellFailure::Type::ControlBHP, CR::Severity::Normal, -1, name.str()}); cr.setWellFailed({CR::WellFailure::Type::ControlBHP, CR::Severity::Normal, -1, name.str()});
} }
CR global_cr = gatherConvergenceReport(cr); CR global_cr = gatherConvergenceReport(cr, cc);
BOOST_CHECK(global_cr.wellFailures().size() == std::size_t((cc.size())+1) / 2); BOOST_CHECK(global_cr.wellFailures().size() == std::size_t((cc.size())+1) / 2);
if (cc.rank() % 2 == 0) { if (cc.rank() % 2 == 0) {
BOOST_CHECK(global_cr.wellFailures()[cc.rank()/2] == cr.wellFailures()[0]); BOOST_CHECK(global_cr.wellFailures()[cc.rank()/2] == cr.wellFailures()[0]);
View File
@ -25,6 +25,7 @@
#include <boost/test/unit_test.hpp> #include <boost/test/unit_test.hpp>
#include <dune/common/version.hh>
#include <opm/simulators/utils/gatherDeferredLogger.hpp> #include <opm/simulators/utils/gatherDeferredLogger.hpp>
#include <dune/common/parallel/mpihelper.hh> #include <dune/common/parallel/mpihelper.hh>
@ -80,14 +81,14 @@ void initLogger(std::ostringstream& log_stream) {
BOOST_AUTO_TEST_CASE(NoMessages) BOOST_AUTO_TEST_CASE(NoMessages)
{ {
auto cc = Dune::MPIHelper::getCollectiveCommunication(); const Opm::Parallel::Communication& cc = Dune::MPIHelper::getCollectiveCommunication();
std::ostringstream log_stream; std::ostringstream log_stream;
initLogger(log_stream); initLogger(log_stream);
Opm::DeferredLogger local_deferredlogger; Opm::DeferredLogger local_deferredlogger;
Opm::DeferredLogger global_deferredlogger = gatherDeferredLogger(local_deferredlogger); Opm::DeferredLogger global_deferredlogger = gatherDeferredLogger(local_deferredlogger, cc);
if (cc.rank() == 0) { if (cc.rank() == 0) {
@ -103,7 +104,7 @@ BOOST_AUTO_TEST_CASE(NoMessages)
BOOST_AUTO_TEST_CASE(VariableNumberOfMessages) BOOST_AUTO_TEST_CASE(VariableNumberOfMessages)
{ {
auto cc = Dune::MPIHelper::getCollectiveCommunication(); const Opm::Parallel::Communication& cc = Dune::MPIHelper::getCollectiveCommunication();
std::ostringstream log_stream; std::ostringstream log_stream;
initLogger(log_stream); initLogger(log_stream);
@ -119,7 +120,7 @@ BOOST_AUTO_TEST_CASE(VariableNumberOfMessages)
local_deferredlogger.bug("tagme", "bug from rank " + std::to_string(cc.rank())); local_deferredlogger.bug("tagme", "bug from rank " + std::to_string(cc.rank()));
} }
Opm::DeferredLogger global_deferredlogger = gatherDeferredLogger(local_deferredlogger); Opm::DeferredLogger global_deferredlogger = gatherDeferredLogger(local_deferredlogger, cc);
if (cc.rank() == 0) { if (cc.rank() == 0) {
@ -141,7 +142,7 @@ BOOST_AUTO_TEST_CASE(VariableNumberOfMessages)
BOOST_AUTO_TEST_CASE(AllHaveOneMessage) BOOST_AUTO_TEST_CASE(AllHaveOneMessage)
{ {
auto cc = Dune::MPIHelper::getCollectiveCommunication(); const Opm::Parallel::Communication& cc = Dune::MPIHelper::getCollectiveCommunication();
std::ostringstream log_stream; std::ostringstream log_stream;
initLogger(log_stream); initLogger(log_stream);
@ -149,7 +150,7 @@ BOOST_AUTO_TEST_CASE(AllHaveOneMessage)
Opm::DeferredLogger local_deferredlogger; Opm::DeferredLogger local_deferredlogger;
local_deferredlogger.info("info from rank " + std::to_string(cc.rank())); local_deferredlogger.info("info from rank " + std::to_string(cc.rank()));
Opm::DeferredLogger global_deferredlogger = gatherDeferredLogger(local_deferredlogger); Opm::DeferredLogger global_deferredlogger = gatherDeferredLogger(local_deferredlogger, cc);
if (cc.rank() == 0) { if (cc.rank() == 0) {
View File
@ -18,7 +18,10 @@
along with OPM. If not, see <http://www.gnu.org/licenses/>. along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/ */
#include<config.h> #include<config.h>
#include<opm/simulators/wells/ParallelWellInfo.hpp> #include<opm/simulators/wells/ParallelWellInfo.hpp>
#include <dune/common/version.hh>
#include<vector> #include<vector>
#include<string> #include<string>
#include<tuple> #include<tuple>
@ -113,7 +116,12 @@ BOOST_AUTO_TEST_CASE(ParallelWellComparison)
pairs = {{"Test1", false},{"Test2", true}, {"Test1", true} }; pairs = {{"Test1", false},{"Test2", true}, {"Test1", true} };
std::vector<Opm::ParallelWellInfo> well_info; std::vector<Opm::ParallelWellInfo> well_info;
well_info.assign(pairs.begin(), pairs.end());
for (const auto& wellinfo : pairs) {
well_info.emplace_back(wellinfo, Opm::ParallelWellInfo::Communication());
}
//well_info.assign(pairs.begin(), pairs.end());
BOOST_CHECK_EQUAL_COLLECTIONS(pairs.begin(), pairs.end(), BOOST_CHECK_EQUAL_COLLECTIONS(pairs.begin(), pairs.end(),
well_info.begin(), well_info.end()); well_info.begin(), well_info.end());
@ -227,13 +235,7 @@ BOOST_AUTO_TEST_CASE(CommunicateAboveBelowSelf1)
} }
} }
using MPIComm = typename Dune::MPIHelper::MPICommunicator; std::vector<int> createGlobalEclIndex(const Opm::ParallelWellInfo::Communication& comm)
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
using Communication = Dune::Communication<MPIComm>;
#else
using Communication = Dune::CollectiveCommunication<MPIComm>;
#endif
std::vector<int> createGlobalEclIndex(const Communication& comm)
{ {
std::vector<int> globalEclIndex = {0, 1, 2, 3, 7 , 8, 10, 11}; std::vector<int> globalEclIndex = {0, 1, 2, 3, 7 , 8, 10, 11};
auto oldSize = globalEclIndex.size(); auto oldSize = globalEclIndex.size();
@ -254,7 +256,7 @@ std::vector<int> createGlobalEclIndex(const Communication& comm)
template<class C> template<class C>
std::vector<double> populateCommAbove(C& commAboveBelow, std::vector<double> populateCommAbove(C& commAboveBelow,
const Communication& comm, const Opm::ParallelWellInfo::Communication& comm,
const std::vector<int>& globalEclIndex, const std::vector<int>& globalEclIndex,
const std::vector<double> globalCurrent, const std::vector<double> globalCurrent,
int num_component = 1, int num_component = 1,
@ -285,7 +287,7 @@ std::vector<double> populateCommAbove(C& commAboveBelow,
BOOST_AUTO_TEST_CASE(CommunicateAboveBelowParallel) BOOST_AUTO_TEST_CASE(CommunicateAboveBelowParallel)
{ {
auto comm = Communication(Dune::MPIHelper::getCommunicator()); auto comm = Opm::ParallelWellInfo::Communication(Dune::MPIHelper::getCommunicator());
Opm::CommunicateAboveBelow commAboveBelow{ comm }; Opm::CommunicateAboveBelow commAboveBelow{ comm };
for(std::size_t count=0; count < 2; ++count) for(std::size_t count=0; count < 2; ++count)
@ -363,7 +365,7 @@ BOOST_AUTO_TEST_CASE(PartialSumself)
commAboveBelow.endReset(); commAboveBelow.endReset();
initRandomNumbers(std::begin(current), std::end(current), initRandomNumbers(std::begin(current), std::end(current),
Communication(comm)); Opm::ParallelWellInfo::Communication(comm));
auto stdCopy = current; auto stdCopy = current;
std::partial_sum(std::begin(stdCopy), std::end(stdCopy), std::begin(stdCopy)); std::partial_sum(std::begin(stdCopy), std::end(stdCopy), std::begin(stdCopy));
@ -377,13 +379,13 @@ BOOST_AUTO_TEST_CASE(PartialSumself)
BOOST_AUTO_TEST_CASE(PartialSumParallel) BOOST_AUTO_TEST_CASE(PartialSumParallel)
{ {
auto comm = Communication(Dune::MPIHelper::getCommunicator()); auto comm = Opm::ParallelWellInfo::Communication(Dune::MPIHelper::getCommunicator());
Opm::CommunicateAboveBelow commAboveBelow{ comm }; Opm::CommunicateAboveBelow commAboveBelow{ comm };
auto globalEclIndex = createGlobalEclIndex(comm); auto globalEclIndex = createGlobalEclIndex(comm);
std::vector<double> globalCurrent(globalEclIndex.size()); std::vector<double> globalCurrent(globalEclIndex.size());
initRandomNumbers(std::begin(globalCurrent), std::end(globalCurrent), initRandomNumbers(std::begin(globalCurrent), std::end(globalCurrent),
Communication(comm)); Opm::ParallelWellInfo::Communication(comm));
auto localCurrent = populateCommAbove(commAboveBelow, comm, auto localCurrent = populateCommAbove(commAboveBelow, comm,
globalEclIndex, globalCurrent); globalEclIndex, globalCurrent);
@ -405,7 +407,7 @@ BOOST_AUTO_TEST_CASE(PartialSumParallel)
void testGlobalPerfFactoryParallel(int num_component, bool local_consecutive = false) void testGlobalPerfFactoryParallel(int num_component, bool local_consecutive = false)
{ {
auto comm = Communication(Dune::MPIHelper::getCommunicator()); auto comm = Opm::ParallelWellInfo::Communication(Dune::MPIHelper::getCommunicator());
Opm::ParallelWellInfo wellInfo{ {"Test", true }, comm }; Opm::ParallelWellInfo wellInfo{ {"Test", true }, comm };
auto globalEclIndex = createGlobalEclIndex(comm); auto globalEclIndex = createGlobalEclIndex(comm);
@ -471,7 +473,7 @@ BOOST_AUTO_TEST_CASE(GlobalPerfFactoryParallel1)
BOOST_AUTO_TEST_CASE(EmptyWell) { BOOST_AUTO_TEST_CASE(EmptyWell) {
auto comm = Communication(Dune::MPIHelper::getCommunicator()); auto comm = Opm::ParallelWellInfo::Communication(Dune::MPIHelper::getCommunicator());
Opm::ParallelWellInfo pw({"WELL1", true}, comm); Opm::ParallelWellInfo pw({"WELL1", true}, comm);
pw.communicateFirstPerforation(false); pw.communicateFirstPerforation(false);
double local_p = 1; double local_p = 1;
View File
@ -102,7 +102,7 @@ struct GlobalFixture {
Dune::MPIHelper::instance(argcDummy, argvDummy); Dune::MPIHelper::instance(argcDummy, argvDummy);
#endif #endif
Opm::FlowMainEbos<Opm::Properties::TTag::EclFlowProblem>::setupParameters_(argcDummy, argvDummy); Opm::FlowMainEbos<Opm::Properties::TTag::EclFlowProblem>::setupParameters_(argcDummy, argvDummy, Dune::MPIHelper::getCollectiveCommunication());
} }
}; };