Merge pull request #2033 from blattms/avoid_equil_grid-rebased

Avoid using global grid (equilGrid) on non-IO processes.
Authored by Markus Blatt on 2019-10-21 11:47:17 +02:00; committed by GitHub.
6 changed files with 1199 additions and 119 deletions
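The gist of the change, condensed from the eclcpgridvanguard.hh hunks below (illustrative, not verbatim): the global EQUIL grid and its Cartesian index mapper are now allocated on MPI rank 0 only, and every accessor asserts that it runs on the I/O rank:

    std::unique_ptr<EquilGrid> equilGrid_;   // non-null on rank 0 only

    const EquilGrid& equilGrid() const
    {
        assert(mpiRank == 0);   // only the I/O rank may touch the global grid
        return *equilGrid_;
    }

All other ranks obtain what they need (global cell indices, restart values) through explicit MPI communication instead of reading the global grid directly.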

File: CMakeLists_files.cmake

@@ -44,6 +44,7 @@ list (APPEND MAIN_SOURCE_FILES
opm/simulators/utils/DeferredLogger.cpp
opm/simulators/utils/gatherDeferredLogger.cpp
opm/simulators/utils/moduleVersion.cpp
+opm/simulators/utils/ParallelRestart.cpp
opm/simulators/wells/VFPProdProperties.cpp
opm/simulators/wells/VFPInjProperties.cpp
)
@@ -187,6 +188,7 @@ list (APPEND PUBLIC_HEADER_FILES
opm/simulators/utils/DeferredLogger.hpp
opm/simulators/utils/gatherDeferredLogger.hpp
opm/simulators/utils/moduleVersion.hpp
+opm/simulators/utils/ParallelRestart.hpp
opm/simulators/wells/RateConverter.hpp
opm/simulators/wells/SimFIBODetails.hpp
opm/simulators/wells/WellConnectionAuxiliaryModule.hpp

File: ebos/collecttoiorank.hh

@@ -101,6 +101,7 @@ public:
IndexMapType& localIndexMap_;
IndexMapStorageType& indexMaps_;
std::map<int, int> globalPosition_;
std::set<int>& recv_;
std::vector<int>& ranks_;
public:
@@ -108,21 +109,26 @@ public:
const std::vector<int>& distributedGlobalIndex,
IndexMapType& localIndexMap,
IndexMapStorageType& indexMaps,
-std::vector<int>& ranks)
+std::vector<int>& ranks,
+std::set<int>& recv,
+bool isIORank)
: distributedGlobalIndex_(distributedGlobalIndex)
, localIndexMap_(localIndexMap)
, indexMaps_(indexMaps)
, globalPosition_()
, recv_(recv)
, ranks_(ranks)
{
size_t size = globalIndex.size();
// create mapping globalIndex --> localIndex
-for (size_t index = 0; index < size; ++index)
-    globalPosition_.insert(std::make_pair(globalIndex[index], index));
+if ( isIORank ) // ioRank
+    for (size_t index = 0; index < size; ++index)
+        globalPosition_.insert(std::make_pair(globalIndex[index], index));
// we need to create a mapping from local to global
if (!indexMaps_.empty()) {
-ranks_.resize(size, -1);
+if (isIORank)
+    ranks_.resize(size, -1);
// for the ioRank create a localIndex to index in global state map
IndexMapType& indexMap = indexMaps_.back();
size_t localSize = localIndexMap_.size();
@@ -130,12 +136,44 @@ public:
for (size_t i=0; i<localSize; ++i)
{
int id = distributedGlobalIndex_[localIndexMap_[i]];
-indexMap[i] = globalPosition_[id];
-ranks_[indexMap[i]] = ioRank;
+indexMap[i] = id;
}
}
}
~DistributeIndexMapping()
{
if (!indexMaps_.size())
return;
if ( ranks_.size() )
{
auto rankIt = recv_.begin();
// translate index maps from global cartesian to index
for (auto& indexMap: indexMaps_)
{
int rank = 0;
if (rankIt != recv_.end())
rank = *rankIt;
for (auto&& entry: indexMap)
{
auto candidate = globalPosition_.find(entry);
assert(candidate != globalPosition_.end());
entry = candidate->second;
// Using max should be backwards compatible
ranks_[entry] = std::max(ranks_[entry], rank);
}
if (rankIt != recv_.end())
++rankIt;
}
#ifndef NDEBUG
for (const auto& rank: ranks_)
assert(rank>=0);
#endif
}
}
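// Worked example of the deferred translation above, with hypothetical values:
// during the exchange an indexMap holds global Cartesian ids, say {42, 17};
// given globalPosition_ = {{17, 1}, {42, 0}} (only populated on the I/O rank),
// the destructor rewrites the map in place to the positions {0, 1} and stores
// the sending rank for each translated cell in ranks_.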
void pack(int link, MessageBufferType& buffer)
{
// we should only get one link
@@ -163,15 +201,89 @@ public:
buffer.read(numCells);
indexMap.resize(numCells);
for (int index = 0; index < numCells; ++index) {
-int globalId = -1;
-buffer.read(globalId);
-assert(globalPosition_.find(globalId) != globalPosition_.end());
-indexMap[index] = globalPosition_[globalId];
-ranks_[indexMap[index]] = link + 1;
+buffer.read(indexMap[index]);
}
}
};
/// \brief Communication handle to scatter the global index
template<class Mapper>
class ElementIndexScatterHandle
{
public:
ElementIndexScatterHandle(const Mapper& sendMapper, const Mapper& recvMapper, std::vector<int>& elementIndices)
: sendMapper_(sendMapper), recvMapper_(recvMapper), elementIndices_(elementIndices)
{}
using DataType = int;
bool fixedsize(int /*dim*/, int /*codim*/)
{
return true;
}
template<class T>
std::size_t size(const T&)
{
return 1;
}
template<class B, class T>
void gather(B& buffer, const T& t)
{
buffer.write(sendMapper_.index(t));
}
template<class B, class T>
void scatter(B& buffer, const T& t, std::size_t)
{
buffer.read(elementIndices_[recvMapper_.index(t)]);
}
bool contains(int dim, int codim)
{
return dim==3 && codim==0;
}
private:
const Mapper& sendMapper_;
const Mapper& recvMapper_; // two declarations: "const Mapper& a, b;" would make b a const copy, not a reference
std::vector<int>& elementIndices_;
};
/// \brief Communication handle to scatter the global index
template<class Mapper>
class ElementIndexHandle
{
public:
ElementIndexHandle(const Mapper& mapper, std::vector<int>& elementIndices)
: mapper_(mapper), elementIndices_(elementIndices)
{}
using DataType = int;
bool fixedsize(int /*dim*/, int /*codim*/)
{
return true;
}
template<class T>
std::size_t size(const T&)
{
return 1;
}
template<class B, class T>
void gather(B& buffer, const T& t)
{
buffer.write(elementIndices_[mapper_.index(t)]);
}
template<class B, class T>
void scatter(B& buffer, const T& t, std::size_t)
{
buffer.read(elementIndices_[mapper_.index(t)]);
}
bool contains(int dim, int codim)
{
return dim==3 && codim==0;
}
private:
const Mapper& mapper_;
std::vector<int>& elementIndices_;
};
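// Both handles are driven by the grid's communication interface; this mirrors
// the calls that appear further down in this file:
//
//   ElementIndexScatterHandle<EquilElementMapper> scatter(equilElemMapper, elemMapper, localIdxToGlobalIdx_);
//   vanguard.grid().scatterData(scatter);   // global view -> distributed view
//
//   ElementIndexHandle<ElementMapper> sync(elemMapper, localIdxToGlobalIdx_);
//   vanguard.grid().communicate(sync, Dune::InteriorBorder_All_Interface,
//                               Dune::ForwardCommunication);   // also fills ghost cells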
enum { ioRank = 0 };
static const bool needsReordering =
@@ -189,48 +301,6 @@ public:
{
std::set<int> send, recv;
typedef typename Vanguard::EquilGrid::LeafGridView EquilGridView;
-const EquilGridView equilGridView = vanguard.equilGrid().leafGridView();
-#if DUNE_VERSION_NEWER(DUNE_GRID, 2,6)
-typedef Dune::MultipleCodimMultipleGeomTypeMapper<EquilGridView> EquilElementMapper;
-EquilElementMapper equilElemMapper(equilGridView, Dune::mcmgElementLayout());
-#else
-typedef Dune::MultipleCodimMultipleGeomTypeMapper<EquilGridView, Dune::MCMGElementLayout> EquilElementMapper;
-EquilElementMapper equilElemMapper(equilGridView);
-#endif
-// We need a mapping from local to global grid, here we
-// use equilGrid which represents a view on the global grid
-const size_t globalSize = vanguard.equilGrid().leafGridView().size(0);
-// reserve memory
-globalCartesianIndex_.resize(globalSize, -1);
-// loop over all elements (global grid) and store Cartesian index
-auto elemIt = vanguard.equilGrid().leafGridView().template begin<0>();
-const auto& elemEndIt = vanguard.equilGrid().leafGridView().template end<0>();
-for (; elemIt != elemEndIt; ++elemIt) {
-    int elemIdx = equilElemMapper.index(*elemIt);
-    int cartElemIdx = vanguard.equilCartesianIndexMapper().cartesianIndex(elemIdx);
-    globalCartesianIndex_[elemIdx] = cartElemIdx;
-}
-// the I/O rank receives from all other ranks
-if (isIORank()) {
-    for (int i = 0; i < comm.size(); ++i) {
-        if (i != ioRank)
-            recv.insert(i);
-    }
-}
-else // all other simply send to the I/O rank
-    send.insert(ioRank);
-localIndexMap_.clear();
-const size_t gridSize = vanguard.grid().size(0);
-localIndexMap_.reserve(gridSize);
-// store the local Cartesian index
-IndexMapType distributedCartesianIndex;
-distributedCartesianIndex.resize(gridSize, -1);
typedef typename Vanguard::GridView LocalGridView;
const LocalGridView localGridView = vanguard.gridView();
@@ -243,6 +313,65 @@ public:
ElementMapper elemMapper(localGridView);
#endif
localIdxToGlobalIdx_.resize(localGridView.size(0), -1);
// the I/O rank receives from all other ranks
if (isIORank()) {
// We need a mapping from local to global grid, here we
// use equilGrid which represents a view on the global grid
// reserve memory
const size_t globalSize = vanguard.equilGrid().leafGridView().size(0);
globalCartesianIndex_.resize(globalSize, -1);
const EquilGridView equilGridView = vanguard.equilGrid().leafGridView();
#if DUNE_VERSION_NEWER(DUNE_GRID, 2,6)
typedef Dune::MultipleCodimMultipleGeomTypeMapper<EquilGridView> EquilElementMapper;
EquilElementMapper equilElemMapper(equilGridView, Dune::mcmgElementLayout());
#else
typedef Dune::MultipleCodimMultipleGeomTypeMapper<EquilGridView, Dune::MCMGElementLayout> EquilElementMapper;
EquilElementMapper equilElemMapper(equilGridView);
#endif
// Scatter the global index to local index for lookup during restart
ElementIndexScatterHandle<EquilElementMapper> handle(equilElemMapper, elemMapper, localIdxToGlobalIdx_);
vanguard.grid().scatterData(handle);
// loop over all elements (global grid) and store Cartesian index
auto elemIt = vanguard.equilGrid().leafGridView().template begin<0>();
const auto& elemEndIt = vanguard.equilGrid().leafGridView().template end<0>();
for (; elemIt != elemEndIt; ++elemIt) {
int elemIdx = equilElemMapper.index(*elemIt);
int cartElemIdx = vanguard.equilCartesianIndexMapper().cartesianIndex(elemIdx);
globalCartesianIndex_[elemIdx] = cartElemIdx;
}
for (int i = 0; i < comm.size(); ++i) {
if (i != ioRank)
recv.insert(i);
}
}
else
{
// all other simply send to the I/O rank
send.insert(ioRank);
// Scatter the global index to local index for lookup during restart
ElementIndexScatterHandle<ElementMapper> handle(elemMapper, elemMapper, localIdxToGlobalIdx_);
vanguard.grid().scatterData(handle);
}
// Sync the global element indices
ElementIndexHandle<ElementMapper> handle(elemMapper, localIdxToGlobalIdx_);
vanguard.grid().communicate(handle, Dune::InteriorBorder_All_Interface,
Dune::ForwardCommunication);
localIndexMap_.clear();
const size_t gridSize = vanguard.grid().size(0);
localIndexMap_.reserve(gridSize);
// store the local Cartesian index
IndexMapType distributedCartesianIndex;
distributedCartesianIndex.resize(gridSize, -1);
// A mapping for the whole grid (including the ghosts) is needed for restarts
auto eIt = localGridView.template begin<0>();
const auto& eEndIt = localGridView.template end<0>();
@@ -269,7 +398,9 @@ public:
distributedCartesianIndex,
localIndexMap_,
indexMaps_,
-globalRanks_);
+globalRanks_,
+recv,
+isIORank());
toIORankComm_.exchange(distIndexMapping);
}
}
@@ -547,15 +678,13 @@ public:
if (!isParallel())
return localIdx;
-// the last indexMap is the local one
-const IndexMapType& indexMap = indexMaps_.back();
-if (indexMap.empty())
+if (localIdxToGlobalIdx_.empty())
    throw std::logic_error("index map is not created on this rank");
-if (localIdx > indexMap.size())
+if (localIdx >= localIdxToGlobalIdx_.size()) // >= : an index equal to size() is already out of range
    throw std::logic_error("local index is outside map range");
-return indexMap[localIdx];
+return localIdxToGlobalIdx_[localIdx];
}
size_t numCells () const
@@ -569,12 +698,10 @@ public:
if (!isParallel())
return true;
-// the last indexMap is the local one
-const IndexMapType& indexMap = indexMaps_.back();
-if (indexMap.empty())
+if (localIdxToGlobalIdx_.empty())
    throw std::logic_error("index map is not created on this rank");
-return std::find(indexMap.begin(), indexMap.end(), globalIdx) != indexMap.end();
+return std::find(localIdxToGlobalIdx_.begin(), localIdxToGlobalIdx_.end(), globalIdx) != localIdxToGlobalIdx_.end();
}
protected:
@@ -586,6 +713,7 @@ protected:
Opm::data::Solution globalCellData_;
std::map<std::pair<std::string, int>, double> globalBlockData_;
Opm::data::Wells globalWellData_;
std::vector<int> localIdxToGlobalIdx_;
};
} // end namespace Opm

File: ebos/eclcpgridvanguard.hh

@@ -83,20 +83,14 @@ private:
public:
EclCpGridVanguard(Simulator& simulator)
-: EclBaseVanguard<TypeTag>(simulator)
+: EclBaseVanguard<TypeTag>(simulator), mpiRank()
{
+#if HAVE_MPI
+    MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
+#endif
this->callImplementationInit();
}
-~EclCpGridVanguard()
-{
-    delete cartesianIndexMapper_;
-    delete equilCartesianIndexMapper_;
-    delete grid_;
-    delete equilGrid_;
-    delete globalTrans_;
-}
/*!
* \brief Return a reference to the simulation grid.
*/
@@ -119,7 +113,10 @@ public:
* same as the grid which is used for the actual simulation.
*/
const EquilGrid& equilGrid() const
-{ return *equilGrid_; }
+{
+    assert(mpiRank == 0);
+    return *equilGrid_;
+}
/*!
* \brief Indicates that the initial condition has been computed and the memory used
@@ -130,11 +127,8 @@ public:
*/
void releaseEquilGrid()
{
-delete equilGrid_;
-equilGrid_ = 0;
-delete equilCartesianIndexMapper_;
-equilCartesianIndexMapper_ = 0;
+equilGrid_.reset();
+equilCartesianIndexMapper_.reset();
}
/*!
@@ -145,9 +139,7 @@ public:
void loadBalance()
{
#if HAVE_MPI
-int mpiRank = 0;
int mpiSize = 1;
-MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
if (mpiSize > 1) {
@@ -155,9 +147,12 @@ public:
// its edge weights. since this is (kind of) a layering violation and
// transmissibilities are relatively expensive to compute, we only do it if
// more than a single process is involved in the simulation.
-cartesianIndexMapper_ = new CartesianIndexMapper(*grid_);
-globalTrans_ = new EclTransmissibility<TypeTag>(*this);
-globalTrans_->update();
+cartesianIndexMapper_.reset(new CartesianIndexMapper(*grid_));
+if (grid_->size(0))
+{
+    globalTrans_.reset(new EclTransmissibility<TypeTag>(*this));
+    globalTrans_->update();
+}
Dune::EdgeWeightMethod edgeWeightsMethod = this->edgeWeightsMethod();
@@ -200,12 +195,11 @@ public:
}
grid_->switchToDistributedView();
-delete cartesianIndexMapper_;
-cartesianIndexMapper_ = nullptr;
+cartesianIndexMapper_.reset();
}
#endif
-cartesianIndexMapper_ = new CartesianIndexMapper(*grid_);
+cartesianIndexMapper_.reset(new CartesianIndexMapper(*grid_));
this->updateGridView_();
}
@@ -217,8 +211,7 @@ public:
*/
void releaseGlobalTransmissibilities()
{
-delete globalTrans_;
-globalTrans_ = nullptr;
+globalTrans_.reset();
}
/*!
@@ -232,18 +225,24 @@ public:
* \brief Returns mapper from compressed to cartesian indices for the EQUIL grid
*/
const CartesianIndexMapper& equilCartesianIndexMapper() const
-{ return *equilCartesianIndexMapper_; }
+{
+    assert(mpiRank == 0);
+    assert(equilCartesianIndexMapper_);
+    return *equilCartesianIndexMapper_;
+}
std::unordered_set<std::string> defunctWellNames() const
{ return defunctWellNames_; }
const EclTransmissibility<TypeTag>& globalTransmissibility() const
-{ return *globalTrans_; }
+{
+    assert( globalTrans_ != nullptr );
+    return *globalTrans_;
+}
void releaseGlobalTransmissibility()
{
-delete globalTrans_;
-globalTrans_ = nullptr;
+globalTrans_.reset();
}
protected:
@@ -252,7 +251,7 @@ protected:
const auto& gridProps = this->eclState().get3DProperties();
const std::vector<double>& porv = gridProps.getDoubleGridProperty("PORV").getData();
-grid_ = new Dune::CpGrid();
+grid_.reset(new Dune::CpGrid());
grid_->processEclipseFormat(this->eclState().getInputGrid(),
/*isPeriodic=*/false,
/*flipNormals=*/false,
@@ -266,29 +265,31 @@ protected:
// the initial condition is calculated.
// After load balancing, grid_ will contain the global and the distributed view.
// equilGrid_, being a shallow copy, contains only the global view.
-equilGrid_ = new Dune::CpGrid(*grid_);
-equilCartesianIndexMapper_ = new CartesianIndexMapper(*equilGrid_);
-globalTrans_ = nullptr;
+if (mpiRank == 0)
+{
+    equilGrid_.reset(new Dune::CpGrid(*grid_));
+    equilCartesianIndexMapper_.reset(new CartesianIndexMapper(*equilGrid_));
+}
}
// remove connections located in inactive grid cells
void filterConnections_()
{
-assert(grid_);
-Grid grid = *grid_;
-grid.switchToGlobalView();
-const auto eclipseGrid = Opm::UgGridHelpers::createEclipseGrid(grid, this->eclState().getInputGrid());
-this->schedule().filterConnections(eclipseGrid);
+if (equilGrid_)
+{
+    const auto eclipseGrid = Opm::UgGridHelpers::createEclipseGrid(equilGrid(), this->eclState().getInputGrid());
+    this->schedule().filterConnections(eclipseGrid);
+}
}
-Grid* grid_;
-EquilGrid* equilGrid_;
-CartesianIndexMapper* cartesianIndexMapper_;
-CartesianIndexMapper* equilCartesianIndexMapper_;
+std::unique_ptr<Grid> grid_;
+std::unique_ptr<EquilGrid> equilGrid_;
+std::unique_ptr<CartesianIndexMapper> cartesianIndexMapper_;
+std::unique_ptr<CartesianIndexMapper> equilCartesianIndexMapper_;
-EclTransmissibility<TypeTag>* globalTrans_;
+std::unique_ptr<EclTransmissibility<TypeTag> > globalTrans_;
std::unordered_set<std::string> defunctWellNames_;
+int mpiRank;
};
} // namespace Opm

File: ebos/eclwriter.hh

@@ -43,6 +43,7 @@
#include <opm/output/eclipse/Summary.hpp>
#include <opm/parser/eclipse/Units/UnitSystem.hpp>
#include <opm/simulators/utils/ParallelRestart.hpp>
#include <opm/grid/GridHelpers.hpp>
#include <opm/grid/utility/cartesianToCompressed.hpp>
@@ -178,12 +179,14 @@ public:
, collectToIORank_(simulator_.vanguard())
, eclOutputModule_(simulator, collectToIORank_)
{
-globalGrid_ = simulator_.vanguard().grid();
-globalGrid_.switchToGlobalView();
-eclIO_.reset(new Opm::EclipseIO(simulator_.vanguard().eclState(),
-                                Opm::UgGridHelpers::createEclipseGrid(globalGrid_, simulator_.vanguard().eclState().getInputGrid()),
-                                simulator_.vanguard().schedule(),
-                                simulator_.vanguard().summaryConfig()));
+if (collectToIORank_.isIORank()) {
+    globalGrid_ = simulator_.vanguard().grid();
+    globalGrid_.switchToGlobalView();
+    eclIO_.reset(new Opm::EclipseIO(simulator_.vanguard().eclState(),
+                                    Opm::UgGridHelpers::createEclipseGrid(globalGrid_, simulator_.vanguard().eclState().getInputGrid()),
+                                    simulator_.vanguard().schedule(),
+                                    simulator_.vanguard().summaryConfig()));
+}
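// Note: on all other ranks eclIO_ stays unset; loadParallelRestart() (see
// ParallelRestart.cpp below) relies on receiving a null pointer there.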
// create output thread if enabled and rank is I/O rank
// async output is enabled by default if pthread are enabled
@@ -198,7 +201,10 @@ public:
{ }
const Opm::EclipseIO& eclIO() const
-{ return *eclIO_; }
+{
+    assert(eclIO_);
+    return *eclIO_;
+}
void writeInit()
{
@@ -239,7 +245,6 @@ public:
if (reportStepNum == 0)
return;
-const auto& summary = eclIO_->summary();
Scalar curTime = simulator_.time() + simulator_.timeStepSize();
Scalar totalCpuTime =
simulator_.executionTimer().realTimeElapsed() +
@@ -272,6 +277,7 @@ public:
std::vector<char> buffer;
if (collectToIORank_.isIORank()) {
+const auto& summary = eclIO_->summary();
const auto& eclState = simulator_.vanguard().eclState();
// Add TCPU
@@ -423,7 +429,8 @@ public:
{
Opm::SummaryState& summaryState = simulator_.vanguard().summaryState();
-auto restartValues = eclIO_->loadRestart(summaryState, solutionKeys, extraKeys);
+auto restartValues = loadParallelRestart(eclIO_.get(), summaryState, solutionKeys, extraKeys,
+                                         gridView.grid().comm());
for (unsigned elemIdx = 0; elemIdx < numElements; ++elemIdx) {
unsigned globalIdx = collectToIORank_.localIdxToGlobalIdx(elemIdx);
@@ -484,9 +491,10 @@ private:
#endif
const auto& cartesianCellIdx = globalGrid_.globalCell();
-const auto* globalTrans = &(simulator_.vanguard().globalTransmissibility());
+const EclTransmissibility<TypeTag>* globalTrans;
if (!collectToIORank_.isParallel())
+{
// in the sequential case we must use the transmissibilities defined by
// the problem. (because in the sequential case, the grid manager does
// not compute "global" transmissibilities for performance reasons. in
@@ -494,6 +502,11 @@ private:
// because this object refers to the distributed grid and we need the
// sequential version here.)
globalTrans = &simulator_.problem().eclTransmissibilities();
+}
+else
+{
+    globalTrans = &(simulator_.vanguard().globalTransmissibility());
+}
auto elemIt = globalGridView.template begin</*codim=*/0>();
const auto& elemEndIt = globalGridView.template end</*codim=*/0>();
@@ -583,7 +596,7 @@ private:
ElementMapper globalElemMapper(globalGridView);
#endif
-const auto* globalTrans = &(simulator_.vanguard().globalTransmissibility());
+const EclTransmissibility<TypeTag>* globalTrans;
if (!collectToIORank_.isParallel()) {
// in the sequential case we must use the transmissibilities defined by
// the problem. (because in the sequential case, the grid manager does
@@ -593,6 +606,10 @@ private:
// sequential version here.)
globalTrans = &simulator_.problem().eclTransmissibilities();
}
+else
+{
+    globalTrans = &(simulator_.vanguard().globalTransmissibility());
+}
auto cartDims = simulator_.vanguard().cartesianIndexMapper().cartesianDimensions();
auto elemIt = globalGridView.template begin</*codim=*/0>();

File: opm/simulators/utils/ParallelRestart.cpp (new file)

@@ -0,0 +1,674 @@
/*
Copyright 2019 Equinor AS.
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#if HAVE_MPI
#include <mpi.h>
#endif
#include "ParallelRestart.hpp"
#include <dune/common/parallel/mpitraits.hh>
namespace Opm
{
namespace Mpi
{
template<class T>
std::size_t packSize(const T*, std::size_t, Dune::MPIHelper::MPICommunicator,
std::integral_constant<bool, false>)
{
OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type.");
}
template<class T>
std::size_t packSize(const T*, std::size_t l, Dune::MPIHelper::MPICommunicator comm,
std::integral_constant<bool, true>)
{
#if HAVE_MPI
int size;
MPI_Pack_size(1, Dune::MPITraits<std::size_t>::getType(), comm, &size);
std::size_t totalSize = size;
MPI_Pack_size(l, Dune::MPITraits<T>::getType(), comm, &size);
return totalSize + size;
#else
(void) comm;
(void) l;
return 0;
#endif
}
template<class T>
std::size_t packSize(const T* data, std::size_t l, Dune::MPIHelper::MPICommunicator comm)
{
return packSize(data, l, comm, typename std::is_pod<T>::type());
}
template<class T>
std::size_t packSize(const T&, Dune::MPIHelper::MPICommunicator,
std::integral_constant<bool, false>)
{
OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type.");
}
template<class T>
std::size_t packSize(const T&, Dune::MPIHelper::MPICommunicator comm,
std::integral_constant<bool, true>)
{
#if HAVE_MPI
int size{};
MPI_Pack_size(1, Dune::MPITraits<T>::getType(), comm, &size);
return size;
#else
(void) comm;
return 0;
#endif
}
template<class T>
std::size_t packSize(const T& data, Dune::MPIHelper::MPICommunicator comm)
{
return packSize(data, comm, typename std::is_pod<T>::type());
}
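// The std::is_pod<T> tag dispatch above sends POD types to MPI_Pack_size and
// makes everything else throw at runtime unless an explicit overload exists.
// For example, packSize(3.14, comm) resolves to the integral_constant<bool, true>
// overload, while the data::* structs below opt in via their own overloads.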
template<class T1, class T2>
std::size_t packSize(const std::pair<T1,T2>& data, Dune::MPIHelper::MPICommunicator comm)
{
return packSize(data.first, comm) + packSize(data.second, comm);
}
template<class T>
std::size_t packSize(const std::vector<T>& data, Dune::MPIHelper::MPICommunicator comm)
{
if (std::is_pod<T>::value)
// size written automatically
return packSize(data.data(), data.size(), comm);
std::size_t size = packSize(data.size(), comm);
for (const auto& entry: data)
size+=packSize(entry, comm);
return size;
}
std::size_t packSize(const char* str, Dune::MPIHelper::MPICommunicator comm)
{
#if HAVE_MPI
int size;
MPI_Pack_size(1, Dune::MPITraits<std::size_t>::getType(), comm, &size);
int totalSize = size;
MPI_Pack_size(strlen(str)+1, MPI_CHAR, comm, &size);
return totalSize + size;
#else
(void) str;
(void) comm;
return 0;
#endif
}
std::size_t packSize(const std::string& str, Dune::MPIHelper::MPICommunicator comm)
{
return packSize(str.c_str(), comm);
}
template<class T1, class T2>
std::size_t packSize(const std::map<T1,T2>& data, Dune::MPIHelper::MPICommunicator comm)
{
std::size_t totalSize = packSize(data.size(), comm);
for (const auto& entry: data)
{
totalSize += packSize(entry, comm);
}
return totalSize;
}
template<class T1, class T2>
std::size_t packSize(const std::unordered_map<T1,T2>& data, Dune::MPIHelper::MPICommunicator comm)
{
std::size_t totalSize = packSize(data.size(), comm);
for (const auto& entry: data)
{
totalSize += packSize(entry, comm);
}
return totalSize;
}
std::size_t packSize(const data::Rates& data, Dune::MPIHelper::MPICommunicator comm)
{
// plain struct no pointers -> treat as pod
return packSize(data, comm, std::integral_constant<bool,true>());
}
std::size_t packSize(const data::Connection& data, Dune::MPIHelper::MPICommunicator comm)
{
// plain struct no pointers -> treat as pod
return packSize(data, comm, std::integral_constant<bool,true>());
}
std::size_t packSize(const data::Segment& data, Dune::MPIHelper::MPICommunicator comm)
{
// plain struct no pointers -> treat as pod
return packSize(data, comm, std::integral_constant<bool,true>());
}
std::size_t packSize(const data::Well& data, Dune::MPIHelper::MPICommunicator comm)
{
std::size_t size = packSize(data.rates, comm);
size += packSize(data.bhp, comm) + packSize(data.thp, comm);
size += packSize(data.temperature, comm);
size += packSize(data.control, comm);
size += packSize(data.connections, comm);
size += packSize(data.segments, comm);
return size;
}
std::size_t packSize(const data::CellData& data, Dune::MPIHelper::MPICommunicator comm)
{
return packSize(data.dim, comm) + packSize(data.data, comm) + packSize(data.target, comm);
}
std::size_t packSize(const RestartKey& data, Dune::MPIHelper::MPICommunicator comm)
{
return packSize(data.key, comm) + packSize(data.dim, comm) + packSize(data.required, comm);
}
std::size_t packSize(const data::Solution& data, Dune::MPIHelper::MPICommunicator comm)
{
// Needs explicit conversion to a supported base type holding the data
// to prevent throwing.
return packSize(static_cast<const std::map< std::string, data::CellData>&>(data), comm);
}
std::size_t packSize(const data::WellRates& data, Dune::MPIHelper::MPICommunicator comm)
{
// Needs explicit conversion to a supported base type holding the data
// to prevent throwing.
return packSize(static_cast<const std::map< std::string, data::Well>&>(data), comm);
}
std::size_t packSize(const RestartValue& data, Dune::MPIHelper::MPICommunicator comm)
{
return packSize(data.solution, comm) + packSize(data.wells, comm) + packSize(data.extra, comm);
}
////// pack routines
template<class T>
void pack(const T*, std::size_t, std::vector<char>&, int&,
Dune::MPIHelper::MPICommunicator, std::integral_constant<bool, false>)
{
OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type.");
}
template<class T>
void pack(const T* data, std::size_t l, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm,
std::integral_constant<bool, true>)
{
#if HAVE_MPI
MPI_Pack(&l, 1, Dune::MPITraits<std::size_t>::getType(), buffer.data(),
buffer.size(), &position, comm);
MPI_Pack(data, l, Dune::MPITraits<T>::getType(), buffer.data(),
buffer.size(), &position, comm);
#else
(void) data;
(void) comm;
(void) l;
(void) buffer;
(void) position;
#endif
}
template<class T>
void pack(const T* data, std::size_t l, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
pack(data, l, buffer, position, comm, typename std::is_pod<T>::type());
}
template<class T>
void pack(const T&, std::vector<char>&, int&,
Dune::MPIHelper::MPICommunicator, std::integral_constant<bool, false>)
{
OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type.");
}
template<class T>
void pack(const T& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm, std::integral_constant<bool, true>)
{
#if HAVE_MPI
MPI_Pack(&data, 1, Dune::MPITraits<T>::getType(), buffer.data(),
buffer.size(), &position, comm);
#else
(void) data;
(void) comm;
(void) buffer;
(void) position;
#endif
}
template<class T>
void pack(const T& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
pack(data, buffer, position, comm, typename std::is_pod<T>::type());
}
template<class T1, class T2>
void pack(const std::pair<T1,T2>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
pack(data.first, buffer, position, comm);
pack(data.second, buffer, position, comm);
}
template<class T>
void pack(const std::vector<T>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
if (std::is_pod<T>::value)
{
// size written automatically
pack(data.data(), data.size(), buffer, position, comm);
return;
}
pack(data.size(), buffer, position, comm);
for (const auto& entry: data)
pack(entry, buffer, position, comm);
}
void pack(const char* str, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
#if HAVE_MPI
std::size_t length = strlen(str)+1;
MPI_Pack(&length, 1, Dune::MPITraits<std::size_t>::getType(), buffer.data(),
buffer.size(), &position, comm);
MPI_Pack(str, strlen(str)+1, MPI_CHAR, buffer.data(), buffer.size(),
&position, comm);
#else
(void) str;
(void) comm;
(void) buffer;
(void) position;
#endif
}
void pack(const std::string& str, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
pack(str.c_str(), buffer, position, comm);
}
template<class T1, class T2>
void pack(const std::map<T1,T2>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
pack(data.size(), buffer, position, comm);
for (const auto& entry: data)
{
pack(entry, buffer, position, comm);
}
}
template<class T1, class T2>
void pack(const std::unordered_map<T1,T2>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
pack(data.size(), buffer, position, comm);
for (const auto& entry: data)
{
pack(entry, buffer, position, comm);
}
}
void pack(const data::Rates& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
// plain struct no pointers -> treat as pod
pack(data, buffer, position, comm, std::integral_constant<bool,true>());
}
void pack(const data::Connection& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
// plain struct no pointers -> treat as pod
pack(data, buffer, position, comm, std::integral_constant<bool,true>());
}
void pack(const data::Segment& data,std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
// plain struct no pointers -> treat as pod
pack(data, buffer, position, comm, std::integral_constant<bool,true>());
}
void pack(const data::Well& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
pack(data.rates, buffer, position, comm);
pack(data.bhp, buffer, position, comm);
pack(data.thp, buffer, position, comm);
pack(data.temperature, buffer, position, comm);
pack(data.control, buffer, position, comm);
pack(data.connections, buffer, position, comm);
pack(data.segments, buffer, position, comm);
}
void pack(const RestartKey& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
pack(data.key, buffer, position, comm);
pack(data.dim, buffer, position, comm);
pack(data.required, buffer, position, comm);
}
void pack(const data::CellData& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
pack(data.dim, buffer, position, comm);
pack(data.data, buffer, position, comm);
pack(data.target, buffer, position, comm);
}
void pack(const data::Solution& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
// Needs explicit conversion to a supported base type holding the data
// to prevent throwing.
pack(static_cast<const std::map< std::string, data::CellData>&>(data),
buffer, position, comm);
}
void pack(const data::WellRates& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
// Needs explicit conversion to a supported base type holding the data
// to prevent throwing.
pack(static_cast<const std::map< std::string, data::Well>&>(data),
buffer, position, comm);
}
void pack(const RestartValue& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
pack(data.solution, buffer, position, comm);
pack(data.wells, buffer, position, comm);
pack(data.extra, buffer, position, comm);
}
/// unpack routines
template<class T>
void unpack(T*, const std::size_t&, std::vector<char>&, int&,
Dune::MPIHelper::MPICommunicator, std::integral_constant<bool, false>)
{
OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type.");
}
template<class T>
void unpack(T* data, const std::size_t& l, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm,
std::integral_constant<bool, true>)
{
#if HAVE_MPI
MPI_Unpack(buffer.data(), buffer.size(), &position, data, l,
Dune::MPITraits<T>::getType(), comm);
#else
(void) data;
(void) comm;
(void) l;
(void) buffer;
(void) position;
#endif
}
template<class T>
void unpack(T* data, const std::size_t& l, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
unpack(data, l, buffer, position, comm, typename std::is_pod<T>::type());
}
template<class T>
void unpack(T&, std::vector<char>&, int&,
Dune::MPIHelper::MPICommunicator, std::integral_constant<bool, false>)
{
OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type.");
}
template<class T>
void unpack(T& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm, std::integral_constant<bool, true>)
{
#if HAVE_MPI
MPI_Unpack(buffer.data(), buffer.size(), &position, &data, 1,
Dune::MPITraits<T>::getType(), comm);
#else
(void) data;
(void) comm;
(void) buffer;
(void) position;
#endif
}
template<class T>
void unpack(T& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
unpack(data, buffer, position, comm, typename std::is_pod<T>::type());
}
template<class T1, class T2>
void unpack(std::pair<T1,T2>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
unpack(data.first, buffer, position, comm);
unpack(data.second, buffer, position, comm);
}
template<class T>
void unpack(std::vector<T>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
std::size_t length=0;
unpack(length, buffer, position, comm);
data.resize(length);
if (std::is_pod<T>::value)
{
unpack(data.data(), data.size(), buffer, position, comm);
return;
}
for (auto& entry: data)
unpack(entry, buffer, position, comm);
}
void unpack(char* str, std::size_t length, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
#if HAVE_MPI
MPI_Unpack(buffer.data(), buffer.size(), &position, str, length, MPI_CHAR, comm);
#else
(void) str;
(void) comm;
(void) length;
(void) buffer;
(void) position;
#endif
}
void unpack(std::string& str, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
std::size_t length=0;
unpack(length, buffer, position, comm);
std::vector<char> cStr(length, '\0');
unpack(cStr.data(), length, buffer, position, comm);
assert(str.empty());
str.append(cStr.data());
}
template<class T1, class T2>
void unpack(std::map<T1,T2>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
std::size_t size=0;
unpack(size, buffer, position, comm);
for (;size>0; size--)
{
std::pair<T1,T2> entry;
unpack(entry, buffer, position, comm);
data.insert(entry);
}
}
template<class T1, class T2>
void unpack(std::unordered_map<T1,T2>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
std::size_t size=0;
unpack(size, buffer, position, comm);
for (;size>0; size--)
{
std::pair<T1,T2> entry;
unpack(entry, buffer, position, comm);
data.insert(entry);
}
}
void unpack(data::Rates& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
// plain struct no pointers -> treat as pod
unpack(data, buffer, position, comm, std::integral_constant<bool,true>());
}
void unpack(data::Connection& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
// plain struct no pointers -> treat as pod
unpack(data, buffer, position, comm, std::integral_constant<bool,true>());
}
void unpack(data::Segment& data,std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
// plain struct no pointers -> treat as pod
unpack(data, buffer, position, comm, std::integral_constant<bool,true>());
}
void unpack(data::Well& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
unpack(data.rates, buffer, position, comm);
unpack(data.bhp, buffer, position, comm);
unpack(data.thp, buffer, position, comm);
unpack(data.temperature, buffer, position, comm);
unpack(data.control, buffer, position, comm);
unpack(data.connections, buffer, position, comm);
unpack(data.segments, buffer, position, comm);
}
void unpack(RestartKey& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
unpack(data.key, buffer, position, comm);
unpack(data.dim, buffer, position, comm);
unpack(data.required, buffer, position, comm);
}
void unpack(data::CellData& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
unpack(data.dim, buffer, position, comm);
unpack(data.data, buffer, position, comm);
unpack(data.target, buffer, position, comm);
}
void unpack(data::Solution& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
// Needs explicit conversion to a supported base type holding the data
// to prevent throwing.
unpack(static_cast<std::map< std::string, data::CellData>&>(data),
buffer, position, comm);
}
void unpack(data::WellRates& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
// Needs explicit conversion to a supported base type holding the data
// to prevent throwing.
unpack(static_cast<std::map< std::string, data::Well>&>(data),
buffer, position, comm);
}
void unpack(RestartValue& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm)
{
unpack(data.solution, buffer, position, comm);
unpack(data.wells, buffer, position, comm);
unpack(data.extra, buffer, position, comm);
}
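// Round-trip sketch of the helpers above (any supported payload works the same
// way; comm is a Dune::MPIHelper::MPICommunicator):
//
//   std::vector<double> in{1.0, 2.0}, out;
//   std::vector<char> buffer(packSize(in, comm));
//   int position = 0;
//   pack(in, buffer, position, comm);
//   position = 0;
//   unpack(out, buffer, position, comm);   // out now equals in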
} // end namespace Mpi
RestartValue loadParallelRestart(const EclipseIO* eclIO, SummaryState& summaryState,
const std::vector<Opm::RestartKey>& solutionKeys,
const std::vector<Opm::RestartKey>& extraKeys,
Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator> comm)
{
#if HAVE_MPI
data::Solution sol;
data::Wells wells;
RestartValue restartValues(sol, wells);
if (eclIO)
{
assert(comm.rank() == 0);
restartValues = eclIO->loadRestart(summaryState, solutionKeys, extraKeys);
int packedSize = Mpi::packSize(restartValues, comm);
std::vector<char> buffer(packedSize);
int position=0;
Mpi::pack(restartValues, buffer, position, comm);
comm.broadcast(&position, 1, 0);
comm.broadcast(buffer.data(), position, 0);
}
else
{
int bufferSize{};
comm.broadcast(&bufferSize, 1, 0);
std::vector<char> buffer(bufferSize);
comm.broadcast(buffer.data(), bufferSize, 0);
int position{};
Mpi::unpack(restartValues, buffer, position, comm);
}
return restartValues;
#else
(void) comm;
return eclIO->loadRestart(summaryState, solutionKeys, extraKeys);
#endif
}
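// Usage sketch: the call is collective over comm. Only the I/O rank passes a
// non-null EclipseIO pointer (cf. the EclWriter hunk above, where eclIO_ is
// only created when collectToIORank_.isIORank()):
//
//   auto restartValues = loadParallelRestart(eclIO_.get(), summaryState,
//                                            solutionKeys, extraKeys,
//                                            gridView.grid().comm());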
} // end namespace Opm

File: opm/simulators/utils/ParallelRestart.hpp (new file)

@@ -0,0 +1,258 @@
/*
Copyright 2019 Equinor AS.
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef PARALLEL_RESTART_HPP
#define PARALLEL_RESTART_HPP
#if HAVE_MPI
#include <mpi.h>
#endif
#include <opm/output/eclipse/RestartValue.hpp>
#include <opm/output/eclipse/EclipseIO.hpp>
#include <opm/output/eclipse/Summary.hpp>
#include <dune/common/parallel/mpihelper.hh>
#include <vector>
#include <map>
#include <unordered_map>
namespace Opm
{
namespace Mpi
{
template<class T>
std::size_t packSize(const T*, std::size_t, Dune::MPIHelper::MPICommunicator,
std::integral_constant<bool, false>);
template<class T>
std::size_t packSize(const T*, std::size_t l, Dune::MPIHelper::MPICommunicator comm,
std::integral_constant<bool, true>);
template<class T>
std::size_t packSize(const T* data, std::size_t l, Dune::MPIHelper::MPICommunicator comm);
template<class T>
std::size_t packSize(const T&, Dune::MPIHelper::MPICommunicator,
std::integral_constant<bool, false>);
template<class T>
std::size_t packSize(const T&, Dune::MPIHelper::MPICommunicator comm,
std::integral_constant<bool, true>);
template<class T>
std::size_t packSize(const T& data, Dune::MPIHelper::MPICommunicator comm);
template<class T1, class T2>
std::size_t packSize(const std::pair<T1,T2>& data, Dune::MPIHelper::MPICommunicator comm);
template<class T>
std::size_t packSize(const std::vector<T>& data, Dune::MPIHelper::MPICommunicator comm);
std::size_t packSize(const char* str, Dune::MPIHelper::MPICommunicator comm);
std::size_t packSize(const std::string& str, Dune::MPIHelper::MPICommunicator comm);
template<class T1, class T2>
std::size_t packSize(const std::map<T1,T2>& data, Dune::MPIHelper::MPICommunicator comm);
template<class T1, class T2>
std::size_t packSize(const std::unordered_map<T1,T2>& data, Dune::MPIHelper::MPICommunicator comm);
std::size_t packSize(const data::Rates& data, Dune::MPIHelper::MPICommunicator comm);
std::size_t packSize(const data::Connection& data, Dune::MPIHelper::MPICommunicator comm);
std::size_t packSize(const data::Segment& data, Dune::MPIHelper::MPICommunicator comm);
std::size_t packSize(const data::Well& data, Dune::MPIHelper::MPICommunicator comm);
std::size_t packSize(const data::CellData& data, Dune::MPIHelper::MPICommunicator comm);
std::size_t packSize(const RestartKey& data, Dune::MPIHelper::MPICommunicator comm);
std::size_t packSize(const data::Solution& data, Dune::MPIHelper::MPICommunicator comm);
std::size_t packSize(const data::WellRates& data, Dune::MPIHelper::MPICommunicator comm);
std::size_t packSize(const RestartValue& data, Dune::MPIHelper::MPICommunicator comm);
////// pack routines
template<class T>
void pack(const T*, std::size_t, std::vector<char>&, int&,
Dune::MPIHelper::MPICommunicator, std::integral_constant<bool, false>);
template<class T>
void pack(const T* data, std::size_t l, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm, std::integral_constant<bool, true>);
template<class T>
void pack(const T* data, std::size_t l, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
template<class T>
void pack(const T&, std::vector<char>&, int&,
Dune::MPIHelper::MPICommunicator, std::integral_constant<bool, false>);
template<class T>
void pack(const T& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm, std::integral_constant<bool, true>);
template<class T>
void pack(const T& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
template<class T1, class T2>
void pack(const std::pair<T1,T2>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
template<class T>
void pack(const std::vector<T>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
void pack(const char* str, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
void pack(const std::string& str, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
template<class T1, class T2>
void pack(const std::map<T1,T2>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
template<class T1, class T2>
void pack(const std::unordered_map<T1,T2>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
void pack(const data::Rates& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
void pack(const data::Connection& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
void pack(const data::Segment& data,std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
void pack(const data::Well& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
void pack(const RestartKey& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
void pack(const data::CellData& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
void pack(const data::Solution& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
void pack(const data::WellRates& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
void pack(const RestartValue& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
/// unpack routines
template<class T>
void unpack(T*, const std::size_t&, std::vector<char>&, int&,
Dune::MPIHelper::MPICommunicator, std::integral_constant<bool, false>);
template<class T>
void unpack(T* data, const std::size_t& l, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm,
std::integral_constant<bool, true>);
template<class T>
void unpack(T* data, const std::size_t& l, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
template<class T>
void unpack(T&, std::vector<char>&, int&,
Dune::MPIHelper::MPICommunicator, std::integral_constant<bool, false>);
template<class T>
void unpack(T& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm, std::integral_constant<bool, true>);
template<class T>
void unpack(T& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
template<class T1, class T2>
void unpack(std::pair<T1,T2>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
template<class T>
void unpack(std::vector<T>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
void unpack(char* str, std::size_t length, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
void unpack(std::string& str, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
template<class T1, class T2>
void unpack(std::map<T1,T2>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
template<class T1, class T2>
void unpack(std::unordered_map<T1,T2>& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
void unpack(data::Rates& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
void unpack(data::Connection& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
void unpack(data::Segment& data,std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
void unpack(data::Well& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
void unpack(RestartKey& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
void unpack(data::CellData& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
void unpack(data::Solution& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
void unpack(data::WellRates& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
void unpack(RestartValue& data, std::vector<char>& buffer, int& position,
Dune::MPIHelper::MPICommunicator comm);
} // end namespace Mpi
RestartValue loadParallelRestart(const EclipseIO* eclIO, SummaryState& summaryState,
const std::vector<Opm::RestartKey>& solutionKeys,
const std::vector<Opm::RestartKey>& extraKeys,
Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator> comm);
} // end namespace Opm
#endif // PARALLEL_RESTART_HPP