diff --git a/CMakeLists_files.cmake b/CMakeLists_files.cmake index 01cd5dc24..b3b215a93 100644 --- a/CMakeLists_files.cmake +++ b/CMakeLists_files.cmake @@ -44,6 +44,7 @@ list (APPEND MAIN_SOURCE_FILES opm/simulators/utils/DeferredLogger.cpp opm/simulators/utils/gatherDeferredLogger.cpp opm/simulators/utils/moduleVersion.cpp + opm/simulators/utils/ParallelRestart.cpp opm/simulators/wells/VFPProdProperties.cpp opm/simulators/wells/VFPInjProperties.cpp ) @@ -187,6 +188,7 @@ list (APPEND PUBLIC_HEADER_FILES opm/simulators/utils/DeferredLogger.hpp opm/simulators/utils/gatherDeferredLogger.hpp opm/simulators/utils/moduleVersion.hpp + opm/simulators/utils/ParallelRestart.hpp opm/simulators/wells/RateConverter.hpp opm/simulators/wells/SimFIBODetails.hpp opm/simulators/wells/WellConnectionAuxiliaryModule.hpp diff --git a/ebos/collecttoiorank.hh b/ebos/collecttoiorank.hh index b5e9856a1..c46bc3ad5 100644 --- a/ebos/collecttoiorank.hh +++ b/ebos/collecttoiorank.hh @@ -101,6 +101,7 @@ public: IndexMapType& localIndexMap_; IndexMapStorageType& indexMaps_; std::map globalPosition_; + std::set& recv_; std::vector& ranks_; public: @@ -108,21 +109,26 @@ public: const std::vector& distributedGlobalIndex, IndexMapType& localIndexMap, IndexMapStorageType& indexMaps, - std::vector& ranks) + std::vector& ranks, + std::set& recv, + bool isIORank) : distributedGlobalIndex_(distributedGlobalIndex) , localIndexMap_(localIndexMap) , indexMaps_(indexMaps) , globalPosition_() + , recv_(recv) , ranks_(ranks) { size_t size = globalIndex.size(); // create mapping globalIndex --> localIndex - for (size_t index = 0; index < size; ++index) - globalPosition_.insert(std::make_pair(globalIndex[index], index)); + if ( isIORank ) // ioRank + for (size_t index = 0; index < size; ++index) + globalPosition_.insert(std::make_pair(globalIndex[index], index)); // we need to create a mapping from local to global if (!indexMaps_.empty()) { - ranks_.resize(size, -1); + if (isIORank) + ranks_.resize(size, -1); // for the ioRank create a localIndex to index in global state map IndexMapType& indexMap = indexMaps_.back(); size_t localSize = localIndexMap_.size(); @@ -130,12 +136,44 @@ public: for (size_t i=0; isecond; + // Using max should be backwards compatible + ranks_[entry] = std::max(ranks_[entry], rank); + } + if (rankIt != recv_.end()) + ++rankIt; + } +#ifndef NDEBUG + for (const auto& rank: ranks_) + assert(rank>=0); +#endif + } + } + void pack(int link, MessageBufferType& buffer) { // we should only get one link @@ -163,15 +201,89 @@ public: buffer.read(numCells); indexMap.resize(numCells); for (int index = 0; index < numCells; ++index) { - int globalId = -1; - buffer.read(globalId); - assert(globalPosition_.find(globalId) != globalPosition_.end()); - indexMap[index] = globalPosition_[globalId]; - ranks_[indexMap[index]] = link + 1; + buffer.read(indexMap[index]); } } }; + /// \brief Communication handle to scatter the global index + template + class ElementIndexScatterHandle + { + public: + ElementIndexScatterHandle(const Mapper& sendMapper, const Mapper& recvMapper, std::vector& elementIndices) + : sendMapper_(sendMapper), recvMapper_(recvMapper), elementIndices_(elementIndices) + {} + using DataType = int; + bool fixedsize(int /*dim*/, int /*codim*/) + { + return true; + } + + template + std::size_t size(const T&) + { + return 1; + } + template + void gather(B& buffer, const T& t) + { + buffer.write(sendMapper_.index(t)); + } + template + void scatter(B& buffer, const T& t, std::size_t) + { + 
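+            // Store the element index received from the sending side (its index in
+            // the global/EQUIL grid's mapper) at the position of the matching local
+            // element, building up localIdxToGlobalIdx_ for restart lookups.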
buffer.read(elementIndices_[recvMapper_.index(t)]); + } + + bool contains(int dim, int codim) + { + return dim==3 && codim==0; + } + private: + const Mapper& sendMapper_, recvMapper_; + std::vector& elementIndices_; + }; + + /// \brief Communication handle to scatter the global index + template + class ElementIndexHandle + { + public: + ElementIndexHandle(const Mapper& mapper, std::vector& elementIndices) + : mapper_(mapper), elementIndices_(elementIndices) + {} + using DataType = int; + bool fixedsize(int /*dim*/, int /*codim*/) + { + return true; + } + + template + std::size_t size(const T&) + { + return 1; + } + template + void gather(B& buffer, const T& t) + { + buffer.write(elementIndices_[mapper_.index(t)]); + } + template + void scatter(B& buffer, const T& t, std::size_t) + { + buffer.read(elementIndices_[mapper_.index(t)]); + } + + bool contains(int dim, int codim) + { + return dim==3 && codim==0; + } + private: + const Mapper& mapper_; + std::vector& elementIndices_; + }; + enum { ioRank = 0 }; static const bool needsReordering = @@ -189,48 +301,6 @@ public: { std::set send, recv; typedef typename Vanguard::EquilGrid::LeafGridView EquilGridView; - const EquilGridView equilGridView = vanguard.equilGrid().leafGridView(); - -#if DUNE_VERSION_NEWER(DUNE_GRID, 2,6) - typedef Dune::MultipleCodimMultipleGeomTypeMapper EquilElementMapper; - EquilElementMapper equilElemMapper(equilGridView, Dune::mcmgElementLayout()); -#else - typedef Dune::MultipleCodimMultipleGeomTypeMapper EquilElementMapper; - EquilElementMapper equilElemMapper(equilGridView); -#endif - - // We need a mapping from local to global grid, here we - // use equilGrid which represents a view on the global grid - const size_t globalSize = vanguard.equilGrid().leafGridView().size(0); - // reserve memory - globalCartesianIndex_.resize(globalSize, -1); - - // loop over all elements (global grid) and store Cartesian index - auto elemIt = vanguard.equilGrid().leafGridView().template begin<0>(); - const auto& elemEndIt = vanguard.equilGrid().leafGridView().template end<0>(); - for (; elemIt != elemEndIt; ++elemIt) { - int elemIdx = equilElemMapper.index(*elemIt); - int cartElemIdx = vanguard.equilCartesianIndexMapper().cartesianIndex(elemIdx); - globalCartesianIndex_[elemIdx] = cartElemIdx; - } - - // the I/O rank receives from all other ranks - if (isIORank()) { - for (int i = 0; i < comm.size(); ++i) { - if (i != ioRank) - recv.insert(i); - } - } - else // all other simply send to the I/O rank - send.insert(ioRank); - - localIndexMap_.clear(); - const size_t gridSize = vanguard.grid().size(0); - localIndexMap_.reserve(gridSize); - - // store the local Cartesian index - IndexMapType distributedCartesianIndex; - distributedCartesianIndex.resize(gridSize, -1); typedef typename Vanguard::GridView LocalGridView; const LocalGridView localGridView = vanguard.gridView(); @@ -243,6 +313,65 @@ public: ElementMapper elemMapper(localGridView); #endif + localIdxToGlobalIdx_.resize(localGridView.size(0), -1); + + // the I/O rank receives from all other ranks + if (isIORank()) { + // We need a mapping from local to global grid, here we + // use equilGrid which represents a view on the global grid + // reserve memory + const size_t globalSize = vanguard.equilGrid().leafGridView().size(0); + globalCartesianIndex_.resize(globalSize, -1); + const EquilGridView equilGridView = vanguard.equilGrid().leafGridView(); + +#if DUNE_VERSION_NEWER(DUNE_GRID, 2,6) + typedef Dune::MultipleCodimMultipleGeomTypeMapper EquilElementMapper; + EquilElementMapper 
equilElemMapper(equilGridView, Dune::mcmgElementLayout()); +#else + typedef Dune::MultipleCodimMultipleGeomTypeMapper EquilElementMapper; + EquilElementMapper equilElemMapper(equilGridView); +#endif + + // Scatter the global index to local index for lookup during restart + ElementIndexScatterHandle handle(equilElemMapper, elemMapper, localIdxToGlobalIdx_); + vanguard.grid().scatterData(handle); + + // loop over all elements (global grid) and store Cartesian index + auto elemIt = vanguard.equilGrid().leafGridView().template begin<0>(); + const auto& elemEndIt = vanguard.equilGrid().leafGridView().template end<0>(); + for (; elemIt != elemEndIt; ++elemIt) { + int elemIdx = equilElemMapper.index(*elemIt); + int cartElemIdx = vanguard.equilCartesianIndexMapper().cartesianIndex(elemIdx); + globalCartesianIndex_[elemIdx] = cartElemIdx; + } + + for (int i = 0; i < comm.size(); ++i) { + if (i != ioRank) + recv.insert(i); + } + } + else + { + // all other simply send to the I/O rank + send.insert(ioRank); + + // Scatter the global index to local index for lookup during restart + ElementIndexScatterHandle handle(elemMapper, elemMapper, localIdxToGlobalIdx_); + vanguard.grid().scatterData(handle); + } + + // Sync the global element indices + ElementIndexHandle handle(elemMapper, localIdxToGlobalIdx_); + vanguard.grid().communicate(handle, Dune::InteriorBorder_All_Interface, + Dune::ForwardCommunication); + localIndexMap_.clear(); + const size_t gridSize = vanguard.grid().size(0); + localIndexMap_.reserve(gridSize); + + // store the local Cartesian index + IndexMapType distributedCartesianIndex; + distributedCartesianIndex.resize(gridSize, -1); + // A mapping for the whole grid (including the ghosts) is needed for restarts auto eIt = localGridView.template begin<0>(); const auto& eEndIt = localGridView.template end<0>(); @@ -269,7 +398,9 @@ public: distributedCartesianIndex, localIndexMap_, indexMaps_, - globalRanks_); + globalRanks_, + recv, + isIORank()); toIORankComm_.exchange(distIndexMapping); } } @@ -547,15 +678,13 @@ public: if (!isParallel()) return localIdx; - // the last indexMap is the local one - const IndexMapType& indexMap = indexMaps_.back(); - if (indexMap.empty()) + if (localIdxToGlobalIdx_.empty()) throw std::logic_error("index map is not created on this rank"); - if (localIdx > indexMap.size()) + if (localIdx > localIdxToGlobalIdx_.size()) throw std::logic_error("local index is outside map range"); - return indexMap[localIdx]; + return localIdxToGlobalIdx_[localIdx]; } size_t numCells () const @@ -569,12 +698,10 @@ public: if (!isParallel()) return true; - // the last indexMap is the local one - const IndexMapType& indexMap = indexMaps_.back(); - if (indexMap.empty()) + if (localIdxToGlobalIdx_.empty()) throw std::logic_error("index map is not created on this rank"); - return std::find(indexMap.begin(), indexMap.end(), globalIdx) != indexMap.end(); + return std::find(localIdxToGlobalIdx_.begin(), localIdxToGlobalIdx_.end(), globalIdx) != localIdxToGlobalIdx_.end(); } protected: @@ -586,6 +713,7 @@ protected: Opm::data::Solution globalCellData_; std::map, double> globalBlockData_; Opm::data::Wells globalWellData_; + std::vector localIdxToGlobalIdx_; }; } // end namespace Opm diff --git a/ebos/eclcpgridvanguard.hh b/ebos/eclcpgridvanguard.hh index f88c99b4d..527377c28 100644 --- a/ebos/eclcpgridvanguard.hh +++ b/ebos/eclcpgridvanguard.hh @@ -83,20 +83,14 @@ private: public: EclCpGridVanguard(Simulator& simulator) - : EclBaseVanguard(simulator) + : EclBaseVanguard(simulator), mpiRank() 
{ +#if HAVE_MPI + MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); +#endif this->callImplementationInit(); } - ~EclCpGridVanguard() - { - delete cartesianIndexMapper_; - delete equilCartesianIndexMapper_; - delete grid_; - delete equilGrid_; - delete globalTrans_; - } - /*! * \brief Return a reference to the simulation grid. */ @@ -119,7 +113,10 @@ public: * same as the grid which is used for the actual simulation. */ const EquilGrid& equilGrid() const - { return *equilGrid_; } + { + assert(mpiRank == 0); + return *equilGrid_; + } /*! * \brief Indicates that the initial condition has been computed and the memory used @@ -130,11 +127,8 @@ public: */ void releaseEquilGrid() { - delete equilGrid_; - equilGrid_ = 0; - - delete equilCartesianIndexMapper_; - equilCartesianIndexMapper_ = 0; + equilGrid_.reset(); + equilCartesianIndexMapper_.reset(); } /*! @@ -145,9 +139,7 @@ public: void loadBalance() { #if HAVE_MPI - int mpiRank = 0; int mpiSize = 1; - MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); if (mpiSize > 1) { @@ -155,9 +147,12 @@ public: // its edge weights. since this is (kind of) a layering violation and // transmissibilities are relatively expensive to compute, we only do it if // more than a single process is involved in the simulation. - cartesianIndexMapper_ = new CartesianIndexMapper(*grid_); - globalTrans_ = new EclTransmissibility(*this); - globalTrans_->update(); + cartesianIndexMapper_.reset(new CartesianIndexMapper(*grid_)); + if (grid_->size(0)) + { + globalTrans_.reset(new EclTransmissibility(*this)); + globalTrans_->update(); + } Dune::EdgeWeightMethod edgeWeightsMethod = this->edgeWeightsMethod(); @@ -200,12 +195,11 @@ public: } grid_->switchToDistributedView(); - delete cartesianIndexMapper_; - cartesianIndexMapper_ = nullptr; + cartesianIndexMapper_.reset(); } #endif - cartesianIndexMapper_ = new CartesianIndexMapper(*grid_); + cartesianIndexMapper_.reset(new CartesianIndexMapper(*grid_)); this->updateGridView_(); } @@ -217,8 +211,7 @@ public: */ void releaseGlobalTransmissibilities() { - delete globalTrans_; - globalTrans_ = nullptr; + globalTrans_.reset(); } /*! @@ -232,18 +225,24 @@ public: * \brief Returns mapper from compressed to cartesian indices for the EQUIL grid */ const CartesianIndexMapper& equilCartesianIndexMapper() const - { return *equilCartesianIndexMapper_; } + { + assert(mpiRank == 0); + assert(equilCartesianIndexMapper_); + return *equilCartesianIndexMapper_; + } std::unordered_set defunctWellNames() const { return defunctWellNames_; } const EclTransmissibility& globalTransmissibility() const - { return *globalTrans_; } + { + assert( globalTrans_ != nullptr ); + return *globalTrans_; + } void releaseGlobalTransmissibility() { - delete globalTrans_; - globalTrans_ = nullptr; + globalTrans_.reset(); } protected: @@ -252,7 +251,7 @@ protected: const auto& gridProps = this->eclState().get3DProperties(); const std::vector& porv = gridProps.getDoubleGridProperty("PORV").getData(); - grid_ = new Dune::CpGrid(); + grid_.reset(new Dune::CpGrid()); grid_->processEclipseFormat(this->eclState().getInputGrid(), /*isPeriodic=*/false, /*flipNormals=*/false, @@ -266,29 +265,31 @@ protected: // the initial condition is calculated. // After loadbalance grid_ will contain a global and distribute view. // equilGrid_being a shallow copy only the global view. 
- equilGrid_ = new Dune::CpGrid(*grid_); - equilCartesianIndexMapper_ = new CartesianIndexMapper(*equilGrid_); - - globalTrans_ = nullptr; + if (mpiRank == 0) + { + equilGrid_.reset(new Dune::CpGrid(*grid_)); + equilCartesianIndexMapper_.reset(new CartesianIndexMapper(*equilGrid_)); + } } // removing some connection located in inactive grid cells void filterConnections_() { - assert(grid_); - Grid grid = *grid_; - grid.switchToGlobalView(); - const auto eclipseGrid = Opm::UgGridHelpers::createEclipseGrid(grid, this->eclState().getInputGrid()); - this->schedule().filterConnections(eclipseGrid); + if (equilGrid_) + { + const auto eclipseGrid = Opm::UgGridHelpers::createEclipseGrid(equilGrid(), this->eclState().getInputGrid()); + this->schedule().filterConnections(eclipseGrid); + } } - Grid* grid_; - EquilGrid* equilGrid_; - CartesianIndexMapper* cartesianIndexMapper_; - CartesianIndexMapper* equilCartesianIndexMapper_; + std::unique_ptr grid_; + std::unique_ptr equilGrid_; + std::unique_ptr cartesianIndexMapper_; + std::unique_ptr equilCartesianIndexMapper_; - EclTransmissibility* globalTrans_; + std::unique_ptr > globalTrans_; std::unordered_set defunctWellNames_; + int mpiRank; }; } // namespace Opm diff --git a/ebos/eclwriter.hh b/ebos/eclwriter.hh index 3146b6ea1..70d7f077a 100644 --- a/ebos/eclwriter.hh +++ b/ebos/eclwriter.hh @@ -43,6 +43,7 @@ #include #include +#include #include #include @@ -178,12 +179,14 @@ public: , collectToIORank_(simulator_.vanguard()) , eclOutputModule_(simulator, collectToIORank_) { - globalGrid_ = simulator_.vanguard().grid(); - globalGrid_.switchToGlobalView(); - eclIO_.reset(new Opm::EclipseIO(simulator_.vanguard().eclState(), - Opm::UgGridHelpers::createEclipseGrid(globalGrid_, simulator_.vanguard().eclState().getInputGrid()), - simulator_.vanguard().schedule(), - simulator_.vanguard().summaryConfig())); + if (collectToIORank_.isIORank()) { + globalGrid_ = simulator_.vanguard().grid(); + globalGrid_.switchToGlobalView(); + eclIO_.reset(new Opm::EclipseIO(simulator_.vanguard().eclState(), + Opm::UgGridHelpers::createEclipseGrid(globalGrid_, simulator_.vanguard().eclState().getInputGrid()), + simulator_.vanguard().schedule(), + simulator_.vanguard().summaryConfig())); + } // create output thread if enabled and rank is I/O rank // async output is enabled by default if pthread are enabled @@ -198,7 +201,10 @@ public: { } const Opm::EclipseIO& eclIO() const - { return *eclIO_; } + { + assert(eclIO_); + return *eclIO_; + } void writeInit() { @@ -239,7 +245,6 @@ public: if (reportStepNum == 0) return; - const auto& summary = eclIO_->summary(); Scalar curTime = simulator_.time() + simulator_.timeStepSize(); Scalar totalCpuTime = simulator_.executionTimer().realTimeElapsed() + @@ -272,6 +277,7 @@ public: std::vector buffer; if (collectToIORank_.isIORank()) { + const auto& summary = eclIO_->summary(); const auto& eclState = simulator_.vanguard().eclState(); // Add TCPU @@ -423,7 +429,8 @@ public: { Opm::SummaryState& summaryState = simulator_.vanguard().summaryState(); - auto restartValues = eclIO_->loadRestart(summaryState, solutionKeys, extraKeys); + auto restartValues = loadParallelRestart(eclIO_.get(), summaryState, solutionKeys, extraKeys, + gridView.grid().comm()); for (unsigned elemIdx = 0; elemIdx < numElements; ++elemIdx) { unsigned globalIdx = collectToIORank_.localIdxToGlobalIdx(elemIdx); @@ -484,9 +491,10 @@ private: #endif const auto& cartesianCellIdx = globalGrid_.globalCell(); - const auto* globalTrans = 
&(simulator_.vanguard().globalTransmissibility()); + const EclTransmissibility* globalTrans; if (!collectToIORank_.isParallel()) + { // in the sequential case we must use the transmissibilites defined by // the problem. (because in the sequential case, the grid manager does // not compute "global" transmissibilities for performance reasons. in @@ -494,6 +502,11 @@ private: // because this object refers to the distributed grid and we need the // sequential version here.) globalTrans = &simulator_.problem().eclTransmissibilities(); + } + else + { + globalTrans = &(simulator_.vanguard().globalTransmissibility()); + } auto elemIt = globalGridView.template begin(); const auto& elemEndIt = globalGridView.template end(); @@ -583,7 +596,7 @@ private: ElementMapper globalElemMapper(globalGridView); #endif - const auto* globalTrans = &(simulator_.vanguard().globalTransmissibility()); + const EclTransmissibility* globalTrans; if (!collectToIORank_.isParallel()) { // in the sequential case we must use the transmissibilites defined by // the problem. (because in the sequential case, the grid manager does @@ -593,6 +606,10 @@ private: // sequential version here.) globalTrans = &simulator_.problem().eclTransmissibilities(); } + else + { + globalTrans = &(simulator_.vanguard().globalTransmissibility()); + } auto cartDims = simulator_.vanguard().cartesianIndexMapper().cartesianDimensions(); auto elemIt = globalGridView.template begin(); diff --git a/opm/simulators/utils/ParallelRestart.cpp b/opm/simulators/utils/ParallelRestart.cpp new file mode 100644 index 000000000..792f83b58 --- /dev/null +++ b/opm/simulators/utils/ParallelRestart.cpp @@ -0,0 +1,674 @@ +/* + Copyright 2019 Equinor AS. + + This file is part of the Open Porous Media project (OPM). + + OPM is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OPM is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with OPM. If not, see . 
+*/ +#include +#if HAVE_MPI +#include +#endif + +#include "ParallelRestart.hpp" +#include + +namespace Opm +{ +namespace Mpi +{ +template +std::size_t packSize(const T*, std::size_t, Dune::MPIHelper::MPICommunicator, + std::integral_constant) +{ + OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type."); +} + +template +std::size_t packSize(const T*, std::size_t l, Dune::MPIHelper::MPICommunicator comm, + std::integral_constant) +{ +#if HAVE_MPI + int size; + MPI_Pack_size(1, Dune::MPITraits::getType(), comm, &size); + std::size_t totalSize = size; + MPI_Pack_size(l, Dune::MPITraits::getType(), comm, &size); + return totalSize + size; +#else + (void) comm; + return l-l; +#endif +} + +template +std::size_t packSize(const T* data, std::size_t l, Dune::MPIHelper::MPICommunicator comm) +{ + return packSize(data, l, comm, typename std::is_pod::type()); +} + +template +std::size_t packSize(const T&, Dune::MPIHelper::MPICommunicator, + std::integral_constant) +{ + OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type."); +} + +template +std::size_t packSize(const T&, Dune::MPIHelper::MPICommunicator comm, + std::integral_constant) +{ +#if HAVE_MPI + int size{}; + MPI_Pack_size(1, Dune::MPITraits::getType(), comm, &size); + return size; +#else + (void) comm; + return 0; +#endif +} + +template +std::size_t packSize(const T& data, Dune::MPIHelper::MPICommunicator comm) +{ + return packSize(data, comm, typename std::is_pod::type()); +} + +template +std::size_t packSize(const std::pair& data, Dune::MPIHelper::MPICommunicator comm) +{ + return packSize(data.first, comm) + packSize(data.second, comm); +} + +template +std::size_t packSize(const std::vector& data, Dune::MPIHelper::MPICommunicator comm) +{ + if (std::is_pod::value) + // size written automatically + return packSize(data.data(), data.size(), comm); + + std::size_t size = packSize(data.size(), comm); + + for (const auto& entry: data) + size+=packSize(entry, comm); + + return size; +} + +std::size_t packSize(const char* str, Dune::MPIHelper::MPICommunicator comm) +{ +#if HAVE_MPI + int size; + MPI_Pack_size(1, Dune::MPITraits::getType(), comm, &size); + int totalSize = size; + MPI_Pack_size(strlen(str)+1, MPI_CHAR, comm, &size); + return totalSize + size; +#else + (void) str; + (void) comm; + return 0; +#endif +} + +std::size_t packSize(const std::string& str, Dune::MPIHelper::MPICommunicator comm) +{ + return packSize(str.c_str(), comm); +} + +template +std::size_t packSize(const std::map data, Dune::MPIHelper::MPICommunicator comm) +{ + std::size_t totalSize = packSize(data.size(), comm); + for (const auto& entry: data) + { + totalSize += packSize(entry, comm); + } + return totalSize; +} + +template +std::size_t packSize(const std::unordered_map data, Dune::MPIHelper::MPICommunicator comm) +{ + std::size_t totalSize = packSize(data.size(), comm); + for (const auto& entry: data) + { + totalSize += packSize(entry, comm); + } + return totalSize; +} + +std::size_t packSize(const data::Rates& data, Dune::MPIHelper::MPICommunicator comm) +{ + // plain struct no pointers -> treat as pod + return packSize(data, comm, std::integral_constant()); +} + +std::size_t packSize(const data::Connection& data, Dune::MPIHelper::MPICommunicator comm) +{ + // plain struct no pointers -> treat as pod + return packSize(data, comm, std::integral_constant()); +} + +std::size_t packSize(const data::Segment& data, Dune::MPIHelper::MPICommunicator comm) +{ + // plain struct no pointers -> treat as pod + return 
packSize(data, comm, std::integral_constant()); +} + +std::size_t packSize(const data::Well& data, Dune::MPIHelper::MPICommunicator comm) +{ + std::size_t size = packSize(data.rates, comm); + size += packSize(data.bhp, comm) + packSize(data.thp, comm); + size += packSize(data.temperature, comm); + size += packSize(data.control, comm); + size += packSize(data.connections, comm); + size += packSize(data.segments, comm); + return size; +} + +std::size_t packSize(const data::CellData& data, Dune::MPIHelper::MPICommunicator comm) +{ + return packSize(data.dim, comm) + packSize(data.data, comm) + packSize(data.target, comm); +} + +std::size_t packSize(const RestartKey& data, Dune::MPIHelper::MPICommunicator comm) +{ + return packSize(data.key, comm) + packSize(data.dim, comm) + packSize(data.required, comm); +} + +std::size_t packSize(const data::Solution& data, Dune::MPIHelper::MPICommunicator comm) +{ + // Needs explicit conversion to a supported base type holding the data + // to prevent throwing. + return packSize(static_cast&>(data), comm); +} + +std::size_t packSize(const data::WellRates& data, Dune::MPIHelper::MPICommunicator comm) +{ + // Needs explicit conversion to a supported base type holding the data + // to prevent throwing. + return packSize(static_cast&>(data), comm); +} + +std::size_t packSize(const RestartValue& data, Dune::MPIHelper::MPICommunicator comm) +{ + return packSize(data.solution, comm) + packSize(data.wells, comm) + packSize(data.extra, comm); +} + +////// pack routines + +template +void pack(const T*, std::size_t, std::vector&, int&, + Dune::MPIHelper::MPICommunicator, std::integral_constant) +{ + OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type."); +} + +template +void pack(const T* data, std::size_t l, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm, + std::integral_constant) +{ +#if HAVE_MPI + MPI_Pack(&l, 1, Dune::MPITraits::getType(), buffer.data(), + buffer.size(), &position, comm); + MPI_Pack(data, l, Dune::MPITraits::getType(), buffer.data(), + buffer.size(), &position, comm); +#else + (void) data; + (void) comm; + (void) l; + (void) buffer; + (void) position; +#endif +} + +template +void pack(const T* data, std::size_t l, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + pack(data, l, buffer, position, comm, typename std::is_pod::type()); +} + +template +void pack(const T&, std::vector&, int&, + Dune::MPIHelper::MPICommunicator, std::integral_constant) +{ + OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type."); +} + +template +void pack(const T& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm, std::integral_constant) +{ +#if HAVE_MPI + MPI_Pack(&data, 1, Dune::MPITraits::getType(), buffer.data(), + buffer.size(), &position, comm); +#else + (void) data; + (void) comm; + (void) buffer; + (void) position; +#endif +} + +template +void pack(const T& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + pack(data, buffer, position, comm, typename std::is_pod::type()); +} + +template +void pack(const std::pair& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + pack(data.first, buffer, position, comm); + pack(data.second, buffer, position, comm); +} + +template +void pack(const std::vector& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + if (std::is_pod::value) + { + // size written automatically + 
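+        // (delegates to the T* overload, which packs the element count followed
+        // by the raw elements)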
pack(data.data(), data.size(), buffer, position, comm); + return; + } + + pack(data.size(), buffer, position, comm); + + for (const auto& entry: data) + pack(entry, buffer, position, comm); +} + +void pack(const char* str, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ +#if HAVE_MPI + std::size_t length = strlen(str)+1; + MPI_Pack(&length, 1, Dune::MPITraits::getType(), buffer.data(), + buffer.size(), &position, comm); + MPI_Pack(str, strlen(str)+1, MPI_CHAR, buffer.data(), buffer.size(), + &position, comm); +#else + (void) str; + (void) comm; + (void) buffer; + (void) position; +#endif +} + +void pack(const std::string& str, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + pack(str.c_str(), buffer, position, comm); +} + +template +void pack(const std::map& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + pack(data.size(), buffer, position, comm); + + for (const auto& entry: data) + { + pack(entry, buffer, position, comm); + } +} + +template +void pack(const std::unordered_map& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + pack(data.size(), buffer, position, comm); + + for (const auto& entry: data) + { + pack(entry, buffer, position, comm); + } +} + +void pack(const data::Rates& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + // plain struct no pointers -> treat as pod + pack(data, buffer, position, comm, std::integral_constant()); +} + +void pack(const data::Connection& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + // plain struct no pointers -> treat as pod + pack(data, buffer, position, comm, std::integral_constant()); +} + +void pack(const data::Segment& data,std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + // plain struct no pointers -> treat as pod + pack(data, buffer, position, comm, std::integral_constant()); +} + +void pack(const data::Well& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + pack(data.rates, buffer, position, comm); + pack(data.bhp, buffer, position, comm); + pack(data.thp, buffer, position, comm); + pack(data.temperature, buffer, position, comm); + pack(data.control, buffer, position, comm); + pack(data.connections, buffer, position, comm); + pack(data.segments, buffer, position, comm); +} + +void pack(const RestartKey& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + pack(data.key, buffer, position, comm); + pack(data.dim, buffer, position, comm); + pack(data.required, buffer, position, comm); +} + +void pack(const data::CellData& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + pack(data.dim, buffer, position, comm); + pack(data.data, buffer, position, comm); + pack(data.target, buffer, position, comm); +} + +void pack(const data::Solution& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + // Needs explicit conversion to a supported base type holding the data + // to prevent throwing. + pack(static_cast&>(data), + buffer, position, comm); +} + +void pack(const data::WellRates& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + // Needs explicit conversion to a supported base type holding the data + // to prevent throwing. 
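+    // (data::WellRates is a std::map of well name to data::Well underneath, so the
+    // generic map overload above does the actual serialization work)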
+ pack(static_cast&>(data), + buffer, position, comm); +} + +void pack(const RestartValue& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + pack(data.solution, buffer, position, comm); + pack(data.wells, buffer, position, comm); + pack(data.extra, buffer, position, comm); +} + +/// unpack routines + +template +void unpack(T*, const std::size_t&, std::vector&, int&, + Dune::MPIHelper::MPICommunicator, std::integral_constant) +{ + OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type."); +} + +template +void unpack(T* data, const std::size_t& l, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm, + std::integral_constant) +{ +#if HAVE_MPI + MPI_Unpack(buffer.data(), buffer.size(), &position, data, l, + Dune::MPITraits::getType(), comm); +#else + (void) data; + (void) comm; + (void) l; + (void) buffer; + (void) position; +#endif +} + +template +void unpack(T* data, const std::size_t& l, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + unpack(data, l, buffer, position, comm, typename std::is_pod::type()); +} + +template +void unpack(T&, std::vector&, int&, + Dune::MPIHelper::MPICommunicator, std::integral_constant) +{ + OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type."); +} + +template +void unpack(T& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm, std::integral_constant) +{ +#if HAVE_MPI + MPI_Unpack(buffer.data(), buffer.size(), &position, &data, 1, + Dune::MPITraits::getType(), comm); +#else + (void) data; + (void) comm; + (void) buffer; + (void) position; +#endif +} + +template +void unpack(T& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + unpack(data, buffer, position, comm, typename std::is_pod::type()); +} + +template +void unpack(std::pair& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + unpack(data.first, buffer, position, comm); + unpack(data.second, buffer, position, comm); +} + +template +void unpack(std::vector& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + std::size_t length=0; + unpack(length, buffer, position, comm); + data.resize(length); + + if (std::is_pod::value) + { + unpack(data.data(), data.size(), buffer, position, comm); + return; + } + + for (auto& entry: data) + unpack(entry, buffer, position, comm); +} + +void unpack(char* str, std::size_t length, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ +#if HAVE_MPI + MPI_Unpack(buffer.data(), buffer.size(), &position, const_cast(str), length, MPI_CHAR, comm); +#else + (void) str; + (void) comm; + (void) length; + (void) buffer; + (void) position; +#endif +} + +void unpack(std::string& str, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + std::size_t length=0; + unpack(length, buffer, position, comm); + std::vector cStr(length, '\0'); + unpack(cStr.data(), length, buffer, position, comm); + assert(str.empty()); + str.append(cStr.data()); +} + +template +void unpack(std::map& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + std::size_t size=0; + unpack(size, buffer, position, comm); + + for (;size>0; size--) + { + std::pair entry; + unpack(entry, buffer, position, comm); + data.insert(entry); + } +} + +template +void unpack(std::unordered_map& data, std::vector& buffer, int& position, + 
Dune::MPIHelper::MPICommunicator comm) +{ + std::size_t size=0; + unpack(size, buffer, position, comm); + + for (;size>0; size--) + { + std::pair entry; + unpack(entry, buffer, position, comm); + data.insert(entry); + } +} + +void unpack(data::Rates& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + // plain struct no pointers -> treat as pod + unpack(data, buffer, position, comm, std::integral_constant()); +} + +void unpack(data::Connection& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + // plain struct no pointers -> treat as pod + unpack(data, buffer, position, comm, std::integral_constant()); +} + +void unpack(data::Segment& data,std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + // plain struct no pointers -> treat as pod + unpack(data, buffer, position, comm, std::integral_constant()); +} + +void unpack(data::Well& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + unpack(data.rates, buffer, position, comm); + unpack(data.bhp, buffer, position, comm); + unpack(data.thp, buffer, position, comm); + unpack(data.temperature, buffer, position, comm); + unpack(data.control, buffer, position, comm); + unpack(data.connections, buffer, position, comm); + unpack(data.segments, buffer, position, comm); +} + +void unpack(RestartKey& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + unpack(data.key, buffer, position, comm); + unpack(data.dim, buffer, position, comm); + unpack(data.required, buffer, position, comm); +} + +void unpack(data::CellData& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + unpack(data.dim, buffer, position, comm); + unpack(data.data, buffer, position, comm); + unpack(data.target, buffer, position, comm); +} + +void unpack(data::Solution& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + // Needs explicit conversion to a supported base type holding the data + // to prevent throwing. + unpack(static_cast&>(data), + buffer, position, comm); +} + +void unpack(data::WellRates& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + // Needs explicit conversion to a supported base type holding the data + // to prevent throwing. 
+ unpack(static_cast&>(data), + buffer, position, comm); +} + +void unpack(RestartValue& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm) +{ + unpack(data.solution, buffer, position, comm); + unpack(data.wells, buffer, position, comm); + unpack(data.extra, buffer, position, comm); +} + +} // end namespace Mpi +RestartValue loadParallelRestart(const EclipseIO* eclIO, SummaryState& summaryState, + const std::vector& solutionKeys, + const std::vector& extraKeys, + Dune::CollectiveCommunication comm) +{ +#if HAVE_MPI + data::Solution sol; + data::Wells wells; + RestartValue restartValues(sol, wells); + + if (eclIO) + { + assert(comm.rank() == 0); + restartValues = eclIO->loadRestart(summaryState, solutionKeys, extraKeys); + int packedSize = Mpi::packSize(restartValues, comm); + std::vector buffer(packedSize); + int position=0; + Mpi::pack(restartValues, buffer, position, comm); + comm.broadcast(&position, 1, 0); + comm.broadcast(buffer.data(), position, 0); + } + else + { + int bufferSize{}; + comm.broadcast(&bufferSize, 1, 0); + std::vector buffer(bufferSize); + comm.broadcast(buffer.data(), bufferSize, 0); + int position{}; + Mpi::unpack(restartValues, buffer, position, comm); + } + return restartValues; +#else + (void) comm; + return eclIO->loadRestart(summaryState, solutionKeys, extraKeys); +#endif +} +} // end namespace Opm diff --git a/opm/simulators/utils/ParallelRestart.hpp b/opm/simulators/utils/ParallelRestart.hpp new file mode 100644 index 000000000..cf80420b6 --- /dev/null +++ b/opm/simulators/utils/ParallelRestart.hpp @@ -0,0 +1,258 @@ +/* + Copyright 2019 Equinor AS. + + This file is part of the Open Porous Media project (OPM). + + OPM is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OPM is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with OPM. If not, see . 
+*/ +#ifndef PARALLEL_RESTART_HPP +#define PARALLEL_RESTART_HPP + +#if HAVE_MPI +#include +#endif + +#include +#include +#include + +#include + +#include +#include +#include +namespace Opm +{ +namespace Mpi +{ +template +std::size_t packSize(const T*, std::size_t, Dune::MPIHelper::MPICommunicator, + std::integral_constant); + +template +std::size_t packSize(const T*, std::size_t l, Dune::MPIHelper::MPICommunicator comm, + std::integral_constant); + +template +std::size_t packSize(const T* data, std::size_t l, Dune::MPIHelper::MPICommunicator comm); + +template +std::size_t packSize(const T&, Dune::MPIHelper::MPICommunicator, + std::integral_constant); + +template +std::size_t packSize(const T&, Dune::MPIHelper::MPICommunicator comm, + std::integral_constant); + +template +std::size_t packSize(const T& data, Dune::MPIHelper::MPICommunicator comm); + +template +std::size_t packSize(const std::pair& data, Dune::MPIHelper::MPICommunicator comm); + +template +std::size_t packSize(const std::vector& data, Dune::MPIHelper::MPICommunicator comm); + +std::size_t packSize(const char* str, Dune::MPIHelper::MPICommunicator comm); + +std::size_t packSize(const std::string& str, Dune::MPIHelper::MPICommunicator comm); + +template +std::size_t packSize(const std::map data, Dune::MPIHelper::MPICommunicator comm); + +template +std::size_t packSize(const std::unordered_map data, Dune::MPIHelper::MPICommunicator comm); + +std::size_t packSize(const data::Rates& data, Dune::MPIHelper::MPICommunicator comm); + +std::size_t packSize(const data::Connection& data, Dune::MPIHelper::MPICommunicator comm); + +std::size_t packSize(const data::Segment& data, Dune::MPIHelper::MPICommunicator comm); + +std::size_t packSize(const data::Well& data, Dune::MPIHelper::MPICommunicator comm); + +std::size_t packSize(const data::CellData& data, Dune::MPIHelper::MPICommunicator comm); + +std::size_t packSize(const RestartKey& data, Dune::MPIHelper::MPICommunicator comm); + +std::size_t packSize(const data::Solution& data, Dune::MPIHelper::MPICommunicator comm); + +std::size_t packSize(const data::WellRates& data, Dune::MPIHelper::MPICommunicator comm); + +std::size_t packSize(const RestartValue& data, Dune::MPIHelper::MPICommunicator comm); + +////// pack routines + +template +void pack(const T*, std::size_t, std::vector&, int&, + Dune::MPIHelper::MPICommunicator, std::integral_constant); + +template +void pack(const T* data, std::size_t l, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm, std::integral_constant); + +template +void pack(const T* data, std::size_t l, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +template +void pack(const T&, std::vector&, int&, + Dune::MPIHelper::MPICommunicator, std::integral_constant); + +template +void pack(const T& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm, std::integral_constant); + + +template +void pack(const T& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +template +void pack(const std::pair& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +template +void pack(const std::vector& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + + +void pack(const char* str, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +void pack(const std::string& str, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +template +void pack(const 
std::map& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + + +template +void pack(const std::unordered_map& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + + +void pack(const data::Rates& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +void pack(const data::Connection& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +void pack(const data::Segment& data,std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +void pack(const data::Well& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +void pack(const RestartKey& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +void pack(const data::CellData& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +void pack(const data::Solution& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +void pack(const data::WellRates& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +void pack(const RestartValue& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +/// unpack routines + +template +void unpack(T*, const std::size_t&, std::vector&, int&, + Dune::MPIHelper::MPICommunicator, std::integral_constant); + +template +void unpack(T* data, const std::size_t& l, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm, + std::integral_constant); + +template +void unpack(T* data, const std::size_t& l, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +template +void unpack(T&, std::vector&, int&, + Dune::MPIHelper::MPICommunicator, std::integral_constant); + +template +void unpack(T& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm, std::integral_constant); + +template +void unpack(T& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +template +void unpack(std::pair& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +template +void unpack(std::vector& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + + +void unpack(char* str, std::size_t length, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +void unpack(std::string& str, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +template +void unpack(std::map& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +template +void unpack(std::unordered_map& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +void unpack(data::Rates& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +void unpack(data::Connection& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +void unpack(data::Segment& data,std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +void unpack(data::Well& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +void unpack(RestartKey& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +void unpack(data::CellData& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +void unpack(data::Solution& data, std::vector& 
buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +void unpack(data::WellRates& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +void unpack(RestartValue& data, std::vector& buffer, int& position, + Dune::MPIHelper::MPICommunicator comm); + +} // end namespace Mpi +RestartValue loadParallelRestart(const EclipseIO* eclIO, SummaryState& summaryState, + const std::vector& solutionKeys, + const std::vector& extraKeys, + Dune::CollectiveCommunication comm); + +} // end namespace Opm +#endif // PARALLEL_RESTART_HPP
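For reference, loadParallelRestart expects the EclipseIO pointer to be non-null only on the I/O rank (rank 0): that rank reads the restart file, packs the RestartValue and broadcasts the buffer, while every other rank passes nullptr and unpacks the broadcast data, which is how eclwriter.hh calls it with eclIO_.get(). The snippet below is not part of the patch; it is a minimal sketch of the packSize/pack/unpack round trip those helpers build on, assuming an MPI-enabled build (with a serial build the helpers are no-ops) and using the std::string overloads defined in ParallelRestart.cpp.

#include <cassert>
#include <string>
#include <vector>

#include <dune/common/parallel/mpihelper.hh>

#include <opm/simulators/utils/ParallelRestart.hpp>

void roundTripExample()
{
    auto comm = Dune::MPIHelper::getCommunicator();

    const std::string original("SWAT");

    // 1) size the buffer, 2) pack into it, 3) unpack into a fresh object
    std::vector<char> buffer(Opm::Mpi::packSize(original, comm));
    int position = 0;
    Opm::Mpi::pack(original, buffer, position, comm);

    std::string copy;
    position = 0;
    Opm::Mpi::unpack(copy, buffer, position, comm);
    assert(copy == original);
}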