// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
// vi: set et ts=4 sw=4 sts=4:
/*
  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 2 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM.  If not, see <http://www.gnu.org/licenses/>.

  Consult the COPYING file in the top-level source directory of this
  module for the precise wording of the license and the list of
  copyright holders.
*/
#ifndef EWOMS_PARALLELSERIALOUTPUT_HH
#define EWOMS_PARALLELSERIALOUTPUT_HH

//#if HAVE_OPM_GRID
#include <opm/grid/common/p2pcommunicator.hh>
//#else
//#error "This header needs the opm-grid module."
//#endif

#include <dune/common/version.hh>
#include <dune/grid/common/gridenums.hh>
#include <dune/grid/common/mcmgmapper.hh>
#include <dune/grid/utility/persistentcontainer.hh>

#include <opm/common/ErrorMacros.hpp>

#include <cassert>
#include <cstddef>
#include <map>
#include <set>
#include <vector>

namespace Ewoms
{

template < class GridManager >
class CollectDataToIORank
{
public:
    typedef typename GridManager :: Grid Grid;
    typedef typename Grid :: CollectiveCommunication CollectiveCommunication;

    // global id
    class GlobalCellIndex
    {
        int globalId_;
        int localIndex_;
        bool isInterior_;
    public:
        GlobalCellIndex() : globalId_(-1), localIndex_(-1), isInterior_(true) {}
        void setGhost() { isInterior_ = false; }

        void setId( const int globalId ) { globalId_ = globalId; }
        void setIndex( const int localIndex ) { localIndex_ = localIndex; }

        int localIndex () const { return localIndex_; }
        int id () const { return globalId_; }
        bool isInterior() const { return isInterior_; }
    };

    typedef typename Dune::PersistentContainer< Grid, GlobalCellIndex > GlobalIndexContainer;

    static const int dimension = Grid :: dimension ;

    typedef typename Grid :: LeafGridView GridView;
    typedef GridView AllGridView;

    typedef Dune :: Point2PointCommunicator< Dune :: SimpleMessageBuffer > P2PCommunicatorType;
    typedef typename P2PCommunicatorType :: MessageBufferType MessageBufferType;

    typedef std::vector< GlobalCellIndex > LocalIndexMapType;

    typedef std::vector< int > IndexMapType;
    typedef std::vector< IndexMapType > IndexMapStorageType;
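    // Both data handles below follow the Dune::Point2PointCommunicator
    // protocol: for every communication link the communicator calls
    // pack( link, buffer ) on the sending side and unpack( link, buffer )
    // on the receiving side, with a message buffer transmitted in between.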
    class DistributeIndexMapping : public P2PCommunicatorType::DataHandleInterface
    {
    protected:
        const std::vector< int >& distributedGlobalIndex_;
        IndexMapType& localIndexMap_;
        IndexMapStorageType& indexMaps_;
        std::map< const int, const int > globalPosition_;
#ifndef NDEBUG
        std::set< int > checkPosition_;
#endif

    public:
        DistributeIndexMapping( const std::vector< int >& globalIndex,
                                const std::vector< int >& distributedGlobalIndex,
                                IndexMapType& localIndexMap,
                                IndexMapStorageType& indexMaps )
        : distributedGlobalIndex_( distributedGlobalIndex ),
          localIndexMap_( localIndexMap ),
          indexMaps_( indexMaps ),
          globalPosition_()
        {
            const size_t size = globalIndex.size();
            // create mapping globalIndex --> localIndex
            for ( size_t index = 0; index < size; ++index )
            {
                globalPosition_.insert( std::make_pair( globalIndex[ index ], index ) );
            }

            // on I/O rank we need to create a mapping from local to global
            if( ! indexMaps_.empty() )
            {
                // for the ioRank create a localIndex to index in global state map
                IndexMapType& indexMap = indexMaps_.back();
                const size_t localSize = localIndexMap_.size();
                indexMap.resize( localSize );
                for( size_t i=0; i<localSize; ++i )
                {
                    const int id = distributedGlobalIndex_[ localIndexMap_[ i ] ];
                    indexMap[ i ] = globalPosition_.at( id );
#ifndef NDEBUG
                    checkPosition_.insert( id );
#endif
                }
            }
        }

        // pack the global cell ids of all interior cells of this rank
        void pack( const int link, MessageBufferType& buffer )
        {
            // we should only get one link
            if( link != 0 ) {
                OPM_THROW(std::logic_error,"link in method pack is not 0 as expected");
            }

            const size_t size = localIndexMap_.size();
            buffer.write( size );
            for( size_t index = 0; index < size; ++index )
            {
                const int globalIdx = distributedGlobalIndex_[ localIndexMap_[ index ] ];
                buffer.write( globalIdx );
            }
        }

        // unpack the global cell ids sent by rank 'link' and store, for each
        // received cell, its position in the global state vector
        void unpack( const int link, MessageBufferType& buffer )
        {
            IndexMapType& indexMap = indexMaps_[ link ];
            assert( ! globalPosition_.empty() );

            size_t numCells = 0;
            buffer.read( numCells );
            indexMap.resize( numCells );
            for( size_t index = 0; index < numCells; ++index )
            {
                int globalId = -1;
                buffer.read( globalId );
                assert( globalPosition_.find( globalId ) != globalPosition_.end() );
                indexMap[ index ] = globalPosition_.at( globalId );
#ifndef NDEBUG
                checkPosition_.insert( globalId );
#endif
            }
        }
    };

    enum { ioRank = 0 };

    CollectDataToIORank( const GridManager& gridManager )
        : toIORankComm_(),
          isIORank_( gridManager.grid().comm().rank() == ioRank )
    {
        typedef Dune::MultipleCodimMultipleGeomTypeMapper< GridView, Dune::MCMGElementLayout > ElementMapper;
        const CollectiveCommunication& comm = gridManager.grid().comm();
        ElementMapper elemMapper(gridManager.gridView());
        {
            std::set< int > send, recv;
            // the I/O rank receives from all other ranks
            if( isIORank() )
            {
                // the I/O rank needs a picture of the global grid, here we
                // use equilGrid which represents a view on the global grid
                const size_t globalSize = gridManager.equilGrid().leafGridView().size( 0 );
                // reserve memory
                globalCartesianIndex_.resize(globalSize, -1);

                // loop over all elements (global grid) and store Cartesian index
                auto elemIt = gridManager.equilGrid().leafGridView().template begin<0>();
                const auto& elemEndIt = gridManager.equilGrid().leafGridView().template end<0>();
                for (; elemIt != elemEndIt; ++elemIt) {
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2,4)
                    int elemIdx = elemMapper.index(*elemIt );
#else
                    int elemIdx = elemMapper.map(*elemIt );
#endif
                    int cartElemIdx = gridManager.equilCartesianIndexMapper().cartesianIndex(elemIdx);
                    globalCartesianIndex_[elemIdx] = cartElemIdx;
                }

                for(int i=0; i<comm.size(); ++i)
                {
                    if( i != ioRank )
                    {
                        recv.insert( i );
                    }
                }
            }
            else // all other ranks send to the I/O rank
            {
                send.insert( ioRank );
            }

            localIndexMap_.clear();
            const size_t gridSize = gridManager.grid().size( 0 );
            localIndexMap_.reserve( gridSize );

            // store the local Cartesian index of each element
            IndexMapType distributedCartesianIndex;
            distributedCartesianIndex.reserve( gridSize );

            auto localView = gridManager.grid().leafGridView();
            for( auto it = localView.template begin< 0 >(),
                 end = localView.template end< 0 >(); it != end; ++it )
            {
                const auto element = *it ;
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2,4)
                int elemIdx = elemMapper.index( element );
#else
                int elemIdx = elemMapper.map( element );
#endif
                distributedCartesianIndex.push_back( gridManager.cartesianIndex( elemIdx ) );

                // only store interior element for collection
                if( element.partitionType() == Dune :: InteriorEntity )
                {
                    localIndexMap_.push_back( elemIdx );
                }
            }

            // insert send and recv linkage to communicator
            toIORankComm_.insertRequest( send, recv );

            if( isIORank() )
            {
                // need an index map for each rank
                indexMaps_.clear();
                indexMaps_.resize( comm.size() );
            }

            // distribute global id's to io rank for later association of dof's
            DistributeIndexMapping distIndexMapping( globalCartesianIndex_,
                                                     distributedCartesianIndex,
                                                     localIndexMap_,
                                                     indexMaps_ );
            toIORankComm_.exchange( distIndexMapping );
        }
    }

    template <class BufferList>
    class PackUnPackOutputBuffers : public P2PCommunicatorType::DataHandleInterface
    {
        BufferList& bufferList_;
        const IndexMapType& localIndexMap_;
        const IndexMapStorageType& indexMaps_;

    public:
        PackUnPackOutputBuffers( BufferList& bufferList,
                                 const IndexMapType& localIndexMap,
                                 const IndexMapStorageType& indexMaps,
                                 const size_t globalSize,
                                 const bool isIORank )
        : bufferList_( bufferList ),
          localIndexMap_( localIndexMap ),
          indexMaps_( indexMaps )
        {
            if( isIORank )
            {
                // pack the I/O rank's own contribution ...
                MessageBufferType buffer;
                pack( 0, buffer );

                // resize all buffers to the size of the global grid
                for (auto it = bufferList_.begin(), end = bufferList_.end(); it != end; ++it )
                {
                    it->second->resize( globalSize );
                }

                // ... and unpack it directly; the last index map is the local one
                doUnpack( indexMaps.back(), buffer );
            }
        }

        // pack all data associated with link
        void pack( const int link, MessageBufferType& buffer )
        {
            // we should only get one link
            if( link != 0 ) {
                OPM_THROW(std::logic_error,"link in method pack is not 0 as expected");
            }

            size_t buffers = bufferList_.size();
            buffer.write( buffers );
            for (auto it = bufferList_.begin(), end = bufferList_.end(); it != end; ++it )
            {
                write( buffer, localIndexMap_, *(it->second) );
            }
        }

        void doUnpack( const IndexMapType& indexMap, MessageBufferType& buffer )
        {
            size_t buffers = 0;
            buffer.read( buffers );
            assert( buffers == bufferList_.size() );
            for( auto it = bufferList_.begin(), end = bufferList_.end(); it != end; ++it )
            {
                read( buffer, indexMap, *(it->second) );
            }
        }
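        // message layout used by pack() and doUnpack(): the number of buffers
        // first, then for each registered buffer the count of interior cells
        // followed by one value per interior cell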
        // unpack all data associated with link
        void unpack( const int link, MessageBufferType& buffer )
        {
            doUnpack( indexMaps_[ link ], buffer );
        }

    protected:
        template <class Vector>
        void write( MessageBufferType& buffer,
                    const IndexMapType& localIndexMap,
                    const Vector& data ) const
        {
            const size_t size = localIndexMap.size();
            assert( size <= data.size() );
            buffer.write( size );
            for( size_t i=0; i<size; ++i )
            {
                buffer.write( data[ localIndexMap[ i ] ] );
            }
        }

        template <class Vector>
        void read( MessageBufferType& buffer,
                   const IndexMapType& indexMap,
                   Vector& data ) const
        {
            size_t size = indexMap.size();
            assert( size <= data.size() );
            buffer.read( size );
            assert( size == indexMap.size() );
            for( size_t i=0; i<size; ++i )
            {
                buffer.read( data[ indexMap[ i ] ] );
            }
        }
    };

    // collect all buffers registered in bufferList on the I/O rank
    template <class BufferList>
    void collect( BufferList& bufferList ) const
    {
        PackUnPackOutputBuffers< BufferList >
            packUnpack( bufferList, localIndexMap_, indexMaps_, numCells(), isIORank() );

        //toIORankComm_.exchangeCached( packUnpack );
        toIORankComm_.exchange( packUnpack );
#ifndef NDEBUG
        // make sure every process is on the same page
        toIORankComm_.barrier();
#endif
    }

    bool isIORank() const { return isIORank_; }

    bool isParallel() const { return toIORankComm_.size() > 1; }

    size_t numCells () const { return globalCartesianIndex_.size(); }

protected:
    P2PCommunicatorType toIORankComm_;
    IndexMapType globalCartesianIndex_;
    IndexMapType localIndexMap_;
    IndexMapStorageType indexMaps_;
    // true if we are on I/O rank
    bool isIORank_;
};

} // end namespace Ewoms

#endif
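// Usage sketch (illustrative only; `MyGridManager`, `gridManager` and the
// buffer contents are assumptions, not part of this header). Every rank
// registers pointers to its local buffers, and collect() gathers the interior
// values of all ranks into the globally sized buffers on the I/O rank:
//
//   Ewoms::CollectDataToIORank< MyGridManager > collect( gridManager );
//
//   std::vector< double > pressure( /* one value per local cell */ );
//   std::map< std::string, std::vector< double >* > bufferList;
//   bufferList[ "PRESSURE" ] = &pressure;
//
//   collect.collect( bufferList );   // collective call on all ranks
//   if( collect.isIORank() ) {
//       // pressure now holds numCells() values ordered like the global grid
//   }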