diff --git a/ewoms/parallel/collecttoiorank.hh b/ewoms/parallel/collecttoiorank.hh
new file mode 100644
index 000000000..ad09c3274
--- /dev/null
+++ b/ewoms/parallel/collecttoiorank.hh
@@ -0,0 +1,391 @@
+/*
+ Copyright 2015 IRIS AS
+
+ This file is part of the Open Porous Media project (OPM).
+
+ OPM is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ OPM is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with OPM. If not, see <http://www.gnu.org/licenses/>.
+*/
+#ifndef EWOMS_PARALLELSERIALOUTPUT_HH
+#define EWOMS_PARALLELSERIALOUTPUT_HH
+
+//#if HAVE_DUNE_CORNERPOINT
+#include <dune/grid/common/p2pcommunicator.hh>
+//#else
+//#error "This header needs the dune-cornerpoint module."
+//#endif
+
+#include <dune/grid/utility/persistentcontainer.hh>
+
+// for OPM_THROW
+#include <opm/core/utility/ErrorMacros.hpp>
+
+#include <cassert>
+#include <map>
+#include <set>
+#include <vector>
+
+namespace Ewoms
+{
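+    // Collects the distributed pieces of the simulation output from all
+    // ranks on a single I/O rank so that the result files can be written
+    // serially.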
+ template < class GridManager >
+ class CollectDataToIORank
+ {
+ public:
+ typedef typename GridManager :: Grid Grid;
+ typedef typename Grid :: CollectiveCommunication CollectiveCommunication;
+
+ // global id
+ class GlobalCellIndex
+ {
+ int globalId_;
+ int localIndex_;
+ bool isInterior_;
+ public:
+ GlobalCellIndex() : globalId_(-1), localIndex_(-1), isInterior_(true) {}
+ void setGhost() { isInterior_ = false; }
+
+ void setId( const int globalId ) { globalId_ = globalId; }
+ void setIndex( const int localIndex ) { localIndex_ = localIndex; }
+
+ int localIndex () const { return localIndex_; }
+ int id () const { return globalId_; }
+ bool isInterior() const { return isInterior_; }
+ };
+
+ typedef typename Dune::PersistentContainer< Grid, GlobalCellIndex > GlobalIndexContainer;
+
+ static const int dimension = Grid :: dimension ;
+
+ typedef typename Grid :: LeafGridView GridView;
+ typedef GridView AllGridView;
+
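+        // point-to-point communicator from dune-grid which is used to send
+        // the message buffers of all ranks to the I/O rank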
+ typedef Dune :: Point2PointCommunicator< Dune :: SimpleMessageBuffer > P2PCommunicatorType;
+ typedef typename P2PCommunicatorType :: MessageBufferType MessageBufferType;
+
+ typedef std::vector< GlobalCellIndex > LocalIndexMapType;
+
+ typedef std::vector< int > IndexMapType;
+ typedef std::vector< IndexMapType > IndexMapStorageType;
+
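+        // Data handle which communicates the global cell ids of each rank to
+        // the I/O rank and builds one index map per rank, i.e. the position
+        // of each received value inside the global output arrays.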
+ class DistributeIndexMapping : public P2PCommunicatorType::DataHandleInterface
+ {
+ protected:
+ const std::vector< int >& distributedGlobalIndex_;
+ IndexMapType& localIndexMap_;
+ IndexMapStorageType& indexMaps_;
+ std::map< const int, const int > globalPosition_;
+#ifndef NDEBUG
+ std::set< int > checkPosition_;
+#endif
+
+ public:
+ DistributeIndexMapping( const std::vector< int >& globalIndex,
+ const std::vector< int >& distributedGlobalIndex,
+ IndexMapType& localIndexMap,
+ IndexMapStorageType& indexMaps )
+ : distributedGlobalIndex_( distributedGlobalIndex ),
+ localIndexMap_( localIndexMap ),
+ indexMaps_( indexMaps ),
+ globalPosition_()
+ {
+ const size_t size = globalIndex.size();
+ // create mapping globalIndex --> localIndex
+ for ( size_t index = 0; index < size; ++index )
+ {
+ globalPosition_.insert( std::make_pair( globalIndex[ index ], index ) );
+ }
+
+ // on I/O rank we need to create a mapping from local to global
+ if( ! indexMaps_.empty() )
+ {
+ // for the ioRank create a localIndex to index in global state map
+ IndexMapType& indexMap = indexMaps_.back();
+ const size_t localSize = localIndexMap_.size();
+ indexMap.resize( localSize );
+                    for( size_t i=0; i<localSize; ++i )
+                    {
+                        // map the local index to the position in the global state
+                        const int id = distributedGlobalIndex_[ localIndexMap_[ i ] ];
+                        indexMap[ i ] = globalPosition_[ id ];
+#ifndef NDEBUG
+                        // every global id should only be mapped once
+                        assert( checkPosition_.find( id ) == checkPosition_.end() );
+                        checkPosition_.insert( id );
+#endif
+                    }
+                }
+            }
+
+            // pack the global cell ids of all interior cells
+            void pack( const int link, MessageBufferType& buffer )
+            {
+                // we should only get one link
+                if( link != 0 ) {
+                    OPM_THROW(std::logic_error,"link in method pack is not 0 as expected");
+                }
+
+                const int size = localIndexMap_.size();
+                buffer.write( size );
+                for( int index = 0; index < size; ++index )
+                {
+                    const int globalIdx = distributedGlobalIndex_[ localIndexMap_[ index ] ];
+                    buffer.write( globalIdx );
+                }
+            }
+
+            // unpack the global cell ids received from rank 'link' and store
+            // their positions in the global state for that rank
+            void unpack( const int link, MessageBufferType& buffer )
+            {
+                IndexMapType& indexMap = indexMaps_[ link ];
+                assert( ! globalPosition_.empty() );
+
+                int numCells = 0;
+                buffer.read( numCells );
+                indexMap.resize( numCells );
+                for( int index = 0; index < numCells; ++index )
+                {
+                    int globalId = -1;
+                    buffer.read( globalId );
+                    assert( globalPosition_.find( globalId ) != globalPosition_.end() );
+                    indexMap[ index ] = globalPosition_[ globalId ];
+#ifndef NDEBUG
+                    assert( checkPosition_.find( globalId ) == checkPosition_.end() );
+                    checkPosition_.insert( globalId );
+#endif
+                }
+            }
+        };
+
+        // the rank that collects the data and writes the output
+        static const int ioRank = 0 ;
+
+        CollectDataToIORank( const GridManager& gridManager )
+            : toIORankComm_( gridManager.grid().comm() ),
+              isIORank_( gridManager.grid().comm().rank() == ioRank )
+        {
+            const CollectiveCommunication& comm = gridManager.grid().comm();
+            {
+                std::set< int > send, recv;
+ // the I/O rank receives from all other ranks
+ if( isIORank() )
+ {
+ // the I/O rank needs a picture of the global grid
+ const auto& eclGrid = *(gridManager.eclGrid());
+ const size_t cartesianSize = eclGrid.getCartesianSize();
+ // reserve memory
+ globalCartesianIndex_.reserve( cartesianSize );
+ globalCartesianIndex_.clear();
+ // get a global cartesian index for each active cell in the eclipse grid
+                    for( size_t cartIndex=0; cartIndex<cartesianSize; ++cartIndex )
+                    {
+                        if( eclGrid.cellActive( cartIndex ) )
+                        {
+                            globalCartesianIndex_.push_back( cartIndex );
+                        }
+                    }
+
+                    // receive from all other ranks
+                    for( int i = 0; i < comm.size(); ++i )
+                    {
+                        if( i != ioRank )
+                        {
+                            recv.insert( i );
+                        }
+                    }
+                }
+                else
+                {
+                    // all other ranks send their data to the I/O rank
+                    send.insert( ioRank );
+                }
+
+                // store the local index of all interior elements
+                localIndexMap_.clear();
+                const size_t gridSize = gridManager.grid().size( 0 );
+                localIndexMap_.reserve( gridSize );
+
+                unsigned int index = 0;
+                const auto& localView = gridManager.grid().leafGridView();
+                for( auto it = localView.template begin< 0 >(),
+                     end = localView.template end< 0 >(); it != end; ++it, ++index )
+ {
+ const auto element = *it ;
+ // only store interior element for collection
+ if( element.partitionType() == Dune :: InteriorEntity )
+ {
+ localIndexMap_.push_back( index );
+ }
+ }
+
+ // insert send and recv linkage to communicator
+ toIORankComm_.insertRequest( send, recv );
+
+ if( isIORank() )
+ {
+ // need an index map for each rank
+ indexMaps_.clear();
+ indexMaps_.resize( comm.size() );
+ }
+
+ // store the local cartesian index
+ IndexMapType distributedCartesianIndex;
+ distributedCartesianIndex.reserve( gridSize );
+                for( size_t cell = 0 ; cell<gridSize; ++cell )
+                {
+                    distributedCartesianIndex.push_back( gridManager.cartesianIndex( cell ) );
+                }
+
+                // distribute the global cartesian ids to the I/O rank so the
+                // degrees of freedom can be associated later
+                DistributeIndexMapping distIndexMapping( globalCartesianIndex_,
+                                                         distributedCartesianIndex,
+                                                         localIndexMap_,
+                                                         indexMaps_ );
+                toIORankComm_.exchange( distIndexMapping );
+            }
+        }
+
+        // data handle which packs the values of all output buffers on the
+        // sending ranks and scatters them into the resized global buffers on
+        // the I/O rank
+        template <class BufferList>
+        class PackUnPackOutputBuffers : public P2PCommunicatorType::DataHandleInterface
+ {
+ BufferList& bufferList_;
+
+ const IndexMapType& localIndexMap_;
+ const IndexMapStorageType& indexMaps_;
+
+ public:
+ PackUnPackOutputBuffers( BufferList& bufferList,
+ const IndexMapType& localIndexMap,
+ const IndexMapStorageType& indexMaps,
+ const size_t globalSize,
+ const bool isIORank )
+ : bufferList_( bufferList ),
+ localIndexMap_( localIndexMap ),
+ indexMaps_( indexMaps )
+ {
+ if( isIORank )
+ {
+ MessageBufferType buffer;
+ pack( 0, buffer );
+ // resize all buffers
+ for (auto it = bufferList_.begin(), end = bufferList_.end(); it != end; ++it )
+ {
+ it->second->resize( globalSize );
+ }
+ // the last index map is the local one
+ doUnpack( indexMaps.back(), buffer );
+
+ }
+ }
+
+ // pack all data associated with link
+ void pack( const int link, MessageBufferType& buffer )
+ {
+ // we should only get one link
+ if( link != 0 ) {
+ OPM_THROW(std::logic_error,"link in method pack is not 0 as expected");
+ }
+
+ size_t buffers = bufferList_.size();
+ buffer.write( buffers );
+ for (auto it = bufferList_.begin(), end = bufferList_.end(); it != end; ++it )
+ {
+ write( buffer, localIndexMap_, *(it->second) );
+ }
+ }
+
+ void doUnpack( const IndexMapType& indexMap, MessageBufferType& buffer )
+ {
+ size_t buffers = 0;
+ buffer.read( buffers );
+ assert( buffers == bufferList_.size() );
+ for( auto it = bufferList_.begin(), end = bufferList_.end(); it != end; ++it )
+ {
+ read( buffer, indexMap, *(it->second) );
+ }
+ }
+
+ // unpack all data associated with link
+ void unpack( const int link, MessageBufferType& buffer )
+ {
+ doUnpack( indexMaps_[ link ], buffer );
+ }
+
+ protected:
+ template <class Vector>
+ void write( MessageBufferType& buffer, const IndexMapType& localIndexMap, const Vector& data ) const
+ {
+ const size_t size = localIndexMap.size();
+ assert( size <= data.size() );
+ buffer.write( size );
+                for( size_t i=0; i<size; ++i )
+                {
+                    buffer.write( data[ localIndexMap[ i ] ] );
+                }
+            }
+
+            template <class Vector>
+            void read( MessageBufferType& buffer, const IndexMapType& indexMap, Vector& data ) const
+ {
+ size_t size = indexMap.size();
+ assert( size <= data.size() );
+ buffer.read( size );
+ assert( size == indexMap.size() );
+                for( size_t i=0; i<size; ++i )
+                {
+                    buffer.read( data[ indexMap[ i ] ] );
+                }
+            }
+        };
+
+        // gather all buffers on the I/O rank; returns true there and false
+        // on all other ranks
+        template <class BufferList>
+        bool collect( BufferList& bufferList ) const
+ {
+ PackUnPackOutputBuffers< BufferList >
+ packUnpack( bufferList,
+ localIndexMap_, indexMaps_, numCells(), isIORank() );
+
+ //toIORankComm_.exchangeCached( packUnpack );
+ toIORankComm_.exchange( packUnpack );
+#ifndef NDEBUG
+ // make sure every process is on the same page
+ toIORankComm_.barrier();
+#endif
+ return isIORank();
+ }
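+
+        // Usage sketch (the buffer names and the writer function below are
+        // illustrative, not part of this class): every rank registers
+        // pointers to its local data vectors, e.g. in a
+        // std::map< std::string, std::vector< double >* >; on the I/O rank
+        // collect() resizes these vectors to the global number of cells and
+        // fills them, so that the output can be written there:
+        //
+        //   std::map< std::string, std::vector< double >* > bufferList;
+        //   bufferList[ "PRESSURE" ] = &pressure;
+        //   if( collectToIORank.collect( bufferList ) )
+        //       writeOutput( bufferList ); // only on the I/O rank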
+
+ bool isIORank() const
+ {
+ return isIORank_;
+ }
+
+ bool isParallel() const
+ {
+ return toIORankComm_.size() > 1;
+ }
+
+ size_t numCells () const { return globalCartesianIndex_.size(); }
+
+ protected:
+ P2PCommunicatorType toIORankComm_;
+ IndexMapType globalCartesianIndex_;
+ IndexMapType localIndexMap_;
+ IndexMapStorageType indexMaps_;
+ // true if we are on I/O rank
+ const bool isIORank_;
+ };
+
+} // end namespace Ewoms
+#endif