ParallelDebugOutput: only use parallel output when mpi size > 1.

Robert Kloefkorn
2015-09-21 12:24:07 +02:00
parent 2c84414641
commit dacb4013ed
2 changed files with 138 additions and 129 deletions
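The change itself is small: the collection setup in the constructor is wrapped in a guard so that send/recv maps, the global grid view, and the index-map exchange are only built when the run actually uses more than one MPI rank. Below is a minimal, self-contained sketch of that guard pattern, using plain MPI rather than the Dune CollectiveCommunication wrapper seen in the diff; the function name setupCollectiveIO and its body are illustrative stand-ins, not the actual OPM code.

#include <mpi.h>
#include <cstdio>

// Sketch only: mirrors the shape of the guarded constructor below,
// not the real implementation.
static void setupCollectiveIO( MPI_Comm comm )
{
    int size = 1;
    MPI_Comm_size( comm, &size );

    // Serial runs (size == 1) skip the whole setup; this is the same
    // idea as the if( comm.size() > 1 ) guard added by this commit.
    if( size > 1 )
    {
        int rank = 0;
        MPI_Comm_rank( comm, &rank );
        std::printf( "rank %d: building I/O-rank communication maps\n", rank );
        // ... build send/recv sets, exchange global cell ids ...
    }
}

int main( int argc, char** argv )
{
    MPI_Init( &argc, &argv );
    setupCollectiveIO( MPI_COMM_WORLD );
    MPI_Finalize();
    return 0;
}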


@@ -48,6 +48,7 @@ namespace Opm
         virtual const SimulatorState& globalReservoirState() const = 0 ;
         virtual const WellState& globalWellState() const = 0 ;
         virtual bool isIORank() const = 0;
+        virtual bool isParallel() const = 0;
         virtual int numCells() const = 0 ;
         virtual const int* globalCell() const = 0;
     };
@@ -79,6 +80,7 @@ namespace Opm
         virtual const SimulatorState& globalReservoirState() const { return *globalState_; }
         virtual const WellState& globalWellState() const { return *wellState_; }
         virtual bool isIORank () const { return true; }
+        virtual bool isParallel () const { return false; }
         virtual int numCells() const { return grid_.number_of_cells; }
         virtual const int* globalCell() const { return grid_.global_cell; }
     };
@@ -221,84 +223,87 @@ namespace Opm
             isIORank_( otherGrid.comm().rank() == ioRank )
         {
             const CollectiveCommunication& comm = otherGrid.comm();
-            std::set< int > send, recv;
-            // the I/O rank receives from all other ranks
-            if( isIORank() )
+            if( comm.size() > 1 )
             {
-                Dune::CpGrid globalGrid( otherGrid );
-                globalGrid.switchToGlobalView();
-                // initialize global state with correct sizes
-                globalReservoirState_.init( globalGrid.numCells(), globalGrid.numFaces(), numPhases );
-                // TODO init well state
-                // Create wells and well state.
-                WellsManager wells_manager(eclipseState,
-                                           0,
-                                           Opm::UgGridHelpers::numCells( globalGrid ),
-                                           Opm::UgGridHelpers::globalCell( globalGrid ),
-                                           Opm::UgGridHelpers::cartDims( globalGrid ),
-                                           Opm::UgGridHelpers::dimensions( globalGrid ),
-                                           Opm::UgGridHelpers::cell2Faces( globalGrid ),
-                                           Opm::UgGridHelpers::beginFaceCentroids( globalGrid ),
-                                           0,
-                                           false);
-                const Wells* wells = wells_manager.c_wells();
-                globalWellState_.init(wells, globalReservoirState_, globalWellState_ );
-                // copy global cartesian index
-                globalIndex_ = globalGrid.globalCell();
-                unsigned int count = 0;
-                auto gridView = globalGrid.leafGridView();
-                for( auto it = gridView.begin< 0 >(),
-                     end = gridView.end< 0 >(); it != end; ++it, ++count )
-                {
-                }
-                assert( count == globalIndex_.size() );
-                for(int i=0; i<comm.size(); ++i)
-                {
-                    if( i != ioRank )
-                    {
-                        recv.insert( i );
-                    }
-                }
-            }
-            else // all other simply send to the I/O rank
-            {
-                send.insert( ioRank );
-            }
-            localIndexMap_.clear();
-            localIndexMap_.reserve( otherGrid.size( 0 ) );
-            unsigned int index = 0;
-            auto localView = otherGrid.leafGridView();
-            for( auto it = localView.begin< 0 >(),
-                 end = localView.end< 0 >(); it != end; ++it, ++index )
-            {
-                const auto element = *it ;
-                // only store interior element for collection
-                if( element.partitionType() == Dune :: InteriorEntity )
-                {
-                    localIndexMap_.push_back( index );
-                }
-            }
-            // insert send and recv linkage to communicator
-            toIORankComm_.insertRequest( send, recv );
-            if( isIORank() )
-            {
-                // need an index map for each rank
-                indexMaps_.clear();
-                indexMaps_.resize( comm.size() );
-            }
-            // distribute global id's to io rank for later association of dof's
-            DistributeIndexMapping distIndexMapping( globalIndex_, otherGrid.globalCell(), localIndexMap_, indexMaps_ );
-            toIORankComm_.exchange( distIndexMapping );
+                std::set< int > send, recv;
+                // the I/O rank receives from all other ranks
+                if( isIORank() )
+                {
+                    Dune::CpGrid globalGrid( otherGrid );
+                    globalGrid.switchToGlobalView();
+                    // initialize global state with correct sizes
+                    globalReservoirState_.init( globalGrid.numCells(), globalGrid.numFaces(), numPhases );
+                    // TODO init well state
+                    // Create wells and well state.
+                    WellsManager wells_manager(eclipseState,
+                                               0,
+                                               Opm::UgGridHelpers::numCells( globalGrid ),
+                                               Opm::UgGridHelpers::globalCell( globalGrid ),
+                                               Opm::UgGridHelpers::cartDims( globalGrid ),
+                                               Opm::UgGridHelpers::dimensions( globalGrid ),
+                                               Opm::UgGridHelpers::cell2Faces( globalGrid ),
+                                               Opm::UgGridHelpers::beginFaceCentroids( globalGrid ),
+                                               0,
+                                               false);
+                    const Wells* wells = wells_manager.c_wells();
+                    globalWellState_.init(wells, globalReservoirState_, globalWellState_ );
+                    // copy global cartesian index
+                    globalIndex_ = globalGrid.globalCell();
+                    unsigned int count = 0;
+                    auto gridView = globalGrid.leafGridView();
+                    for( auto it = gridView.begin< 0 >(),
+                         end = gridView.end< 0 >(); it != end; ++it, ++count )
+                    {
+                    }
+                    assert( count == globalIndex_.size() );
+                    for(int i=0; i<comm.size(); ++i)
+                    {
+                        if( i != ioRank )
+                        {
+                            recv.insert( i );
+                        }
+                    }
+                }
+                else // all other simply send to the I/O rank
+                {
+                    send.insert( ioRank );
+                }
+                localIndexMap_.clear();
+                localIndexMap_.reserve( otherGrid.size( 0 ) );
+                unsigned int index = 0;
+                auto localView = otherGrid.leafGridView();
+                for( auto it = localView.begin< 0 >(),
+                     end = localView.end< 0 >(); it != end; ++it, ++index )
+                {
+                    const auto element = *it ;
+                    // only store interior element for collection
+                    if( element.partitionType() == Dune :: InteriorEntity )
+                    {
+                        localIndexMap_.push_back( index );
+                    }
+                }
+                // insert send and recv linkage to communicator
+                toIORankComm_.insertRequest( send, recv );
+                if( isIORank() )
+                {
+                    // need an index map for each rank
+                    indexMaps_.clear();
+                    indexMaps_.resize( comm.size() );
+                }
+                // distribute global id's to io rank for later association of dof's
+                DistributeIndexMapping distIndexMapping( globalIndex_, otherGrid.globalCell(), localIndexMap_, indexMaps_ );
+                toIORankComm_.exchange( distIndexMapping );
+            }
         }

         class PackUnPackSimulatorState : public P2PCommunicatorType::DataHandleInterface
@@ -533,6 +538,11 @@ namespace Opm
             return isIORank_;
         }

+        bool isParallel() const
+        {
+            return toIORankComm_.size() > 1;
+        }
+
         int numCells () const { return globalIndex_.size(); }
         const int* globalCell () const
         {
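The new isParallel() query reports whether the point-to-point communicator was actually populated, which only happens in the guarded parallel branch of the constructor above. A hedged sketch of how output code might branch on it; the function writeTimeStep, its template parameters, and the Collector/State names are illustrative stand-ins, not the actual OPM output path:

// Illustrative only: 'Collector' stands in for the parallel output
// helper in this file; isParallel()/isIORank()/globalReservoirState()
// mirror the interface shown in the diff.
template< class Collector, class State >
void writeTimeStep( const Collector& collector, const State& localState )
{
    if( collector.isParallel() )
    {
        // parallel run: the gathered global state is complete only on
        // the I/O rank, so only that rank should write
        if( collector.isIORank() )
        {
            // write collector.globalReservoirState() ...
        }
    }
    else
    {
        // serial run: the local state already covers the whole grid,
        // so write localState directly and skip the gather machinery
    }
}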
@@ -545,7 +555,6 @@ namespace Opm
         IndexMapType globalIndex_;
         IndexMapType localIndexMap_;
         IndexMapStorageType indexMaps_;
-        //BlackoilState globalReservoirState_;
         SimulatorState globalReservoirState_;
         // this needs to be revised
         WellStateFullyImplicitBlackoil globalWellState_;