ParallelDebugOutput: only use parallel output when mpi size > 1.

Robert Kloefkorn 2015-09-21 12:24:07 +02:00
parent 2c84414641
commit dacb4013ed
2 changed files with 138 additions and 129 deletions
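In short: the gather-to-I/O-rank machinery is now built and used only when the run actually spans more than one MPI rank; single-rank runs write their local state directly. Below is a minimal standalone sketch of that guard. Comm and ParallelOutput here are simplified stand-ins invented for illustration, not the real Dune/OPM types or their APIs.

#include <iostream>
#include <set>

// Stand-in for Dune's collective communication object.
struct Comm {
    int size_ = 1;
    int rank_ = 0;
    int size() const { return size_; }
    int rank() const { return rank_; }
};

class ParallelOutput {
public:
    explicit ParallelOutput(const Comm& comm, int ioRank = 0)
        : isIORank_(comm.rank() == ioRank)
        , isParallel_(comm.size() > 1)
    {
        // serial runs skip the whole communication setup, as in this commit
        if (isParallel_) {
            std::set<int> send, recv;
            if (isIORank_) {
                // the I/O rank receives from every other rank
                for (int i = 0; i < comm.size(); ++i)
                    if (i != ioRank) recv.insert(i);
            } else {
                send.insert(ioRank);  // everyone else sends to the I/O rank
            }
            // ... here the real code inserts send/recv into its communicator
        }
    }

    bool isIORank() const { return isIORank_; }
    bool isParallel() const { return isParallel_; }

private:
    bool isIORank_;
    bool isParallel_;
};

int main() {
    Comm serial;  // size() == 1: no communication pattern is built
    ParallelOutput out(serial);
    std::cout << std::boolalpha << "parallel: " << out.isParallel()
              << ", io rank: " << out.isIORank() << "\n";
}

In the real code below, the same role is played by otherGrid.comm() and the ParallelDebugOutput constructor, whose entire setup is now wrapped in if( comm.size() > 1 ).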

View File

@@ -48,6 +48,7 @@ namespace Opm
         virtual const SimulatorState& globalReservoirState() const = 0 ;
         virtual const WellState& globalWellState() const = 0 ;
         virtual bool isIORank() const = 0;
+        virtual bool isParallel() const = 0;
         virtual int numCells() const = 0 ;
         virtual const int* globalCell() const = 0;
     };
@@ -79,6 +80,7 @@ namespace Opm
         virtual const SimulatorState& globalReservoirState() const { return *globalState_; }
         virtual const WellState& globalWellState() const { return *wellState_; }
         virtual bool isIORank () const { return true; }
+        virtual bool isParallel () const { return false; }
         virtual int numCells() const { return grid_.number_of_cells; }
         virtual const int* globalCell() const { return grid_.global_cell; }
     };
@@ -221,84 +223,87 @@ namespace Opm
             isIORank_( otherGrid.comm().rank() == ioRank )
         {
             const CollectiveCommunication& comm = otherGrid.comm();
-            std::set< int > send, recv;
-            // the I/O rank receives from all other ranks
-            if( isIORank() )
-            {
-                Dune::CpGrid globalGrid( otherGrid );
-                globalGrid.switchToGlobalView();
-                // initialize global state with correct sizes
-                globalReservoirState_.init( globalGrid.numCells(), globalGrid.numFaces(), numPhases );
-                // TODO init well state
-                // Create wells and well state.
-                WellsManager wells_manager(eclipseState,
-                                           0,
-                                           Opm::UgGridHelpers::numCells( globalGrid ),
-                                           Opm::UgGridHelpers::globalCell( globalGrid ),
-                                           Opm::UgGridHelpers::cartDims( globalGrid ),
-                                           Opm::UgGridHelpers::dimensions( globalGrid ),
-                                           Opm::UgGridHelpers::cell2Faces( globalGrid ),
-                                           Opm::UgGridHelpers::beginFaceCentroids( globalGrid ),
-                                           0,
-                                           false);
-                const Wells* wells = wells_manager.c_wells();
-                globalWellState_.init(wells, globalReservoirState_, globalWellState_ );
-                // copy global cartesian index
-                globalIndex_ = globalGrid.globalCell();
-                unsigned int count = 0;
-                auto gridView = globalGrid.leafGridView();
-                for( auto it = gridView.begin< 0 >(),
-                     end = gridView.end< 0 >(); it != end; ++it, ++count )
-                {
-                }
-                assert( count == globalIndex_.size() );
-                for(int i=0; i<comm.size(); ++i)
-                {
-                    if( i != ioRank )
-                    {
-                        recv.insert( i );
-                    }
-                }
-            }
-            else // all other simply send to the I/O rank
-            {
-                send.insert( ioRank );
-            }
-            localIndexMap_.clear();
-            localIndexMap_.reserve( otherGrid.size( 0 ) );
-            unsigned int index = 0;
-            auto localView = otherGrid.leafGridView();
-            for( auto it = localView.begin< 0 >(),
-                 end = localView.end< 0 >(); it != end; ++it, ++index )
-            {
-                const auto element = *it ;
-                // only store interior element for collection
-                if( element.partitionType() == Dune :: InteriorEntity )
-                {
-                    localIndexMap_.push_back( index );
-                }
-            }
-            // insert send and recv linkage to communicator
-            toIORankComm_.insertRequest( send, recv );
-            if( isIORank() )
-            {
-                // need an index map for each rank
-                indexMaps_.clear();
-                indexMaps_.resize( comm.size() );
-            }
-            // distribute global id's to io rank for later association of dof's
-            DistributeIndexMapping distIndexMapping( globalIndex_, otherGrid.globalCell(), localIndexMap_, indexMaps_ );
-            toIORankComm_.exchange( distIndexMapping );
+            if( comm.size() > 1 )
+            {
+                std::set< int > send, recv;
+                // the I/O rank receives from all other ranks
+                if( isIORank() )
+                {
+                    Dune::CpGrid globalGrid( otherGrid );
+                    globalGrid.switchToGlobalView();
+                    // initialize global state with correct sizes
+                    globalReservoirState_.init( globalGrid.numCells(), globalGrid.numFaces(), numPhases );
+                    // TODO init well state
+                    // Create wells and well state.
+                    WellsManager wells_manager(eclipseState,
+                                               0,
+                                               Opm::UgGridHelpers::numCells( globalGrid ),
+                                               Opm::UgGridHelpers::globalCell( globalGrid ),
+                                               Opm::UgGridHelpers::cartDims( globalGrid ),
+                                               Opm::UgGridHelpers::dimensions( globalGrid ),
+                                               Opm::UgGridHelpers::cell2Faces( globalGrid ),
+                                               Opm::UgGridHelpers::beginFaceCentroids( globalGrid ),
+                                               0,
+                                               false);
+                    const Wells* wells = wells_manager.c_wells();
+                    globalWellState_.init(wells, globalReservoirState_, globalWellState_ );
+                    // copy global cartesian index
+                    globalIndex_ = globalGrid.globalCell();
+                    unsigned int count = 0;
+                    auto gridView = globalGrid.leafGridView();
+                    for( auto it = gridView.begin< 0 >(),
+                         end = gridView.end< 0 >(); it != end; ++it, ++count )
+                    {
+                    }
+                    assert( count == globalIndex_.size() );
+                    for(int i=0; i<comm.size(); ++i)
+                    {
+                        if( i != ioRank )
+                        {
+                            recv.insert( i );
+                        }
+                    }
+                }
+                else // all other simply send to the I/O rank
+                {
+                    send.insert( ioRank );
+                }
+                localIndexMap_.clear();
+                localIndexMap_.reserve( otherGrid.size( 0 ) );
+                unsigned int index = 0;
+                auto localView = otherGrid.leafGridView();
+                for( auto it = localView.begin< 0 >(),
+                     end = localView.end< 0 >(); it != end; ++it, ++index )
+                {
+                    const auto element = *it ;
+                    // only store interior element for collection
+                    if( element.partitionType() == Dune :: InteriorEntity )
+                    {
+                        localIndexMap_.push_back( index );
+                    }
+                }
+                // insert send and recv linkage to communicator
+                toIORankComm_.insertRequest( send, recv );
+                if( isIORank() )
+                {
+                    // need an index map for each rank
+                    indexMaps_.clear();
+                    indexMaps_.resize( comm.size() );
+                }
+                // distribute global id's to io rank for later association of dof's
+                DistributeIndexMapping distIndexMapping( globalIndex_, otherGrid.globalCell(), localIndexMap_, indexMaps_ );
+                toIORankComm_.exchange( distIndexMapping );
+            }
         }

         class PackUnPackSimulatorState : public P2PCommunicatorType::DataHandleInterface
@@ -533,6 +538,11 @@ namespace Opm
             return isIORank_;
         }

+        bool isParallel() const
+        {
+            return toIORankComm_.size() > 1;
+        }
+
         int numCells () const { return globalIndex_.size(); }
         const int* globalCell () const
         {
@@ -545,7 +555,6 @@ namespace Opm
         IndexMapType globalIndex_;
         IndexMapType localIndexMap_;
         IndexMapStorageType indexMaps_;
-        //BlackoilState globalReservoirState_;
         SimulatorState globalReservoirState_;
         // this needs to be revised
         WellStateFullyImplicitBlackoil globalWellState_;
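Taken together, the changes in this file separate two predicates that were previously conflated: isIORank(), which says whether this rank is the one that writes, and the new isParallel(), which says whether the run spans more than one rank (the serial implementation answers true and false, respectively). A condensed, hypothetical sketch of that split; the class names are simplified stand-ins for the interface and implementations above, not the real ones.

#include <cstddef>

struct OutputInterface {
    virtual ~OutputInterface() = default;
    virtual bool isIORank() const = 0;
    virtual bool isParallel() const = 0;
};

// Serial case: the only rank is trivially the I/O rank, never parallel.
struct SerialOutput : OutputInterface {
    bool isIORank() const override { return true; }
    bool isParallel() const override { return false; }
};

// Distributed case: parallel exactly when the communicator spans >1 rank.
struct DistributedOutput : OutputInterface {
    DistributedOutput(std::size_t commSize, int rank, int ioRank)
        : commSize_(commSize), isIORank_(rank == ioRank) {}
    bool isIORank() const override { return isIORank_; }
    bool isParallel() const override { return commSize_ > 1; }
private:
    std::size_t commSize_;
    bool isIORank_;
};

int main() {
    SerialOutput serial;
    DistributedOutput dist(4, 0, 0);
    // serial.isParallel() == false even though serial.isIORank() == true;
    // this is exactly the distinction the new virtual makes available.
    return (!serial.isParallel() && dist.isParallel()) ? 0 : 1;
}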

View File

@@ -261,68 +261,68 @@ namespace Opm
             vtkWriter_->writeTimeStep( timer, localState, localWellState, false );
         }
-        if( parallelOutput_ )
-        {
-            // collect all solutions to I/O rank
-            const bool isIORank = parallelOutput_->collectToIORank( localState, localWellState );
-            if( isIORank )
-            {
-                const SimulatorState& state = parallelOutput_->globalReservoirState();
-                const WellState& wellState = parallelOutput_->globalWellState();
-                //std::cout << "number of wells" << wellState.bhp().size() << std::endl;
-                // Matlab output
-                if( matlabWriter_ ) {
-                    matlabWriter_->writeTimeStep( timer, state, wellState, substep );
-                }
-                // ECL output
-                if ( eclWriter_ ) {
-                    eclWriter_->writeTimeStep(timer, state, wellState, substep );
-                }
-                // write backup file
-                if( backupfile_ )
-                {
-                    int reportStep = timer.reportStepNum();
-                    int currentTimeStep = timer.currentStepNum();
-                    if( (reportStep == currentTimeStep || // true for SimulatorTimer
-                         currentTimeStep == 0 || // true for AdaptiveSimulatorTimer at reportStep
-                         timer.done() ) // true for AdaptiveSimulatorTimer at reportStep
-                        && lastBackupReportStep_ != reportStep ) // only backup report step once
-                    {
-                        // store report step
-                        lastBackupReportStep_ = reportStep;
-                        // write report step number
-                        backupfile_.write( (const char *) &reportStep, sizeof(int) );
-                        try {
-                            const BlackoilState& boState = dynamic_cast< const BlackoilState& > (state);
-                            backupfile_ << boState;
-                            const WellStateFullyImplicitBlackoil& boWellState = static_cast< const WellStateFullyImplicitBlackoil& > (wellState);
-                            backupfile_ << boWellState;
-                        }
-                        catch ( const std::bad_cast& e )
-                        {
-                        }
-                        /*
-                        const WellStateFullyImplicitBlackoil* boWellState =
-                            dynamic_cast< const WellStateFullyImplicitBlackoil* > (&wellState);
-                        if( boWellState ) {
-                            backupfile_ << (*boWellState);
-                        }
-                        else
-                            OPM_THROW(std::logic_error,"cast to WellStateFullyImplicitBlackoil failed");
-                        */
-                        backupfile_ << std::flush;
-                    }
-                }
-            }
-        }
+        bool isIORank = true ;
+        if( parallelOutput_ && parallelOutput_->isParallel() )
+        {
+            // collect all solutions to I/O rank
+            isIORank = parallelOutput_->collectToIORank( localState, localWellState );
+        }
+        const SimulatorState& state = (parallelOutput_ && parallelOutput_->isParallel() ) ? parallelOutput_->globalReservoirState() : localState;
+        const WellState& wellState = (parallelOutput_ && parallelOutput_->isParallel() ) ? parallelOutput_->globalWellState() : localWellState;
+        // output is only done on I/O rank
+        if( isIORank )
+        {
+            // Matlab output
+            if( matlabWriter_ ) {
+                matlabWriter_->writeTimeStep( timer, state, wellState, substep );
+            }
+            // ECL output
+            if ( eclWriter_ ) {
+                eclWriter_->writeTimeStep(timer, state, wellState, substep );
+            }
+            // write backup file
+            if( backupfile_ )
+            {
+                int reportStep = timer.reportStepNum();
+                int currentTimeStep = timer.currentStepNum();
+                if( (reportStep == currentTimeStep || // true for SimulatorTimer
+                     currentTimeStep == 0 || // true for AdaptiveSimulatorTimer at reportStep
+                     timer.done() ) // true for AdaptiveSimulatorTimer at reportStep
+                    && lastBackupReportStep_ != reportStep ) // only backup report step once
+                {
+                    // store report step
+                    lastBackupReportStep_ = reportStep;
+                    // write report step number
+                    backupfile_.write( (const char *) &reportStep, sizeof(int) );
+                    try {
+                        const BlackoilState& boState = dynamic_cast< const BlackoilState& > (state);
+                        backupfile_ << boState;
+                        const WellStateFullyImplicitBlackoil& boWellState = static_cast< const WellStateFullyImplicitBlackoil& > (wellState);
+                        backupfile_ << boWellState;
+                    }
+                    catch ( const std::bad_cast& e )
+                    {
+                    }
+                    /*
+                    const WellStateFullyImplicitBlackoil* boWellState =
+                        dynamic_cast< const WellStateFullyImplicitBlackoil* > (&wellState);
+                    if( boWellState ) {
+                        backupfile_ << (*boWellState);
+                    }
+                    else
+                        OPM_THROW(std::logic_error,"cast to WellStateFullyImplicitBlackoil failed");
+                    */
+                    backupfile_ << std::flush;
+                }
+            } // end backup
+        } // end isIORank
     }

     void
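The rewrite above replaces the duplicated writer calls with a single output path: collect to the I/O rank only when the run is parallel, then select the global or local state and write once. A standalone sketch of the resulting control flow, using invented stand-in types in place of SimulatorState, WellState, and the real writers:

#include <iostream>
#include <string>

struct State { std::string name; };

struct ParallelOutput {
    bool parallel = false;
    State global{ "global" };
    bool isParallel() const { return parallel; }
    bool collectToIORank(const State&) { return true; }  // true only on the I/O rank
    const State& globalReservoirState() const { return global; }
};

void writeTimeStep(ParallelOutput* out, const State& localState) {
    bool isIORank = true;
    if (out && out->isParallel()) {
        // gather all ranks' solutions; returns true only on the I/O rank
        isIORank = out->collectToIORank(localState);
    }
    // serial runs (or no parallel output object) use the local state as-is
    const State& state = (out && out->isParallel())
                       ? out->globalReservoirState() : localState;
    if (isIORank) {
        std::cout << "writing " << state.name << " state\n";
    }
}

int main() {
    State local{ "local" };
    writeTimeStep(nullptr, local);   // no parallel output object at all
    ParallelOutput serial;           // size == 1: local state, no gather
    writeTimeStep(&serial, local);
    ParallelOutput par;
    par.parallel = true;             // size > 1: gathered global state
    writeTimeStep(&par, local);
}

With this shape, the Matlab, ECL, and backup branches no longer need to know whether the state was gathered or is simply the rank's own local state.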