Merge pull request #484 from dr-robertk/PR/Parallel-output-only-in-parallel-runs

ParallelDebugOutput: only use parallel output when mpi size > 1.
Atgeirr Flø Rasmussen 2015-09-30 09:42:28 +02:00
commit 57deb18dc4
5 changed files with 157 additions and 135 deletions
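
The change in a nutshell: collecting the solution to the I/O rank is only worthwhile when the MPI communicator spans more than one rank, so the PR adds an isParallel() query and guards all communication behind it; a serial run simply writes its local state. A minimal standalone C++ sketch of that guard (Comm and DebugOutput are illustrative stand-ins, not the OPM or Dune classes):

#include <cassert>
#include <iostream>

// Illustrative stand-in for an MPI-style communicator (not the Dune API).
struct Comm {
    int ranks;
    int size() const { return ranks; }
    int rank() const { return 0; }
};

struct DebugOutput {
    Comm comm;
    // Mirrors the isParallel() method this PR adds: parallel output
    // machinery is engaged only when more than one rank is present.
    bool isParallel() const { return comm.size() > 1; }
    bool isIORank() const { return comm.rank() == 0; }
};

int main() {
    DebugOutput out{ Comm{1} };   // a serial run: mpi size == 1
    assert(out.isIORank() && !out.isParallel());
    std::cout << "serial run: write the local state directly\n";
}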

View File

@@ -348,7 +348,7 @@ try
        // create output writer after grid is distributed, otherwise the parallel output
        // won't work correctly since we need to create a mapping from the distributed to
        // the global view
-       Opm::BlackoilOutputWriter outputWriter(grid, param, eclipseState, pu );
+       Opm::BlackoilOutputWriter outputWriter(grid, param, eclipseState, pu, new_props.permeability() );

        // Solver for Newton iterations.
        std::unique_ptr<NewtonIterationBlackoilInterface> fis_solver;

View File

@@ -358,7 +358,7 @@ try
        // create output writer after grid is distributed, otherwise the parallel output
        // won't work correctly since we need to create a mapping from the distributed to
        // the global view
-       Opm::BlackoilOutputWriter outputWriter(grid, param, eclipseState, pu );
+       Opm::BlackoilOutputWriter outputWriter(grid, param, eclipseState, pu, new_props.permeability() );

        // Solver for Newton iterations.
        std::unique_ptr<NewtonIterationBlackoilInterface> fis_solver;

View File

@@ -48,6 +48,7 @@ namespace Opm
        virtual const SimulatorState& globalReservoirState() const = 0 ;
        virtual const WellState& globalWellState() const = 0 ;
        virtual bool isIORank() const = 0;
+       virtual bool isParallel() const = 0;
        virtual int numCells() const = 0 ;
        virtual const int* globalCell() const = 0;
    };
@@ -64,7 +65,8 @@ namespace Opm
    public:
        ParallelDebugOutput ( const GridImpl& grid,
                              Opm::EclipseStateConstPtr /* eclipseState */,
-                             const int )
+                             const int,
+                             const double* )
            : grid_( grid ) {}

        // gather solution to rank 0 for EclipseWriter
@@ -79,6 +81,7 @@ namespace Opm
        virtual const SimulatorState& globalReservoirState() const { return *globalState_; }
        virtual const WellState& globalWellState() const { return *wellState_; }
        virtual bool isIORank () const { return true; }
+       virtual bool isParallel () const { return false; }
        virtual int numCells() const { return grid_.number_of_cells; }
        virtual const int* globalCell() const { return grid_.global_cell; }
    };
@@ -152,6 +155,7 @@ namespace Opm
                globalPosition_.insert( std::make_pair( globalIndex[ index ], index ) );
            }

+           // on I/O rank we need to create a mapping from local to global
            if( ! indexMaps_.empty() )
            {
                // for the ioRank create a localIndex to index in global state map
@@ -161,7 +165,7 @@ namespace Opm
                for( size_t i=0; i<localSize; ++i )
                {
                    const int id = distributedGlobalIndex_[ localIndexMap_[ i ] ];
-                   indexMap[ i ] = id ;
+                   indexMap[ i ] = globalPosition_[ id ] ;
#ifndef NDEBUG
                    assert( checkPosition_.find( id ) == checkPosition_.end() );
                    checkPosition_.insert( id );
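
The one-line change above fixes what the per-rank index map stores: not the raw global (Cartesian) cell ID, but that ID's position in the global state arrays, obtained through the globalPosition_ map built a few lines earlier. A small self-contained illustration of why the extra lookup matters (container names simplified):

#include <cassert>
#include <map>
#include <vector>

int main() {
    // globalIndex[i] = Cartesian cell ID of global cell i (toy data).
    const std::vector<int> globalIndex = { 7, 3, 11, 5 };

    // Invert once: Cartesian ID -> position in the global state arrays.
    std::map<int, int> globalPosition;
    for (std::size_t i = 0; i < globalIndex.size(); ++i)
        globalPosition.emplace(globalIndex[i], static_cast<int>(i));

    // Before the fix the map stored the ID itself (11), which is out of
    // range for a 4-entry state vector; after the fix it stores the
    // position (2), which is a valid index.
    const int id = 11;
    assert(globalPosition[id] == 2);
}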
@@ -216,89 +220,99 @@ namespace Opm
        ParallelDebugOutput( const Dune::CpGrid& otherGrid,
                             Opm::EclipseStateConstPtr eclipseState,
-                            const int numPhases )
+                            const int numPhases,
+                            const double* permeability )
            : toIORankComm_( otherGrid.comm() ),
              isIORank_( otherGrid.comm().rank() == ioRank )
        {
            const CollectiveCommunication& comm = otherGrid.comm();
-           std::set< int > send, recv;
-           // the I/O rank receives from all other ranks
-           if( isIORank() )
-           {
-               Dune::CpGrid globalGrid( otherGrid );
-               globalGrid.switchToGlobalView();
-
-               // initialize global state with correct sizes
-               globalReservoirState_.init( globalGrid.numCells(), globalGrid.numFaces(), numPhases );
-               // TODO init well state
-
-               // Create wells and well state.
-               WellsManager wells_manager(eclipseState,
-                                          0,
-                                          Opm::UgGridHelpers::numCells( globalGrid ),
-                                          Opm::UgGridHelpers::globalCell( globalGrid ),
-                                          Opm::UgGridHelpers::cartDims( globalGrid ),
-                                          Opm::UgGridHelpers::dimensions( globalGrid ),
-                                          Opm::UgGridHelpers::cell2Faces( globalGrid ),
-                                          Opm::UgGridHelpers::beginFaceCentroids( globalGrid ),
-                                          0,
-                                          false);
-
-               const Wells* wells = wells_manager.c_wells();
-               globalWellState_.init(wells, globalReservoirState_, globalWellState_ );
-
-               // copy global cartesian index
-               globalIndex_ = globalGrid.globalCell();
-
-               unsigned int count = 0;
-               auto gridView = globalGrid.leafGridView();
-               for( auto it = gridView.begin< 0 >(),
-                    end = gridView.end< 0 >(); it != end; ++it, ++count )
-               {
-               }
-               assert( count == globalIndex_.size() );
-
-               for(int i=0; i<comm.size(); ++i)
-               {
-                   if( i != ioRank )
-                   {
-                       recv.insert( i );
-                   }
-               }
-           }
-           else // all other simply send to the I/O rank
-           {
-               send.insert( ioRank );
-           }
-
-           localIndexMap_.clear();
-           localIndexMap_.reserve( otherGrid.size( 0 ) );
-
-           unsigned int index = 0;
-           auto localView = otherGrid.leafGridView();
-           for( auto it = localView.begin< 0 >(),
-                end = localView.end< 0 >(); it != end; ++it, ++index )
-           {
-               const auto element = *it ;
-               // only store interior element for collection
-               if( element.partitionType() == Dune :: InteriorEntity )
-               {
-                   localIndexMap_.push_back( index );
-               }
-           }
-
-           // insert send and recv linkage to communicator
-           toIORankComm_.insertRequest( send, recv );
-
-           if( isIORank() )
-           {
-               // need an index map for each rank
-               indexMaps_.clear();
-               indexMaps_.resize( comm.size() );
-           }
-
-           // distribute global id's to io rank for later association of dof's
-           DistributeIndexMapping distIndexMapping( globalIndex_, otherGrid.globalCell(), localIndexMap_, indexMaps_ );
-           toIORankComm_.exchange( distIndexMapping );
+           if( comm.size() > 1 )
+           {
+               std::set< int > send, recv;
+               // the I/O rank receives from all other ranks
+               if( isIORank() )
+               {
+                   Dune::CpGrid globalGrid( otherGrid );
+                   globalGrid.switchToGlobalView();
+
+                   // initialize global state with correct sizes
+                   globalReservoirState_.init( globalGrid.numCells(), globalGrid.numFaces(), numPhases );
+                   // TODO init well state
+
+                   // Create wells and well state.
+                   WellsManager wells_manager(eclipseState,
+                                              0,
+                                              Opm::UgGridHelpers::numCells( globalGrid ),
+                                              Opm::UgGridHelpers::globalCell( globalGrid ),
+                                              Opm::UgGridHelpers::cartDims( globalGrid ),
+                                              Opm::UgGridHelpers::dimensions( globalGrid ),
+                                              Opm::UgGridHelpers::cell2Faces( globalGrid ),
+                                              Opm::UgGridHelpers::beginFaceCentroids( globalGrid ),
+                                              permeability,
+                                              false);
+
+                   const Wells* wells = wells_manager.c_wells();
+                   globalWellState_.init(wells, globalReservoirState_, globalWellState_ );
+
+                   // copy global cartesian index
+                   globalIndex_ = globalGrid.globalCell();
+
+                   unsigned int count = 0;
+                   auto gridView = globalGrid.leafGridView();
+                   for( auto it = gridView.begin< 0 >(),
+                        end = gridView.end< 0 >(); it != end; ++it, ++count )
+                   {
+                   }
+                   assert( count == globalIndex_.size() );
+
+                   for(int i=0; i<comm.size(); ++i)
+                   {
+                       if( i != ioRank )
+                       {
+                           recv.insert( i );
+                       }
+                   }
+               }
+               else // all other simply send to the I/O rank
+               {
+                   send.insert( ioRank );
+               }
+
+               localIndexMap_.clear();
+               localIndexMap_.reserve( otherGrid.size( 0 ) );
+
+               unsigned int index = 0;
+               auto localView = otherGrid.leafGridView();
+               for( auto it = localView.begin< 0 >(),
+                    end = localView.end< 0 >(); it != end; ++it, ++index )
+               {
+                   const auto element = *it ;
+                   // only store interior element for collection
+                   if( element.partitionType() == Dune :: InteriorEntity )
+                   {
+                       localIndexMap_.push_back( index );
+                   }
+               }
+
+               // insert send and recv linkage to communicator
+               toIORankComm_.insertRequest( send, recv );
+
+               if( isIORank() )
+               {
+                   // need an index map for each rank
+                   indexMaps_.clear();
+                   indexMaps_.resize( comm.size() );
+               }
+
+               // distribute global id's to io rank for later association of dof's
+               DistributeIndexMapping distIndexMapping( globalIndex_, otherGrid.globalCell(), localIndexMap_, indexMaps_ );
+               toIORankComm_.exchange( distIndexMapping );
+           }
+           else // serial run
+           {
+               // copy global cartesian index
+               globalIndex_ = otherGrid.globalCell();
+           }
        }
class PackUnPackSimulatorState : public P2PCommunicatorType::DataHandleInterface
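
Condensed, the restructured constructor now reads: all send/recv setup and index-map exchange happens only under comm.size() > 1, while a serial run merely copies the grid's global Cartesian indices. A compilable sketch of that control flow (placeholder types, not the OPM classes):

#include <set>
#include <vector>

struct Comm {
    int ranks;
    int size() const { return ranks; }
    int rank() const { return 0; }
};

struct OutputSetup {
    std::vector<int> globalIndex;

    void build(const Comm& comm, const std::vector<int>& globalCell) {
        if (comm.size() > 1) {
            std::set<int> send, recv;
            if (comm.rank() == 0) {
                // the I/O rank receives from every other rank ...
                for (int i = 1; i < comm.size(); ++i)
                    recv.insert(i);
            } else {
                // ... and every other rank sends to the I/O rank
                send.insert(0);
            }
            // (the real code also exchanges per-rank index maps here)
        } else {
            // serial run: no communication, keep the Cartesian indices
            globalIndex = globalCell;
        }
    }
};

int main() {
    OutputSetup s;
    s.build(Comm{1}, {0, 1, 2, 3});   // serial path: nothing exchanged
}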
@@ -411,6 +425,7 @@ namespace Opm
                for( unsigned int i=0; i<size; ++i )
                {
                    const unsigned int index = localIndexMap[ i ] * stride + offset;
+                   assert( index < vector.size() );
                    buffer.write( vector[ index ] );
                }
            }
@@ -427,6 +442,7 @@ namespace Opm
                for( unsigned int i=0; i<size; ++i )
                {
                    const unsigned int index = indexMap[ i ] * stride + offset;
+                   assert( index < vector.size() );
                    buffer.read( vector[ index ] );
                }
            }
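
The two asserts added above are cheap bounds checks on the strided indexing used by the pack/unpack loops; like the #ifndef NDEBUG block earlier in the file, they disappear in release builds. A toy version of the access pattern they guard (the buffer itself is omitted):

#include <cassert>
#include <vector>

int main() {
    const std::vector<double> vector(12, 1.0);   // e.g. 4 cells x 3 phases
    const std::vector<int> localIndexMap = { 0, 2, 3 };
    const unsigned int stride = 3, offset = 1;   // e.g. the second phase

    double sum = 0.0;
    for (unsigned int i = 0; i < localIndexMap.size(); ++i) {
        const unsigned int index = localIndexMap[i] * stride + offset;
        assert(index < vector.size());           // the check this PR adds
        sum += vector[index];                    // stands in for buffer.write()
    }
    return sum > 0.0 ? 0 : 1;
}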
@@ -533,6 +549,11 @@ namespace Opm
            return isIORank_;
        }

+       bool isParallel() const
+       {
+           return toIORankComm_.size() > 1;
+       }
+
        int numCells () const { return globalIndex_.size(); }
        const int* globalCell () const
        {
@@ -545,7 +566,6 @@ namespace Opm
        IndexMapType globalIndex_;
        IndexMapType localIndexMap_;
        IndexMapStorageType indexMaps_;
-       //BlackoilState globalReservoirState_;
        SimulatorState globalReservoirState_;
        // this needs to be revised
        WellStateFullyImplicitBlackoil globalWellState_;

View File

@@ -261,68 +261,68 @@ namespace Opm
            vtkWriter_->writeTimeStep( timer, localState, localWellState, false );
        }

-       if( parallelOutput_ )
-       {
-           // collect all solutions to I/O rank
-           const bool isIORank = parallelOutput_->collectToIORank( localState, localWellState );
-
-           if( isIORank )
-           {
-               const SimulatorState& state = parallelOutput_->globalReservoirState();
-               const WellState& wellState = parallelOutput_->globalWellState();
-               //std::cout << "number of wells" << wellState.bhp().size() << std::endl;
-
-               // Matlab output
-               if( matlabWriter_ ) {
-                   matlabWriter_->writeTimeStep( timer, state, wellState, substep );
-               }
-               // ECL output
-               if ( eclWriter_ ) {
-                   eclWriter_->writeTimeStep(timer, state, wellState, substep );
-               }
-
-               // write backup file
-               if( backupfile_ )
-               {
-                   int reportStep = timer.reportStepNum();
-                   int currentTimeStep = timer.currentStepNum();
-                   if( (reportStep == currentTimeStep || // true for SimulatorTimer
-                        currentTimeStep == 0 || // true for AdaptiveSimulatorTimer at reportStep
-                        timer.done() ) // true for AdaptiveSimulatorTimer at reportStep
-                      && lastBackupReportStep_ != reportStep ) // only backup report step once
-                   {
-                       // store report step
-                       lastBackupReportStep_ = reportStep;
-                       // write report step number
-                       backupfile_.write( (const char *) &reportStep, sizeof(int) );
-
-                       try {
-                           const BlackoilState& boState = dynamic_cast< const BlackoilState& > (state);
-                           backupfile_ << boState;
-
-                           const WellStateFullyImplicitBlackoil& boWellState = static_cast< const WellStateFullyImplicitBlackoil& > (wellState);
-                           backupfile_ << boWellState;
-                       }
-                       catch ( const std::bad_cast& e )
-                       {
-                       }
-
-                       /*
-                       const WellStateFullyImplicitBlackoil* boWellState =
-                           dynamic_cast< const WellStateFullyImplicitBlackoil* > (&wellState);
-                       if( boWellState ) {
-                           backupfile_ << (*boWellState);
-                       }
-                       else
-                           OPM_THROW(std::logic_error,"cast to WellStateFullyImplicitBlackoil failed");
-                       */
-                       backupfile_ << std::flush;
-                   }
-               }
-           }
-       }
+       bool isIORank = true ;
+       if( parallelOutput_ && parallelOutput_->isParallel() )
+       {
+           // collect all solutions to I/O rank
+           isIORank = parallelOutput_->collectToIORank( localState, localWellState );
+       }
+
+       const SimulatorState& state = (parallelOutput_ && parallelOutput_->isParallel() ) ? parallelOutput_->globalReservoirState() : localState;
+       const WellState& wellState  = (parallelOutput_ && parallelOutput_->isParallel() ) ? parallelOutput_->globalWellState()     : localWellState;
+
+       // output is only done on I/O rank
+       if( isIORank )
+       {
+           // Matlab output
+           if( matlabWriter_ ) {
+               matlabWriter_->writeTimeStep( timer, state, wellState, substep );
+           }
+           // ECL output
+           if ( eclWriter_ ) {
+               eclWriter_->writeTimeStep(timer, state, wellState, substep );
+           }
+
+           // write backup file
+           if( backupfile_ )
+           {
+               int reportStep = timer.reportStepNum();
+               int currentTimeStep = timer.currentStepNum();
+               if( (reportStep == currentTimeStep || // true for SimulatorTimer
+                    currentTimeStep == 0 || // true for AdaptiveSimulatorTimer at reportStep
+                    timer.done() ) // true for AdaptiveSimulatorTimer at reportStep
+                  && lastBackupReportStep_ != reportStep ) // only backup report step once
+               {
+                   // store report step
+                   lastBackupReportStep_ = reportStep;
+                   // write report step number
+                   backupfile_.write( (const char *) &reportStep, sizeof(int) );
+
+                   try {
+                       const BlackoilState& boState = dynamic_cast< const BlackoilState& > (state);
+                       backupfile_ << boState;
+
+                       const WellStateFullyImplicitBlackoil& boWellState = static_cast< const WellStateFullyImplicitBlackoil& > (wellState);
+                       backupfile_ << boWellState;
+                   }
+                   catch ( const std::bad_cast& e )
+                   {
+                   }
+
+                   /*
+                   const WellStateFullyImplicitBlackoil* boWellState =
+                       dynamic_cast< const WellStateFullyImplicitBlackoil* > (&wellState);
+                   if( boWellState ) {
+                       backupfile_ << (*boWellState);
+                   }
+                   else
+                       OPM_THROW(std::logic_error,"cast to WellStateFullyImplicitBlackoil failed");
+                   */
+                   backupfile_ << std::flush;
+               }
+           } // end backup
+       } // end isIORank
    }

    void
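
The rewritten writeTimeStep thus converges on a single output path: collect only when actually parallel, then pick either the gathered global state or the local one and hand it to the writers on the I/O rank. A runnable outline of that flow (State and Output are placeholders for the OPM types):

#include <iostream>

struct State { int step; };                     // stand-in for SimulatorState

struct Output {                                 // stand-in for ParallelDebugOutput
    int ranks;
    State global;
    bool isParallel() const { return ranks > 1; }
    bool collectToIORank(const State& local) {  // trivial stand-in for the gather
        global = local;
        return true;                            // true on the I/O rank
    }
    const State& globalState() const { return global; }
};

void writeTimeStep(Output* out, const State& localState) {
    bool isIORank = true;                       // serial default: this rank writes
    if (out && out->isParallel())
        isIORank = out->collectToIORank(localState);

    const State& state =
        (out && out->isParallel()) ? out->globalState() : localState;

    if (isIORank)                               // Matlab/ECL/backup writers go here
        std::cout << "writing step " << state.step << "\n";
}

int main() {
    Output serial{ 1, State{0} };               // size-1 communicator: no gather
    writeTimeStep(&serial, State{42});
}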

View File

@@ -200,7 +200,8 @@ namespace Opm
        BlackoilOutputWriter(const Grid& grid,
                             const parameter::ParameterGroup& param,
                             Opm::EclipseStateConstPtr eclipseState,
-                            const Opm::PhaseUsage &phaseUsage);
+                            const Opm::PhaseUsage &phaseUsage,
+                            const double* permeability );

        /** \copydoc Opm::OutputWriter::writeInit */
        void writeInit(const SimulatorTimerInterface &timer);
@@ -251,9 +252,10 @@ namespace Opm
    BlackoilOutputWriter(const Grid& grid,
                         const parameter::ParameterGroup& param,
                         Opm::EclipseStateConstPtr eclipseState,
-                        const Opm::PhaseUsage &phaseUsage )
+                        const Opm::PhaseUsage &phaseUsage,
+                        const double* permeability )
        : output_( param.getDefault("output", true) ),
-         parallelOutput_( output_ ? new ParallelDebugOutput< Grid >( grid, eclipseState, phaseUsage.num_phases ) : 0 ),
+         parallelOutput_( output_ ? new ParallelDebugOutput< Grid >( grid, eclipseState, phaseUsage.num_phases, permeability ) : 0 ),
          outputDir_( output_ ? param.getDefault("output_dir", std::string("output")) : "." ),
          output_interval_( output_ ? param.getDefault("output_interval", 1): 0 ),
          lastBackupReportStep_( -1 ),
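
Finally, the signature changes above thread a raw permeability pointer from the simulator drivers (new_props.permeability() in the two examples at the top) through BlackoilOutputWriter and ParallelDebugOutput down to the WellsManager built on the global grid, which previously received a null pointer in that slot. The threading in outline (hypothetical minimal types; the real classes live in opm-simulators):

#include <vector>

// Hypothetical stand-ins for WellsManager / ParallelDebugOutput /
// BlackoilOutputWriter; only the parameter threading is shown.
struct WellsManagerS {
    explicit WellsManagerS(const double* perm) { (void)perm; }
};

struct ParallelDebugOutputS {
    ParallelDebugOutputS(int numPhases, const double* permeability)
        : wells(permeability) { (void)numPhases; }   // forwarded as in the PR
    WellsManagerS wells;
};

struct BlackoilOutputWriterS {
    BlackoilOutputWriterS(int numPhases, const double* permeability)
        : debug(numPhases, permeability) {}
    ParallelDebugOutputS debug;
};

int main() {
    std::vector<double> perm(9, 100.0);              // toy permeability values
    BlackoilOutputWriterS writer(3, perm.data());    // the new required argument
}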