Merge pull request #484 from dr-robertk/PR/Parallel-output-only-in-parallel-runs

ParallelDebugOutput: only use parallel output when MPI size > 1.
Atgeirr Flø Rasmussen 2015-09-30 09:42:28 +02:00
commit 57deb18dc4
5 changed files with 157 additions and 135 deletions
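
The patch does two related things. First, ParallelDebugOutput only sets up its point-to-point communicator and the gather-to-I/O-rank machinery when the MPI communicator actually has more than one rank; serial runs now bypass all of that and write from the local state directly. Second, the permeability array is threaded through the BlackoilOutputWriter and ParallelDebugOutput constructors so that the wells manager built on the I/O rank receives the actual permeability field instead of a null pointer. A standalone model of the guard pattern (hypothetical names, not OPM code):

    #include <vector>

    struct DebugOutputModel
    {
        int commSize_;                 // stands in for grid.comm().size()
        std::vector<int> globalIndex_; // global cartesian cell indices

        DebugOutputModel( int commSize, const std::vector<int>& globalCell )
            : commSize_( commSize )
        {
            if( commSize_ > 1 ) {
                // parallel run: build send/recv sets and index maps here
            } else {
                // serial run: the local view already is the global view
                globalIndex_ = globalCell;
            }
        }

        bool isParallel() const { return commSize_ > 1; }
    };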


@@ -348,7 +348,7 @@ try
        // create output writer after grid is distributed, otherwise the parallel output
        // won't work correctly since we need to create a mapping from the distributed to
        // the global view
-       Opm::BlackoilOutputWriter outputWriter(grid, param, eclipseState, pu );
+       Opm::BlackoilOutputWriter outputWriter(grid, param, eclipseState, pu, new_props.permeability() );

        // Solver for Newton iterations.
        std::unique_ptr<NewtonIterationBlackoilInterface> fis_solver;


@@ -358,7 +358,7 @@ try
        // create output writer after grid is distributed, otherwise the parallel output
        // won't work correctly since we need to create a mapping from the distributed to
        // the global view
-       Opm::BlackoilOutputWriter outputWriter(grid, param, eclipseState, pu );
+       Opm::BlackoilOutputWriter outputWriter(grid, param, eclipseState, pu, new_props.permeability() );

        // Solver for Newton iterations.
        std::unique_ptr<NewtonIterationBlackoilInterface> fis_solver;


@@ -48,6 +48,7 @@ namespace Opm
        virtual const SimulatorState& globalReservoirState() const = 0 ;
        virtual const WellState& globalWellState() const = 0 ;
        virtual bool isIORank() const = 0;
+       virtual bool isParallel() const = 0;
        virtual int numCells() const = 0 ;
        virtual const int* globalCell() const = 0;
    };
@@ -64,7 +65,8 @@
    public:
        ParallelDebugOutput ( const GridImpl& grid,
                              Opm::EclipseStateConstPtr /* eclipseState */,
-                             const int )
+                             const int,
+                             const double* )
            : grid_( grid ) {}

        // gather solution to rank 0 for EclipseWriter
@@ -79,6 +81,7 @@
        virtual const SimulatorState& globalReservoirState() const { return *globalState_; }
        virtual const WellState& globalWellState() const { return *wellState_; }
        virtual bool isIORank () const { return true; }
+       virtual bool isParallel () const { return false; }
        virtual int numCells() const { return grid_.number_of_cells; }
        virtual const int* globalCell() const { return grid_.global_cell; }
    };
@@ -152,6 +155,7 @@
                globalPosition_.insert( std::make_pair( globalIndex[ index ], index ) );
            }

+           // on I/O rank we need to create a mapping from local to global
            if( ! indexMaps_.empty() )
            {
                // for the ioRank create a localIndex to index in global state map
@@ -161,7 +165,7 @@
                for( size_t i=0; i<localSize; ++i )
                {
                    const int id = distributedGlobalIndex_[ localIndexMap_[ i ] ];
-                   indexMap[ i ] = id ;
+                   indexMap[ i ] = globalPosition_[ id ] ;
 #ifndef NDEBUG
                    assert( checkPosition_.find( id ) == checkPosition_.end() );
                    checkPosition_.insert( id );
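
Note the one-line fix in the hunk above: indexMap[ i ] previously stored the raw global cartesian id, but the unpack code further down addresses the global state vectors as indexMap[ i ] * stride + offset, so the map has to hold the cell's position in the compressed global numbering, which globalPosition_ provides. A standalone illustration of the translation (hypothetical names):

    #include <cassert>
    #include <map>

    // cartesian ids can exceed the number of active cells, so they must be
    // mapped to a compressed position before being used as an array index
    int toCompressedPosition( const std::map<int,int>& globalPosition,
                              const int cartesianId )
    {
        const auto pos = globalPosition.find( cartesianId );
        assert( pos != globalPosition.end() );
        return pos->second;
    }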
@@ -216,11 +220,14 @@
        ParallelDebugOutput( const Dune::CpGrid& otherGrid,
                             Opm::EclipseStateConstPtr eclipseState,
-                            const int numPhases )
+                            const int numPhases,
+                            const double* permeability )
            : toIORankComm_( otherGrid.comm() ),
              isIORank_( otherGrid.comm().rank() == ioRank )
        {
            const CollectiveCommunication& comm = otherGrid.comm();

+           if( comm.size() > 1 )
+           {
                std::set< int > send, recv;
                // the I/O rank receives from all other ranks
                if( isIORank() )
@@ -241,8 +248,9 @@
                    Opm::UgGridHelpers::dimensions( globalGrid ),
                    Opm::UgGridHelpers::cell2Faces( globalGrid ),
                    Opm::UgGridHelpers::beginFaceCentroids( globalGrid ),
-                   0,
+                   permeability,
                    false);

                const Wells* wells = wells_manager.c_wells();
                globalWellState_.init(wells, globalReservoirState_, globalWellState_ );
@@ -300,6 +308,12 @@
                DistributeIndexMapping distIndexMapping( globalIndex_, otherGrid.globalCell(), localIndexMap_, indexMaps_ );
                toIORankComm_.exchange( distIndexMapping );
            }
+           else // serial run
+           {
+               // copy global cartesian index
+               globalIndex_ = otherGrid.globalCell();
+           }
+       }

        class PackUnPackSimulatorState : public P2PCommunicatorType::DataHandleInterface
        {
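
The new serial branch is enough because, as the accessors later in this file show, numCells() and globalCell() are implemented on top of globalIndex_ alone; copying otherGrid.globalCell() therefore keeps both working without any communication. A minimal model (hypothetical, standalone):

    #include <vector>

    struct GlobalIndexModel
    {
        std::vector<int> globalIndex_; // filled by gathering or, in serial, by copying

        int numCells() const { return static_cast<int>( globalIndex_.size() ); }
        const int* globalCell() const { return globalIndex_.data(); }
    };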
@@ -411,6 +425,7 @@
                for( unsigned int i=0; i<size; ++i )
                {
                    const unsigned int index = localIndexMap[ i ] * stride + offset;
+                   assert( index < vector.size() );
                    buffer.write( vector[ index ] );
                }
            }
@@ -427,6 +442,7 @@
                for( unsigned int i=0; i<size; ++i )
                {
                    const unsigned int index = indexMap[ i ] * stride + offset;
+                   assert( index < vector.size() );
                    buffer.read( vector[ index ] );
                }
            }
@@ -533,6 +549,11 @@
            return isIORank_;
        }

+       bool isParallel() const
+       {
+           return toIORankComm_.size() > 1;
+       }
+
        int numCells () const { return globalIndex_.size(); }
        const int* globalCell () const
        {
@@ -545,7 +566,6 @@
        IndexMapType globalIndex_;
        IndexMapType localIndexMap_;
        IndexMapStorageType indexMaps_;
-       //BlackoilState globalReservoirState_;
        SimulatorState globalReservoirState_;
        // this needs to be revised
        WellStateFullyImplicitBlackoil globalWellState_;
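
With this hunk, isParallel() is answered from the size of the communicator behind toIORankComm_, so a Dune::CpGrid run on a single rank reports isParallel() == false and takes the same code path as the serial specialization earlier in the file, while isIORank() stays true. A standalone model of the pair of queries (hypothetical names):

    // parallelism is a property of the communicator size, not of the grid type
    struct ParallelModel
    {
        int rank_     = 0; // stands in for otherGrid.comm().rank()
        int commSize_ = 1; // stands in for toIORankComm_.size()

        bool isIORank()   const { return rank_ == 0; }
        bool isParallel() const { return commSize_ > 1; }
    };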


@@ -261,18 +261,19 @@
            vtkWriter_->writeTimeStep( timer, localState, localWellState, false );
        }

-       if( parallelOutput_ )
+       bool isIORank = true ;
+       if( parallelOutput_ && parallelOutput_->isParallel() )
        {
            // collect all solutions to I/O rank
-           const bool isIORank = parallelOutput_->collectToIORank( localState, localWellState );
+           isIORank = parallelOutput_->collectToIORank( localState, localWellState );
+       }

+       const SimulatorState& state = (parallelOutput_ && parallelOutput_->isParallel() ) ? parallelOutput_->globalReservoirState() : localState;
+       const WellState& wellState = (parallelOutput_ && parallelOutput_->isParallel() ) ? parallelOutput_->globalWellState() : localWellState;
+
+       // output is only done on I/O rank
        if( isIORank )
        {
-           const SimulatorState& state = parallelOutput_->globalReservoirState();
-           const WellState& wellState = parallelOutput_->globalWellState();
-           //std::cout << "number of wells" << wellState.bhp().size() << std::endl;

            // Matlab output
            if( matlabWriter_ ) {
                matlabWriter_->writeTimeStep( timer, state, wellState, substep );
@@ -320,9 +321,8 @@
            */
            backupfile_ << std::flush;
        }
-       }
-       }
-       }
+       } // end backup
+       } // end isIORank
    }

    void
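
Untangled from the diff, the resulting writeTimeStep logic reads as follows (condensed paraphrase, not the verbatim source):

    bool isIORank = true;
    const bool parallel = parallelOutput_ && parallelOutput_->isParallel();
    if( parallel )
        isIORank = parallelOutput_->collectToIORank( localState, localWellState );

    // serial runs reuse the local state; parallel runs use the gathered copy
    const SimulatorState& state = parallel ? parallelOutput_->globalReservoirState() : localState;
    const WellState& wellState  = parallel ? parallelOutput_->globalWellState() : localWellState;

    if( isIORank )
    {
        // Matlab, ECLIPSE and backup output all happen here, on one rank only
    }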


@@ -200,7 +200,8 @@
        BlackoilOutputWriter(const Grid& grid,
                             const parameter::ParameterGroup& param,
                             Opm::EclipseStateConstPtr eclipseState,
-                            const Opm::PhaseUsage &phaseUsage);
+                            const Opm::PhaseUsage &phaseUsage,
+                            const double* permeability );

        /** \copydoc Opm::OutputWriter::writeInit */
        void writeInit(const SimulatorTimerInterface &timer);
@@ -251,9 +252,10 @@
    BlackoilOutputWriter(const Grid& grid,
                         const parameter::ParameterGroup& param,
                         Opm::EclipseStateConstPtr eclipseState,
-                        const Opm::PhaseUsage &phaseUsage )
+                        const Opm::PhaseUsage &phaseUsage,
+                        const double* permeability )
        : output_( param.getDefault("output", true) ),
-         parallelOutput_( output_ ? new ParallelDebugOutput< Grid >( grid, eclipseState, phaseUsage.num_phases ) : 0 ),
+         parallelOutput_( output_ ? new ParallelDebugOutput< Grid >( grid, eclipseState, phaseUsage.num_phases, permeability ) : 0 ),
          outputDir_( output_ ? param.getDefault("output_dir", std::string("output")) : "." ),
          output_interval_( output_ ? param.getDefault("output_interval", 1): 0 ),
          lastBackupReportStep_( -1 ),
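
Every construction site of BlackoilOutputWriter has to supply the extra pointer now, as the two example drivers at the top of this page do:

    Opm::BlackoilOutputWriter outputWriter( grid, param, eclipseState, pu,
                                            new_props.permeability() );

Passing a null pointer would still compile, but it would reintroduce the permeability-less wells manager on the I/O rank that this patch removes.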