Merge pull request #1041 from blattms/fix-output_writer-setup

Fix output writer setup
Atgeirr Flø Rasmussen 2017-03-17 18:12:52 +01:00 committed by GitHub
commit f504ca34f4
3 changed files with 167 additions and 15 deletions


@@ -32,6 +32,7 @@
#include <opm/autodiff/MissingFeatures.hpp>
#include <opm/autodiff/moduleVersion.hpp>
#include <opm/autodiff/ExtractParallelGridInformationToISTL.hpp>
#include <opm/autodiff/RedistributeDataHandles.hpp>
#include <opm/core/props/satfunc/RelpermDiagnostics.hpp>
@@ -52,6 +53,30 @@
namespace Opm
{
/// \brief Gather cell data to global random access iterator
/// \tparam ConstIter The type of constant iterator.
/// \tparam Iter The type of the mutable iterator.
/// \param grid The distributed CpGrid on which loadbalance has been run.
/// \param local_begin Begin iterator of the local container whose data is sent.
/// \param global_begin Begin iterator of the global container to gather into.
/// \warning The global container has to have the correct size!
template<class ConstIter, class Iter>
void gatherCellDataToGlobalIterator(const Dune::CpGrid& grid,
const ConstIter& local_begin,
const Iter& global_begin)
{
#if HAVE_MPI
FixedSizeIterCopyHandle<ConstIter,Iter> handle(local_begin,
global_begin);
const auto& gatherScatterInf = grid.cellScatterGatherInterface();
Dune::VariableSizeCommunicator<> comm(grid.comm(),
gatherScatterInf);
comm.backward(handle);
#endif
}
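A minimal usage sketch (not part of the commit), assuming `grid` is the distributed CpGrid, `global_grid` is a global-view copy of it, and `is_io_rank` is a flag playing the role of output_cout_ below:

std::vector<double> local_data(grid.size(0), 0.0); // one value per local cell
std::vector<double> global_data;
if (is_io_rank) {
    // Per the warning above, the target must already have the global size.
    global_data.resize(global_grid.size(0));
}
gatherCellDataToGlobalIterator(grid, local_data.begin(), global_data.begin());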
// The FlowMain class is the ebos-based black-oil simulator.
class FlowMainEbos
{
@@ -348,6 +373,10 @@ namespace Opm
ebosSimulator_.reset(new EbosSimulator(/*verbose=*/false));
ebosSimulator_->model().applyInitialSolution();
// Create a grid with a global view.
globalGrid_.reset(new Grid(grid()));
globalGrid_->switchToGlobalView();
try {
if (output_cout_) {
MissingFeatures::checkKeywords(deck());
@@ -373,7 +402,7 @@ namespace Opm
}
}
// Create grid and property objects.
// Create distributed property objects.
// Writes to:
// fluidprops_
void setupGridAndProps()
@@ -538,13 +567,51 @@ namespace Opm
{
bool output = param_.getDefault("output", true);
bool output_ecl = param_.getDefault("output_ecl", true);
const Grid& grid = this->grid();
if( output && output_ecl && output_cout_)
if( output && output_ecl )
{
const EclipseGrid& inputGrid = eclState().getInputGrid();
eclIO_.reset(new EclipseIO(eclState(), UgGridHelpers::createEclipseGrid( grid , inputGrid )));
eclIO_->writeInitial(geoprops_->simProps(grid),
geoprops_->nonCartesianConnections());
const Grid& grid = this->globalGrid();
if( output_cout_ ){
const EclipseGrid& inputGrid = eclState().getInputGrid();
eclIO_.reset(new EclipseIO(eclState(), UgGridHelpers::createEclipseGrid( grid , inputGrid )));
}
const NNC* nnc = &geoprops_->nonCartesianConnections();
data::Solution globaltrans;
if ( must_distribute_ )
{
// Dirty and dangerous hack!
// We rely on opmfil in GeoProps being hardcoded to true,
// which prevents the pinch processing from running.
// Ergo the NNCs are unchanged.
nnc = &eclState().getInputNNC();
// Gather the local simProps into the global container.
data::Solution localtrans = geoprops_->simProps(this->grid());
for( const auto& localkeyval: localtrans)
{
auto& globalval = globaltrans[localkeyval.first].data;
const auto& localval = localkeyval.second.data;
if( output_cout_ )
{
globalval.resize( grid.size(0));
}
gatherCellDataToGlobalIterator(this->grid(), localval.begin(),
globalval.begin());
}
}
else
{
globaltrans = geoprops_->simProps(grid);
}
if( output_cout_ )
{
eclIO_->writeInitial(globaltrans,
*nnc);
}
}
}
@@ -553,10 +620,13 @@ namespace Opm
// output_writer_
void setupOutputWriter()
{
// Create the output writer only after the grid has been distributed;
// otherwise parallel output will not work correctly, since we need a
// mapping from the distributed to the global view.
output_writer_.reset(new OutputWriter(grid(),
param_,
eclState(),
std::move( eclIO_ ),
std::move(eclIO_),
Opm::phaseUsageFromDeck(deck())) );
}
@@ -693,6 +763,9 @@ namespace Opm
Grid& grid()
{ return ebosSimulator_->gridManager().grid(); }
const Grid& globalGrid()
{ return *globalGrid_; }
Problem& ebosProblem()
{ return ebosSimulator_->problem(); }
@@ -724,6 +797,8 @@ namespace Opm
std::unique_ptr<NewtonIterationBlackoilInterface> fis_solver_;
std::unique_ptr<Simulator> simulator_;
std::string logFile_;
// Needs to be a shared pointer because it gets initialized before MPI_Init.
std::shared_ptr<Grid> globalGrid_;
};
} // namespace Opm


@@ -241,22 +241,33 @@ namespace Opm
enum { ioRank = 0 };
/// \brief Constructor
/// \param otherGrid The grid after loadbalance was run.
/// \param eclipseState The eclipse file parser output
/// \param numPhases The number of active phases.
/// \param phaseUsage The phase usage information of the run.
ParallelDebugOutput( const Dune::CpGrid& otherGrid,
const EclipseState& eclipseState,
const int numPhases,
const Opm::PhaseUsage& phaseUsage)
: grid_(),
eclipseState_( eclipseState ),
toIORankComm_( otherGrid.comm() ),
globalCellData_(new data::Solution),
isIORank_( otherGrid.comm().rank() == ioRank ),
isIORank_(true),
phaseUsage_(phaseUsage)
{
// Switch to distributed view unconditionally for safety.
Dune::CpGrid distributed_grid = otherGrid;
const CollectiveCommunication& comm = otherGrid.comm();
if( comm.size() > 1 )
{
std::set< int > send, recv;
distributed_grid.switchToDistributedView();
toIORankComm_ = distributed_grid.comm();
isIORank_ = (distributed_grid.comm().rank() == ioRank);
// the I/O rank receives from all other ranks
if( isIORank() )
{
@@ -296,10 +307,10 @@ namespace Opm
}
localIndexMap_.clear();
localIndexMap_.reserve( otherGrid.size( 0 ) );
localIndexMap_.reserve( distributed_grid.size( 0 ) );
unsigned int index = 0;
auto localView = otherGrid.leafGridView();
auto localView = distributed_grid.leafGridView();
for( auto it = localView.begin< 0 >(),
end = localView.end< 0 >(); it != end; ++it, ++index )
{
@@ -322,13 +333,13 @@ namespace Opm
}
// Distribute global ids to the I/O rank for later association of dofs.
DistributeIndexMapping distIndexMapping( globalIndex_, otherGrid.globalCell(), localIndexMap_, indexMaps_ );
DistributeIndexMapping distIndexMapping( globalIndex_, distributed_grid.globalCell(), localIndexMap_, indexMaps_ );
toIORankComm_.exchange( distIndexMapping );
}
else // serial run
{
// copy global cartesian index
globalIndex_ = otherGrid.globalCell();
globalIndex_ = distributed_grid.globalCell();
}
}
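For reference, a compact sketch of the view-switching idiom used in this constructor and behind FlowMainEbos::globalGrid_ above; copying the grid, and treating that copy as acceptable in cost, are assumptions carried over from the constructor:

// Copy, then flip between views, as the constructor above does.
Dune::CpGrid grid_copy = otherGrid;
grid_copy.switchToDistributedView(); // leaf view iterates only this rank's cells
// ... build per-rank index maps from grid_copy.leafGridView() ...
grid_copy.switchToGlobalView();      // leaf view iterates all cells of the global grid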
@@ -697,7 +708,7 @@ namespace Opm
// this needs to be revised
WellStateFullyImplicitBlackoil globalWellState_;
// true if we are on I/O rank
const bool isIORank_;
bool isIORank_;
// Phase usage needed to convert solution to simulation data container
Opm::PhaseUsage phaseUsage_;
};


@@ -24,11 +24,14 @@
#include <unordered_set>
#include <string>
#include <type_traits>
#include <iterator>
#include <opm/core/simulator/BlackoilState.hpp>
#include <opm/autodiff/BlackoilPropsAdFromDeck.hpp>
#include <opm/autodiff/ExtractParallelGridInformationToISTL.hpp>
#include <opm/autodiff/createGlobalCellArray.hpp>
#include <boost/any.hpp>
@@ -51,6 +54,69 @@ distributeGridAndData( Grid& ,
return std::unordered_set<std::string>();
}
/// \brief A handle that copies a fixed number of data items per index.
///
/// It works on iterators to allow for communicating C arrays.
/// \tparam Iter1 Constant random access iterator type.
/// \tparam Iter2 Mutable random access iterator type.
template<class Iter1, class Iter2=Iter1>
class FixedSizeIterCopyHandle
{
typedef typename std::iterator_traits<Iter2>::value_type DataType2;
public:
typedef typename std::iterator_traits<Iter1>::value_type DataType;
/// \brief Constructor.
/// \param send_begin The begin iterator for sending.
/// \param receive_begin The begin iterator for receiving.
FixedSizeIterCopyHandle(const Iter1& send_begin,
const Iter2& receive_begin,
std::size_t size = 1)
: send_(send_begin), receive_(receive_begin), size_(size)
{
static_assert(std::is_same<DataType,DataType2>::value,
"Iter1 and Iter2 have to have the same value_type!");
}
template<class Buffer>
void gather(Buffer& buffer, std::size_t i)
{
for(auto index = i*size(i), end = (i+1)*size(i);
index < end; ++index)
{
buffer.write(send_[index]);
}
}
template<class Buffer>
void scatter(Buffer& buffer, std::size_t i, std::size_t s)
{
assert(s==size(i));
for(auto index = i*size(i), end = (i+1)*size(i);
index < end; ++index)
{
buffer.read(receive_[index]);
}
}
bool fixedsize()
{
return true;
}
std::size_t size(std::size_t)
{
return size_;
}
private:
Iter1 send_;
Iter2 receive_;
std::size_t size_;
};
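A hedged usage sketch for this handle, mirroring gatherCellDataToGlobalIterator in FlowMainEbos above; the vectors and the surrounding MPI guard are assumptions of the example, not part of the commit:

#if HAVE_MPI
// One value per distributed cell; the global target is sized only on the I/O rank.
std::vector<double> local_vals(grid.size(0));
std::vector<double> global_vals; // resized to the global cell count on the I/O rank
FixedSizeIterCopyHandle<std::vector<double>::const_iterator,
                        std::vector<double>::iterator>
    handle(local_vals.cbegin(), global_vals.begin());
Dune::VariableSizeCommunicator<> comm(grid.comm(),
                                      grid.cellScatterGatherInterface());
comm.backward(handle); // backward(): gather from the distributed cells to the global view
#endif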
#if HAVE_OPM_GRID && HAVE_MPI
/// \brief A data handle to distribute the threshold pressures.
class ThresholdPressureDataHandle