From fd3e3596a9ee1fbc8891899792c0a95da06ab81f Mon Sep 17 00:00:00 2001
From: Markus Blatt
Date: Wed, 25 Jan 2017 20:36:37 +0100
Subject: [PATCH] Setup output writer and write initial values in parallel correctly.

Before this commit, only the solution of process 0 was written. To fix
this we make the equilGrid of Ebos available and use it for the output
writer. The properties written initially are first gathered from all
processes using the new gather/scatter utility.
---
 opm/autodiff/FlowMainEbos.hpp            | 86 ++++++++++++++++++++++--
 opm/autodiff/RedistributeDataHandles.hpp | 65 ++++++++++++++++++
 2 files changed, 144 insertions(+), 7 deletions(-)

diff --git a/opm/autodiff/FlowMainEbos.hpp b/opm/autodiff/FlowMainEbos.hpp
index b681cc573..1d02cf02e 100644
--- a/opm/autodiff/FlowMainEbos.hpp
+++ b/opm/autodiff/FlowMainEbos.hpp
@@ -32,6 +32,7 @@
 #include 
 #include 
 #include 
+#include 

 #include 

@@ -52,6 +53,28 @@
 namespace Opm
 {
+
+    /// \brief Gather cell data to a global random access iterator.
+    /// \tparam ConstIter The type of the constant iterator.
+    /// \tparam Iter The type of the mutable iterator.
+    /// \param grid The distributed CpGrid on which loadBalance() has been run.
+    /// \param local_begin Begin iterator of the local container whose data is sent.
+    /// \param global_begin Begin iterator of the global container gathered into.
+    /// \warning The global container has to have the correct size!
+    template<class ConstIter, class Iter>
+    void gatherCellDataToGlobalIterator(const Dune::CpGrid& grid,
+                                        const ConstIter& local_begin,
+                                        const Iter& global_begin)
+    {
+        FixedSizeIterCopyHandle<ConstIter, Iter> handle(local_begin,
+                                                        global_begin);
+        const auto& gatherScatterInf = grid.cellScatterGatherInterface();
+        Dune::VariableSizeCommunicator<> comm(grid.comm(),
+                                              gatherScatterInf);
+        comm.backward(handle);
+    }
+
+
     // The FlowMain class is the ebos based black-oil simulator.
     class FlowMainEbos
     {
@@ -376,7 +399,7 @@
             }
         }

-        // Create grid and property objects.
+        // Create distributed property objects.
         // Writes to:
         //   fluidprops_
         void setupGridAndProps()
@@ -541,13 +564,51 @@
         {
             bool output = param_.getDefault("output", true);
             bool output_ecl = param_.getDefault("output_ecl", true);
-            const Grid& grid = this->grid();
-            if( output && output_ecl && output_cout_)
+            if( output && output_ecl )
             {
-                const EclipseGrid& inputGrid = eclState().getInputGrid();
-                eclIO_.reset(new EclipseIO(eclState(), UgGridHelpers::createEclipseGrid( grid , inputGrid )));
-                eclIO_->writeInitial(geoprops_->simProps(grid),
-                                     geoprops_->nonCartesianConnections());
+                const Grid& grid = this->globalGrid();
+
+                if( output_cout_ ){
+                    const EclipseGrid& inputGrid = eclState().getInputGrid();
+                    eclIO_.reset(new EclipseIO(eclState(), UgGridHelpers::createEclipseGrid( grid , inputGrid )));
+                }
+
+                const NNC* nnc = &geoprops_->nonCartesianConnections();
+                data::Solution globaltrans;
+
+                if ( must_distribute_ )
+                {
+                    // Dirty and dangerous hack!
+                    // We rely on opmfil in GeoProps being hardcoded to true,
+                    // which prevents the pinch processing from running.
+                    // Ergo the NNCs are unchanged.
+                    nnc = &eclState().getInputNNC();
+
+                    // Gather the global simProps
+                    data::Solution localtrans = geoprops_->simProps(this->grid());
+                    for( const auto& localkeyval: localtrans)
+                    {
+                        auto& globalval = globaltrans[localkeyval.first].data;
+                        const auto& localval = localkeyval.second.data;
+
+                        if( output_cout_ )
+                        {
+                            globalval.resize( grid.size(0));
+                        }
+                        gatherCellDataToGlobalIterator(this->grid(), localval.begin(),
+                                                       globalval.begin());
+                    }
+                }
+                else
+                {
+                    globaltrans = geoprops_->simProps(grid);
+                }
+
+                if( output_cout_ )
+                {
+                    eclIO_->writeInitial(globaltrans,
+                                         *nnc);
+                }
             }
         }

@@ -561,6 +622,14 @@
                                                   eclState(),
                                                   std::move( eclIO_ ),
                                                   Opm::phaseUsageFromDeck(deck())) );
+            // Create the output writer after the grid has been distributed; otherwise
+            // the parallel output won't work correctly, since we need to create a
+            // mapping from the distributed to the global view.
+            output_writer_.reset(new OutputWriter(grid(),
+                                                  param_,
+                                                  eclState(),
+                                                  std::move(eclIO_),
+                                                  Opm::phaseUsageFromDeck(deck())) );
         }

         // Run the simulator.
@@ -696,6 +765,9 @@
         Grid& grid()
         { return ebosSimulator_->gridManager().grid(); }

+        const Grid& globalGrid()
+        { return ebosSimulator_->gridManager().equilGrid(); }
+
         Problem& ebosProblem()
         { return ebosSimulator_->problem(); }

diff --git a/opm/autodiff/RedistributeDataHandles.hpp b/opm/autodiff/RedistributeDataHandles.hpp
index d0ea4951f..f4f0be4fe 100644
--- a/opm/autodiff/RedistributeDataHandles.hpp
+++ b/opm/autodiff/RedistributeDataHandles.hpp
@@ -24,6 +24,8 @@
 #include 
 #include 
+#include 
+#include 

 #include 

@@ -52,6 +54,69 @@ distributeGridAndData( Grid& ,
     return std::unordered_set();
 }

+/// \brief A handle that copies a fixed number of data items per index.
+///
+/// It works on iterators to allow for communicating C arrays.
+/// \tparam Iter1 Constant random access iterator type.
+/// \tparam Iter2 Mutable random access iterator type.
+template<class Iter1, class Iter2>
+class FixedSizeIterCopyHandle
+{
+    typedef typename std::iterator_traits<Iter2>::value_type DataType2;
+
+public:
+    typedef typename std::iterator_traits<Iter1>::value_type DataType;
+
+    /// \brief Constructor.
+    /// \param send_begin The begin iterator for sending.
+    /// \param receive_begin The begin iterator for receiving.
+    FixedSizeIterCopyHandle(const Iter1& send_begin,
+                            const Iter2& receive_begin,
+                            std::size_t size = 1)
+        : send_(send_begin), receive_(receive_begin), size_(size)
+    {
+        static_assert(std::is_same<DataType, DataType2>::value,
+                      "Iter1 and Iter2 have to have the same value_type!");
+    }
+
+    template<class Buffer>
+    void gather(Buffer& buffer, std::size_t i)
+    {
+        for(auto index = i*size(i), end = (i+1)*size(i);
+            index < end; ++index)
+        {
+            buffer.write(send_[index]);
+        }
+    }
+
+    template<class Buffer>
+    void scatter(Buffer& buffer, std::size_t i, std::size_t s)
+    {
+        assert(s==size(i));
+
+        for(auto index = i*size(i), end = (i+1)*size(i);
+            index < end; ++index)
+        {
+            buffer.read(receive_[index]);
+        }
+    }
+
+    bool fixedsize()
+    {
+        return true;
+    }
+
+    std::size_t size(std::size_t)
+    {
+        return size_;
+    }
+private:
+    Iter1 send_;
+    Iter2 receive_;
+    std::size_t size_;
+};
+
+
 #if HAVE_OPM_GRID && HAVE_MPI
 /// \brief a data handle to distribute the threshold pressures
 class ThresholdPressureDataHandle
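
A minimal usage sketch of the new gather utility, mirroring the simProps gathering in
setupOutputWriter() above. It assumes the FlowMainEbos members used by the patch
(grid(), globalGrid(), output_cout_); localField is a hypothetical per-cell vector
introduced only for illustration, not part of the patch.

    // Sketch: gather a per-cell field from all ranks into the global
    // (undistributed) cell ordering used for ECL output.
    std::vector<double> localField(grid().size(0), 0.0); // one value per local cell
    std::vector<double> globalField;

    if (output_cout_)
    {
        // Only the I/O rank needs the global buffer, but it must already
        // have the correct size (see the \warning on the utility).
        globalField.resize(globalGrid().size(0));
    }

    // Called on every rank, as in the patch: each rank sends its local cells
    // and the I/O rank receives them via the grid's cellScatterGatherInterface().
    gatherCellDataToGlobalIterator(grid(), localField.begin(), globalField.begin());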