Communicate Inter-Region Flows to I/O Rank

This commit ensures that we compute inter-region flow rates on all
ranks and collect those rates on the I/O rank using CollectDataToIORank.

We add a trivial EclInterRegFlowMap data member to the communication
object.  This data member only knows the pertinent FIP region array
names, but uses existing read/write support to collect contributions
from all ranks into this "global" object.  We then pass this global
object on to the summary evaluation routine.
Author: Bård Skaflestad
Date:   2022-02-14 16:43:36 +01:00
parent d7b5d0f686
commit 840a29f8ef
7 changed files with 187 additions and 35 deletions
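
As background for the diff below, the gather pattern this commit relies on can be sketched in isolation: every rank serialises ("writes") its local inter-region flow contributions into a buffer, and the I/O rank deserialises ("reads") each rank's buffer into a single global object, summing rates for repeated region pairs. The Buffer and FlowMap types in this stand-alone C++ sketch are simplified stand-ins, not the real P2P MessageBufferType or EclInterRegFlowMap API:

#include <iostream>
#include <map>
#include <sstream>
#include <utility>

// Stand-in for the P2P message buffer: a simple text stream.
struct Buffer {
    std::stringstream s;
    void write(int r1, int r2, double q) { s << r1 << ' ' << r2 << ' ' << q << ' '; }
    bool read(int& r1, int& r2, double& q) { return static_cast<bool>(s >> r1 >> r2 >> q); }
};

// Stand-in for EclInterRegFlowMap: accumulated rate per region pair.
struct FlowMap {
    std::map<std::pair<int, int>, double> rate;

    void addConnection(int r1, int r2, double q) { rate[{r1, r2}] += q; }

    // Serialise all local contributions into a buffer.
    void write(Buffer& b) const {
        for (const auto& [regs, q] : rate) { b.write(regs.first, regs.second, q); }
    }

    // Accumulate one rank's contributions into this (global) object.
    void read(Buffer& b) {
        int r1 = 0, r2 = 0;
        double q = 0.0;
        while (b.read(r1, r2, q)) { addConnection(r1, r2, q); }
    }
};

int main()
{
    // Two pretend compute ranks; 'global' lives on the I/O rank.
    FlowMap rank0, rank1, global;
    rank0.addConnection(1, 2, 10.0);
    rank1.addConnection(1, 2,  5.0); // same region pair, different rank
    rank1.addConnection(2, 3,  7.0);

    // The I/O rank unpacks every rank's buffer into the global map.
    for (const FlowMap* local : { &rank0, &rank1 }) {
        Buffer b;
        local->write(b);
        global.read(b);
    }

    // Prints "1 -> 2: 15" and "2 -> 3: 7".
    for (const auto& [regs, q] : global.rate) {
        std::cout << regs.first << " -> " << regs.second << ": " << q << '\n';
    }
}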

ebos/collecttoiorank.cc

@@ -41,6 +41,14 @@
#include <cassert>
#include <stdexcept>
#include <string>
#include <vector>
namespace {
std::vector<std::string> toVector(const std::set<std::string>& string_set)
{
return { string_set.begin(), string_set.end() };
}
}
namespace Opm {
@@ -610,7 +618,6 @@ private:
WellTestState& global_;
};
class PackUnPackAquiferData : public P2PCommunicatorType::DataHandleInterface
{
const data::Aquifers& localAquiferData_;
@@ -750,14 +757,56 @@ private:
}
};
class PackUnpackInterRegFlows : public P2PCommunicatorType::DataHandleInterface
{
const EclInterRegFlowMap& localInterRegFlows_;
EclInterRegFlowMap& globalInterRegFlows_;
public:
PackUnpackInterRegFlows(const EclInterRegFlowMap& localInterRegFlows,
EclInterRegFlowMap& globalInterRegFlows,
const bool isIORank)
: localInterRegFlows_ (localInterRegFlows)
, globalInterRegFlows_(globalInterRegFlows)
{
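// On the I/O rank, pack our own local contributions and unpack them
// straight into the global object so they are already included
// before the exchange with the other ranks.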
if (! isIORank) { return; }
MessageBufferType buffer;
this->pack(0, buffer);
// pass a dummy link to satisfy the virtual interface
const int dummyLink = -1;
this->unpack(dummyLink, buffer);
}
// pack all data associated with link
void pack(int link, MessageBufferType& buffer)
{
// we should only get one link
if (link != 0) {
throw std::logic_error {
"link in method pack is not 0 as expected"
};
}
// write all inter-region flow data
this->localInterRegFlows_.write(buffer);
}
// unpack all data associated with link
void unpack(int /*link*/, MessageBufferType& buffer)
{ this->globalInterRegFlows_.read(buffer); }
};
template <class Grid, class EquilGrid, class GridView>
CollectDataToIORank<Grid,EquilGrid,GridView>::
CollectDataToIORank(const Grid& grid, const EquilGrid* equilGrid,
const GridView& localGridView,
const Dune::CartesianIndexMapper<Grid>& cartMapper,
-const Dune::CartesianIndexMapper<EquilGrid>* equilCartMapper)
+const Dune::CartesianIndexMapper<EquilGrid>* equilCartMapper,
+const std::set<std::string>& fipRegionsInterregFlow)
: toIORankComm_(grid.comm())
, globalInterRegFlows_(EclInterRegFlowMap::createMapFromNames(toVector(fipRegionsInterregFlow)))
{
// index maps only have to be built when reordering is needed
if (!needsReordering && !isParallel())
@@ -878,7 +927,8 @@ collect(const data::Solution& localCellData,
const data::Wells& localWellData,
const data::GroupAndNetworkValues& localGroupAndNetworkData,
const data::Aquifers& localAquiferData,
-const WellTestState& localWellTestState)
+const WellTestState& localWellTestState,
+const EclInterRegFlowMap& localInterRegFlows)
{
globalCellData_ = {};
globalBlockData_.clear();
@@ -887,6 +937,7 @@ collect(const data::Solution& localCellData,
globalGroupAndNetworkData_.clear();
globalAquiferData_.clear();
globalWellTestState_.clear();
this->globalInterRegFlows_.clear();
// index maps only have to be built when reordering is needed
if(!needsReordering && !isParallel())
@@ -943,6 +994,12 @@ collect(const data::Solution& localCellData,
this->isIORank()
};
PackUnpackInterRegFlows packUnpackInterRegFlows {
localInterRegFlows,
this->globalInterRegFlows_,
this->isIORank()
};
toIORankComm_.exchange(packUnpackCellData);
toIORankComm_.exchange(packUnpackWellData);
toIORankComm_.exchange(packUnpackGroupAndNetworkData);
@@ -950,6 +1007,7 @@ collect(const data::Solution& localCellData,
toIORankComm_.exchange(packUnpackWBPData);
toIORankComm_.exchange(packUnpackAquiferData);
toIORankComm_.exchange(packUnpackWellTestState);
toIORankComm_.exchange(packUnpackInterRegFlows);
#ifndef NDEBUG
// make sure every process is on the same page

ebos/collecttoiorank.hh

@@ -28,10 +28,13 @@
#include <opm/output/data/Solution.hpp>
#include <opm/output/data/Wells.hpp>
#include <opm/output/data/Groups.hpp>
#include <opm/input/eclipse/Schedule/Well/WellTestState.hpp>
#include <opm/grid/common/p2pcommunicator.hh>
#include <ebos/eclinterregflows.hh>
#include <map>
#include <utility>
#include <vector>
@@ -62,7 +65,8 @@ public:
const EquilGrid* equilGrid,
const GridView& gridView,
const Dune::CartesianIndexMapper<Grid>& cartMapper,
-const Dune::CartesianIndexMapper<EquilGrid>* equilCartMapper);
+const Dune::CartesianIndexMapper<EquilGrid>* equilCartMapper,
+const std::set<std::string>& fipRegionsInterregFlow = {});
// gather solution to rank 0 for EclipseWriter
void collect(const data::Solution& localCellData,
@@ -71,7 +75,8 @@ public:
const data::Wells& localWellData,
const data::GroupAndNetworkValues& localGroupAndNetworkData,
const data::Aquifers& localAquiferData,
-const WellTestState& localWellTestState);
+const WellTestState& localWellTestState,
+const EclInterRegFlowMap& interRegFlows);
const std::map<std::size_t, double>& globalWBPData() const
{ return this->globalWBPData_; }
@@ -94,6 +99,12 @@ public:
const data::Aquifers& globalAquiferData() const
{ return globalAquiferData_; }
EclInterRegFlowMap& globalInterRegFlows()
{ return this->globalInterRegFlows_; }
const EclInterRegFlowMap& globalInterRegFlows() const
{ return this->globalInterRegFlows_; }
bool isIORank() const
{ return toIORankComm_.rank() == ioRank; }
@@ -112,6 +123,7 @@ public:
protected:
P2PCommunicatorType toIORankComm_;
EclInterRegFlowMap globalInterRegFlows_;
IndexMapType globalCartesianIndex_;
IndexMapType localIndexMap_;
IndexMapStorageType indexMaps_;

ebos/eclgenericwriter.cc

@@ -34,6 +34,7 @@
#include <opm/output/eclipse/Summary.hpp>
#include <opm/input/eclipse/EclipseState/EclipseState.hpp>
#include <opm/input/eclipse/EclipseState/SummaryConfig/SummaryConfig.hpp>
#include <opm/input/eclipse/Schedule/Action/State.hpp>
#include <opm/input/eclipse/Schedule/Schedule.hpp>
#include <opm/input/eclipse/Schedule/SummaryState.hpp>
@@ -52,6 +53,12 @@
#include <mpi.h>
#endif
#include <array>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
namespace {
/*!
@@ -102,6 +109,22 @@ bool directVerticalNeighbors(const std::array<int, 3>& cartDims,
return false;
}
std::unordered_map<std::string, Opm::data::InterRegFlowMap>
getInterRegFlowsAsMap(const Opm::EclInterRegFlowMap& map)
{
auto maps = std::unordered_map<std::string, Opm::data::InterRegFlowMap>{};
const auto& regionNames = map.names();
auto flows = map.getInterRegFlows();
const auto nmap = regionNames.size();
maps.reserve(nmap);
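// Note: 0*nmap is just a zero with the same (unsigned) type as
// nmap, keeping the loop comparison below free of sign warnings.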
for (auto mapID = 0*nmap; mapID < nmap; ++mapID) {
maps.emplace(regionNames[mapID], std::move(flows[mapID]));
}
return maps;
}
struct EclWriteTasklet : public Opm::TaskletInterface
{
@@ -173,7 +196,8 @@ EclGenericWriter(const Schedule& schedule,
equilGrid,
gridView,
cartMapper,
-equilCartMapper)
+equilCartMapper,
+summaryConfig.fip_regions_interreg_flow())
, grid_(grid)
, gridView_(gridView)
, schedule_(schedule)
@@ -450,8 +474,8 @@ doWriteOutput(const int reportStepNum,
template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
void EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
-evalSummary(int reportStepNum,
-Scalar curTime,
+evalSummary(const int reportStepNum,
+const Scalar curTime,
const std::map<std::size_t, double>& wbpData,
const data::Wells& localWellData,
const data::GroupAndNetworkValues& localGroupAndNetworkData,
@@ -459,10 +483,11 @@ evalSummary(int reportStepNum,
const std::map<std::pair<std::string, int>, double>& blockData,
const std::map<std::string, double>& miscSummaryData,
const std::map<std::string, std::vector<double>>& regionData,
-SummaryState& summaryState,
-UDQState& udqState,
const Inplace& inplace,
-const Inplace& initialInPlace)
+const Inplace& initialInPlace,
+const EclInterRegFlowMap& interRegionFlowMap,
+SummaryState& summaryState,
+UDQState& udqState)
{
std::vector<char> buffer;
if (collectToIORank_.isIORank()) {
@@ -495,14 +520,13 @@ evalSummary(int reportStepNum,
wbp_calculators,
regionData,
blockData,
-aquiferData);
+aquiferData,
+getInterRegFlowsAsMap(interRegionFlowMap));
-/*
-Off-by-one-fun: The reportStepNum argument corresponds to the
-report step these results will be written to, whereas the argument
-to UDQ function evaluation corresponds to the report step we are
-currently on.
-*/
+// Off-by-one-fun: The reportStepNum argument corresponds to the
+// report step these results will be written to, whereas the
+// argument to UDQ function evaluation corresponds to the report
+// step we are currently on.
auto udq_step = reportStepNum - 1;
const auto& udq_config = schedule_.getUDQConfig(udq_step);
udq_config.eval( udq_step, schedule_.wellMatcher(udq_step), summaryState, udqState);

ebos/eclgenericwriter.hh

@@ -42,9 +42,9 @@
namespace Opm {
-namespace Action { class State; }
class EclipseIO;
class EclipseState;
+class EclInterRegFlowMap;
class Inplace;
struct NNCdata;
class Schedule;
@@ -52,6 +52,14 @@ class SummaryConfig;
class SummaryState;
class UDQState;
} // namespace Opm
+namespace Opm { namespace Action {
+class State;
+}} // namespace Opm::Action
namespace Opm {
template <class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
class EclGenericWriter
{
@@ -119,10 +127,11 @@ protected:
const std::map<std::pair<std::string, int>, double>& blockData,
const std::map<std::string, double>& miscSummaryData,
const std::map<std::string, std::vector<double>>& regionData,
-SummaryState& summaryState,
-UDQState& udqState,
const Inplace& inplace,
-const Inplace& initialInPlace);
+const Inplace& initialInPlace,
+const EclInterRegFlowMap& interRegionFlowMap,
+SummaryState& summaryState,
+UDQState& udqState);
CollectDataToIORankType collectToIORank_;
const Grid& grid_;

ebos/eclinterregflows.cc

@@ -111,6 +111,17 @@ assignGlobalMaxRegionID(const std::size_t regID)
//
// =====================================================================
Opm::EclInterRegFlowMap
Opm::EclInterRegFlowMap::createMapFromNames(std::vector<std::string> names)
{
auto map = EclInterRegFlowMap{};
map.names_ = std::move(names);
map.regionMaps_.resize(map.names_.size(), EclInterRegFlowMapSingleFIP{});
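// The per-FIP-array maps start out empty here; actual flow rates
// arrive later through read() when collecting on the I/O rank.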
return map;
}
Opm::EclInterRegFlowMap::
EclInterRegFlowMap(const std::size_t numCells,
const std::vector<SingleRegion>& regions)

ebos/eclinterregflows.hh

@@ -38,6 +38,8 @@
namespace Opm {
class EclInterRegFlowMap;
/// Form CSR adjacency matrix representation of inter-region flow rate
/// graph provided as a list of connections between regions on local MPI
/// rank. Pertains to a single FIP definition array (e.g., FIPNUM).
@@ -56,6 +58,8 @@ namespace Opm {
bool isInterior{true};
};
friend class EclInterRegFlowMap;
/// Constructor
///
/// \param[in] region Local rank's FIP region definition array.
@@ -165,6 +169,9 @@ namespace Opm {
/// Whether or not this object contains contributions deserialised
/// from a stream. For error detection.
bool isReadFromStream_{false};
/// Default constructor.
EclInterRegFlowMapSingleFIP() = default;
};
/// Inter-region flow accumulation maps for all region definition arrays
@@ -188,6 +195,15 @@ namespace Opm {
/// Default constructor.
EclInterRegFlowMap() = default;
/// Special purpose constructor for global object being collected on
/// the I/O rank.
///
/// Only knows about the FIP region set names.
///
/// \param[in] names Sorted sequence of FIP region names.
static EclInterRegFlowMap
createMapFromNames(std::vector<std::string> names);
/// Constructor.
///
/// \param[in] numCells Number of cells on local MPI rank, including

ebos/eclwriter.hh

@@ -28,20 +28,19 @@
#ifndef EWOMS_ECL_WRITER_HH
#define EWOMS_ECL_WRITER_HH
#include "collecttoiorank.hh"
#include "ecloutputblackoilmodule.hh"
#include <ebos/collecttoiorank.hh>
#include <ebos/eclgenericwriter.hh>
#include <ebos/ecloutputblackoilmodule.hh>
#include <opm/simulators/utils/DeferredLoggingErrorHelpers.hpp>
#include <opm/simulators/utils/ParallelRestart.hpp>
#include <opm/input/eclipse/Units/UnitSystem.hpp>
#include <opm/simulators/utils/ParallelRestart.hpp>
#include <opm/simulators/utils/DeferredLoggingErrorHelpers.hpp>
#include <ebos/eclgenericwriter.hh>
#include <dune/grid/common/partitionset.hh>
#include <functional>
#include <limits>
#include <stdexcept>
#include <string>
namespace Opm::Properties {
@@ -190,15 +189,34 @@ public:
this->captureLocalFluxData();
-if (this->collectToIORank_.isParallel())
+if (this->collectToIORank_.isParallel()) {
OPM_BEGIN_PARALLEL_TRY_CATCH()
this->collectToIORank_.collect({},
eclOutputModule_->getBlockData(),
eclOutputModule_->getWBPData(),
localWellData,
localGroupAndNetworkData,
localAquiferData,
-localWellTestState);
+localWellTestState,
+this->eclOutputModule_->getInterRegFlows());
if (this->collectToIORank_.isIORank()) {
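// All ranks must agree on the region set names; once verified,
// compress() finalises the accumulated global flow rates.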
auto& iregFlows = this->collectToIORank_.globalInterRegFlows();
if (! iregFlows.readIsConsistent()) {
throw std::runtime_error {
"Inconsistent inter-region flow "
"region set names in parallel"
};
}
iregFlows.compress();
}
OPM_END_PARALLEL_TRY_CATCH("Collect to I/O rank: ",
this->simulator_.vanguard().grid().comm());
}
std::map<std::string, double> miscSummaryData;
std::map<std::string, std::vector<double>> regionData;
@@ -246,9 +264,12 @@ public:
this->collectToIORank_.globalBlockData() :
this->eclOutputModule_->getBlockData(),
miscSummaryData, regionData,
-summaryState(), udqState(),
inplace,
-eclOutputModule_->initialInplace());
+eclOutputModule_->initialInplace(),
+this->collectToIORank_.isParallel()
+? this->collectToIORank_.globalInterRegFlows()
+: this->eclOutputModule_->getInterRegFlows(),
+summaryState(), udqState());
eclOutputModule_->outputProdLog(reportStepNum, isSubStep, forceDisableProdOutput);
eclOutputModule_->outputInjLog(reportStepNum, isSubStep, forceDisableInjOutput);
@@ -285,7 +306,8 @@ public:
localWellData,
localGroupAndNetworkData,
localAquiferData,
-localWellTestState);
+localWellTestState,
+{});
}
if (this->collectToIORank_.isIORank()) {