Use CpGrid::loadBalance to distribute the field properties.

The data handle created for the communication could in theory be used
with other DUNE grids, too. In practice we will need to merge it with
the handle that ALUGrid already uses to communicate the Cartesian
indices.

This PR removes the use of the get_global_(double|int) methods in
ParallelEclipseState and reduces the amount of boilerplate code there.
Markus Blatt 2020-03-04 12:46:22 +01:00
parent 123a9d4579
commit 77478a59eb
5 changed files with 213 additions and 137 deletions
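
The handle added below implements Dune's generic CommDataHandleIF protocol, which is what makes it usable with other DUNE grids in principle. For orientation, here is a minimal sketch of that protocol; the names (MinimalCellDataHandle, data_) are illustrative only, and the real handle in this commit additionally broadcasts the property keys and packs several values per cell.

// Minimal sketch (illustrative, not part of the commit): communicate one double
// per cell, stored in a map keyed by the grid's local id so the value can be
// looked up again after the cells have been redistributed.
#include <dune/grid/common/datahandleif.hh>
#include <unordered_map>

template<class Grid>
class MinimalCellDataHandle
    : public Dune::CommDataHandleIF<MinimalCellDataHandle<Grid>, double>
{
public:
    using IdType = typename Grid::LocalIdSet::IdType;

    MinimalCellDataHandle(const Grid& grid, std::unordered_map<IdType, double>& data)
        : grid_(grid), data_(data)
    {}

    // Only data attached to cells (codim 0) is communicated.
    bool contains(int /* dim */, int codim) const
    { return codim == 0; }

    // Every cell sends the same number of values. Older Dune releases expect
    // the spelling fixedsize() instead.
    bool fixedSize(int /* dim */, int /* codim */) const
    { return true; }

    // One double per cell.
    template<class EntityType>
    std::size_t size(const EntityType& /* entity */) const
    { return 1; }

    // Called on the rank that currently owns the cell.
    template<class BufferType, class EntityType>
    void gather(BufferType& buffer, const EntityType& e) const
    { buffer.write(data_.at(grid_.localIdSet().id(e))); }

    // Called on the rank that receives the cell.
    template<class BufferType, class EntityType>
    void scatter(BufferType& buffer, const EntityType& e, std::size_t /* n */)
    { buffer.read(data_[grid_.localIdSet().id(e)]); }

private:
    const Grid& grid_;
    std::unordered_map<IdType, double>& data_;
};

Such a handle is handed to the load balancer, e.g. grid.loadBalance(handle, ...); during redistribution gather is called for every cell that leaves a rank and scatter for every cell that arrives on one.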


@@ -175,6 +175,7 @@ list (APPEND PUBLIC_HEADER_FILES
opm/simulators/utils/ParallelFileMerger.hpp
opm/simulators/utils/DeferredLoggingErrorHelpers.hpp
opm/simulators/utils/DeferredLogger.hpp
opm/simulators/utils/FieldPropsDataHandle.hpp
opm/simulators/utils/gatherDeferredLogger.hpp
opm/simulators/utils/moduleVersion.hpp
opm/simulators/utils/ParallelEclipseState.hpp


@@ -34,6 +34,7 @@
#include <opm/grid/CpGrid.hpp>
#include <opm/grid/cpgrid/GridHelpers.hpp>
#include <opm/simulators/utils/ParallelEclipseState.hpp>
#include <opm/simulators/utils/FieldPropsDataHandle.hpp>
#include <dune/grid/common/mcmgmapper.hh>
@@ -188,7 +189,9 @@ public:
//distribute the grid and switch to the distributed view.
{
const auto wells = this->schedule().getWellsatEnd();
defunctWellNames_ = std::get<1>(grid_->loadBalance(edgeWeightsMethod, &wells, faceTrans.data()));
auto& eclState = static_cast<ParallelEclipseState&>(this->eclState());
FieldPropsDataHandle<Dune::CpGrid> handle(*grid_, eclState);
defunctWellNames_ = std::get<1>(grid_->loadBalance(handle, edgeWeightsMethod, &wells, faceTrans.data()));
}
grid_->switchToDistributedView();
@@ -210,14 +213,6 @@ public:
this->updateGridView_();
#if HAVE_MPI
if (mpiSize > 1) {
std::vector<int> cartIndices;
cartIndices.reserve(grid_->numCells());
auto locElemIt = this->gridView().template begin</*codim=*/0>();
const auto& locElemEndIt = this->gridView().template end</*codim=*/0>();
for (; locElemIt != locElemEndIt; ++locElemIt) {
cartIndices.push_back(cartesianIndexMapper_->cartesianIndex(locElemIt->index()));
}
static_cast<ParallelEclipseState&>(this->eclState()).setupLocalProps(cartIndices);
static_cast<ParallelEclipseState&>(this->eclState()).switchToDistributedProps();
}
#endif


@@ -0,0 +1,201 @@
/*
Copyright 2020 Equinor AS.
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
/*!
* \file FieldPropsDataHandle.hpp
* \brief File containing a data handle for communicating the FieldProperties
*
* \author Markus Blatt, OPM-OP AS
*/
#ifndef FIELDPROPS_DATAHANDLE_HPP
#define FIELDPROPS_DATAHANDLE_HPP
#include <opm/simulators/utils/ParallelEclipseState.hpp>
#include <opm/simulators/utils/ParallelRestart.hpp>
#include <dune/grid/common/datahandleif.hh>
#include <dune/grid/common/mcmgmapper.hh>
#include <dune/grid/common/partitionset.hh>
#include <dune/common/parallel/mpihelper.hh>
#include <unordered_map>
#include <iostream>
namespace Opm
{
/*!
* \brief A data handle to communicate the field properties during load balancing.
* \tparam Grid The type of grid where the load balancing is happening.
* \todo Maybe specialize this for CpGrid to save some space, later.
*/
template<class Grid>
class FieldPropsDataHandle
: public Dune::CommDataHandleIF< FieldPropsDataHandle<Grid>, double>
{
public:
//! \brief the data type we send (ints are converted to double)
using DataType = double;
//! \brief Constructor
//! \param grid The grid where the load balancing is happening.
//! \param eclState The eclipse state holding the global field properties
//!                 on the root process and receiving the distributed ones.
FieldPropsDataHandle(const Grid& grid, ParallelEclipseState& eclState)
: m_grid(grid), m_distributed_fieldProps(eclState.m_fieldProps)
{
// Scatter the keys
const auto& comm = Dune::MPIHelper::getCollectiveCommunication();
if (comm.rank() == 0)
{
const auto& globalProps = eclState.globalFieldProps();
m_intKeys = globalProps.keys<int>();
m_doubleKeys = globalProps.keys<double>();
std::size_t packSize = Mpi::packSize(m_intKeys, comm) +
Mpi::packSize(m_doubleKeys,comm);
std::vector<char> buffer(packSize);
int position = 0;
Mpi::pack(m_intKeys, buffer, position, comm);
Mpi::pack(m_doubleKeys, buffer, position, comm);
comm.broadcast(&position, 1, 0);
comm.broadcast(buffer.data(), position, 0);
// copy data to persistent map based on local id
auto noData = m_intKeys.size() + m_doubleKeys.size();
const auto& idSet = m_grid.localIdSet();
const auto& gridView = m_grid.levelGridView(0);
using ElementMapper =
Dune::MultipleCodimMultipleGeomTypeMapper<typename Grid::LevelGridView>;
ElementMapper elemMapper(gridView, Dune::mcmgElementLayout());
for( const auto &element : elements( gridView, Dune::Partitions::interiorBorder ) )
{
const auto& id = idSet.id(element);
auto index = elemMapper.index(element);
auto& data = elementData_[id];
data.reserve(noData);
for(const auto& intKey : m_intKeys)
data.push_back(globalProps.get_int(intKey)[index]);
for(const auto& doubleKey : m_doubleKeys)
data.push_back(globalProps.get_double(doubleKey)[index]);
}
}
else
{
int bufferSize;
comm.broadcast(&bufferSize, 1, 0);
std::vector<char> buffer(bufferSize);
comm.broadcast(buffer.data(), bufferSize, 0);
int position{};
Mpi::unpack(m_intKeys, buffer, position, comm);
Mpi::unpack(m_doubleKeys, buffer, position, comm);
}
}
FieldPropsDataHandle(const FieldPropsDataHandle&) = delete;
~FieldPropsDataHandle()
{
// distributed grid is now correctly set up.
for(const auto& intKey : m_intKeys)
m_distributed_fieldProps.m_intProps[intKey].resize(m_grid.size(0));
for(const auto& doubleKey : m_doubleKeys)
m_distributed_fieldProps.m_doubleProps[doubleKey].resize(m_grid.size(0));
// copy data from the persistent map to the field properties
const auto& idSet = m_grid.localIdSet();
const auto& gridView = m_grid.levelGridView(0);
using ElementMapper =
Dune::MultipleCodimMultipleGeomTypeMapper<typename Grid::LevelGridView>;
ElementMapper elemMapper(gridView, Dune::mcmgElementLayout());
for( const auto &element : elements( gridView, Dune::Partitions::all ) )
{
std::size_t counter{};
const auto& id = idSet.id(element);
auto index = elemMapper.index(element);
auto data = rcvdElementData_.find(id);
assert(data != rcvdElementData_.end());
for(const auto& intKey : m_intKeys)
m_distributed_fieldProps.m_intProps[intKey][index] = static_cast<int>(data->second[counter++]);
for(const auto& doubleKey : m_doubleKeys)
m_distributed_fieldProps.m_doubleProps[doubleKey][index] = data->second[counter++];
}
}
bool contains(int /* dim */, int codim)
{
return codim == 0;
}
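// Note: older Dune releases query fixedsize(), newer ones fixedSize(),
// hence both spellings are provided below.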
bool fixedsize(int /* dim */, int /* codim */)
{
return true;
}
bool fixedSize(int /* dim */, int /* codim */)
{
return true;
}
template<class EntityType>
std::size_t size(const EntityType /* entity */)
{
return m_intKeys.size() + m_doubleKeys.size();
}
template<class BufferType, class EntityType>
void gather(BufferType& buffer, const EntityType& e) const
{
auto iter = elementData_.find(m_grid.localIdSet().id(e));
assert(iter != elementData_.end());
for(const auto& data : iter->second)
buffer.write(data);
}
template<class BufferType, class EntityType>
void scatter(BufferType& buffer, const EntityType& e, std::size_t n)
{
assert(n == m_intKeys.size() + m_doubleKeys.size());
auto& array = rcvdElementData_[m_grid.localIdSet().id(e)];
array.resize(n);
for(auto& data: array)
{
buffer.read(data);
}
}
private:
using LocalIdSet = typename Grid::LocalIdSet;
const Grid& m_grid;
//! \brief The distributed field properties for receiving
ParallelFieldPropsManager& m_distributed_fieldProps;
//! \brief The names of the keys of the integer fields.
std::vector<std::string> m_intKeys;
//! \brief The names of the keys of the double fields.
std::vector<std::string> m_doubleKeys;
/// \brief The data to send, as a vector per element mapped from the local id (filled on the root process).
std::unordered_map<typename LocalIdSet::IdType, std::vector<double> > elementData_;
/// \brief The data received in scatter, as a vector per element mapped from the local id.
std::unordered_map<typename LocalIdSet::IdType, std::vector<double> > rcvdElementData_;
};
} // end namespace Opm
#endif // FIELDPROPS_DATAHANDLE_HPP


@@ -232,125 +232,4 @@ void ParallelEclipseState::switchToDistributedProps()
m_parProps = true;
}
#if HAVE_MPI
namespace {
template<class T>
struct GetField {
GetField(const FieldPropsManager& propMan) : props(propMan) {}
std::vector<T> getField(const std::string& key) const;
const FieldPropsManager& props;
};
template<>
std::vector<int> GetField<int>::getField(const std::string& key) const {
return props.get_global_int(key);
}
template<>
std::vector<double> GetField<double>::getField(const std::string& key) const {
return props.get_global_double(key);
}
template<class T>
void extractRootProps(const std::vector<int>& localToGlobal,
const std::vector<std::string>& keys,
const GetField<T>& getter,
std::map<std::string,std::vector<T>>& localMap)
{
for (const std::string& key : keys) {
auto prop = getter.getField(key);
std::vector<T>& local = localMap[key];
local.reserve(localToGlobal.size());
for (int cell : localToGlobal) {
local.push_back(prop[cell]);
}
}
}
template<class T>
void packProps(const std::vector<int>& l2gCell,
const std::vector<std::string>& keys,
const GetField<T>& getter,
std::vector<char>& buffer, int& position)
{
const auto& comm = Dune::MPIHelper::getCollectiveCommunication();
std::vector<T> sendData(l2gCell.size());
for (const std::string& key : keys) {
auto prop = getter.getField(key);
size_t idx = 0;
for (int cell : l2gCell)
sendData[idx++] = prop[cell];
Mpi::pack(sendData, buffer, position, comm);
}
}
}
void ParallelEclipseState::setupLocalProps(const std::vector<int>& localToGlobal)
{
const auto& comm = Dune::MPIHelper::getCollectiveCommunication();
if (comm.rank() == 0) {
extractRootProps(localToGlobal, this->globalFieldProps().keys<int>(),
GetField<int>(this->globalFieldProps()),
m_fieldProps.m_intProps);
extractRootProps(localToGlobal, this->globalFieldProps().keys<double>(),
GetField<double>(this->globalFieldProps()),
m_fieldProps.m_doubleProps);
for (int i = 1; i < comm.size(); ++i) {
std::vector<int> l2gCell;
size_t size;
MPI_Recv(&size, 1, Dune::MPITraits<size_t>::getType(), i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
l2gCell.resize(size);
MPI_Recv(l2gCell.data(), size, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
size_t cells = l2gCell.size();
const auto& intKeys = this->globalFieldProps().keys<int>();
const auto& dblKeys = this->globalFieldProps().keys<double>();
size = Mpi::packSize(intKeys, comm) +
Mpi::packSize(dblKeys,comm) +
intKeys.size() * Mpi::packSize(std::vector<int>(cells), comm) +
dblKeys.size() * Mpi::packSize(std::vector<double>(cells), comm);
std::vector<char> buffer(size);
int position = 0;
Mpi::pack(intKeys, buffer, position, comm);
Mpi::pack(dblKeys, buffer, position, comm);
packProps(l2gCell, intKeys, GetField<int>(this->globalFieldProps()),
buffer, position);
packProps(l2gCell, dblKeys, GetField<double>(this->globalFieldProps()),
buffer, position);
MPI_Send(&position, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
MPI_Send(buffer.data(), position, MPI_CHAR, i, 0, MPI_COMM_WORLD);
}
} else {
size_t l2gSize = localToGlobal.size();
MPI_Send(&l2gSize, 1, Dune::MPITraits<size_t>::getType(), 0, 0, MPI_COMM_WORLD);
MPI_Send(localToGlobal.data(), localToGlobal.size(), MPI_INT, 0, 0, MPI_COMM_WORLD);
int size;
MPI_Recv(&size, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
std::vector<char> buffer(size);
MPI_Recv(buffer.data(), size, MPI_CHAR, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
std::vector<std::string> intKeys, dblKeys;
int position = 0;
Mpi::unpack(intKeys, buffer, position, comm);
Mpi::unpack(dblKeys, buffer, position, comm);
for (const std::string& key : intKeys) {
Mpi::unpack(m_fieldProps.m_intProps[key], buffer, position, comm);
}
for (const std::string& key : dblKeys) {
Mpi::unpack(m_fieldProps.m_doubleProps[key], buffer, position, comm);
}
}
}
#endif
} // end namespace Opm


@@ -20,6 +20,7 @@
#define PARALLEL_ECLIPSE_STATE_HPP
#include <opm/parser/eclipse/EclipseState/EclipseState.hpp>
//#include <opm/simulators/utils/FieldPropsDataHandle.hpp>
#include <dune/common/parallel/mpihelper.hh>
namespace Opm {
@@ -38,6 +39,9 @@ class EclMpiSerializer;
class ParallelFieldPropsManager : public FieldPropsManager {
public:
friend class ParallelEclipseState; //!< Friend so props can be setup.
//! \brief Friend to set up props
template<class Grid>
friend class FieldPropsDataHandle;
//! \brief Constructor.
//! \param manager The field property manager to wrap.
@@ -100,6 +104,9 @@ protected:
*/
class ParallelEclipseState : public EclipseState {
//! \brief Friend to set up props
template<class Grid>
friend class FieldPropsDataHandle;
public:
//! \brief Default constructor.
ParallelEclipseState();
@@ -135,13 +142,6 @@ public:
//! setupLocalProps must be called prior to this.
void switchToDistributedProps();
#if HAVE_MPI
//! \brief Setup local properties.
//! \param localToGlobal Map from local cells on calling process to global cartesian cell
//! \details Must be called after grid has been partitioned
void setupLocalProps(const std::vector<int>& localToGlobal);
#endif
//! \brief Returns a const ref to current field properties.
const FieldPropsManager& fieldProps() const override;