Merge pull request #2417 from blattms/loadbalance-centroids

Also distribute the centroids when loadbalancing CpGrid.
Markus Blatt 2020-03-06 19:47:51 +01:00 committed by GitHub
commit eea15e5659
6 changed files with 78 additions and 65 deletions


@@ -175,11 +175,11 @@ list (APPEND PUBLIC_HEADER_FILES
opm/simulators/utils/ParallelFileMerger.hpp
opm/simulators/utils/DeferredLoggingErrorHelpers.hpp
opm/simulators/utils/DeferredLogger.hpp
opm/simulators/utils/FieldPropsDataHandle.hpp
opm/simulators/utils/gatherDeferredLogger.hpp
opm/simulators/utils/moduleVersion.hpp
opm/simulators/utils/ParallelEclipseState.hpp
opm/simulators/utils/ParallelRestart.hpp
opm/simulators/utils/PropsCentroidsDataHandle.hpp
opm/simulators/wells/PerforationData.hpp
opm/simulators/wells/RateConverter.hpp
opm/simulators/wells/SimFIBODetails.hpp


@@ -533,6 +533,15 @@ public:
std::unordered_set<std::string> defunctWellNames() const
{ return std::unordered_set<std::string>(); }
/*!
* \brief Get the cell centroids for a distributed grid.
*
* Currently this is only non-empty for a loadbalanced CpGrid.
*/
const std::vector<double>& cellCentroids() const
{
return centroids_;
}
protected:
void callImplementationInit()
{
@@ -610,6 +619,12 @@ private:
Opm::SummaryConfig* eclSummaryConfig_;
Dune::EdgeWeightMethod edgeWeightsMethod_;
protected:
/*! \brief The cell centroids after loadbalance was called.
* Empty otherwise. Used by EclTransmissibility.
*/
std::vector<double> centroids_;
};
template <class TypeTag>
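
For orientation (the helper below is illustrative, not part of the commit): the new centroids_ member stores dimensionworld coordinates per local cell in a flat array, ordered by the local element index, so a single centroid can be read from the cellCentroids() accessor roughly like this:

```cpp
#include <array>
#include <cstddef>
#include <vector>

// Minimal sketch, assuming dimensionworld == 3 and a flat layout of
// three coordinates per cell, ordered by the local element index.
template <class Vanguard>
std::array<double, 3> cellCentroid(const Vanguard& vanguard, std::size_t elemIdx)
{
    const std::vector<double>& centroids = vanguard.cellCentroids();
    std::array<double, 3> center{};
    if (!centroids.empty()) // only filled after loadbalancing a CpGrid
        for (int dim = 0; dim < 3; ++dim)
            center[dim] = centroids[3 * elemIdx + dim];
    return center;
}
```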


@@ -34,7 +34,7 @@
#include <opm/grid/CpGrid.hpp>
#include <opm/grid/cpgrid/GridHelpers.hpp>
#include <opm/simulators/utils/ParallelEclipseState.hpp>
#include <opm/simulators/utils/FieldPropsDataHandle.hpp>
#include <opm/simulators/utils/PropsCentroidsDataHandle.hpp>
#include <dune/grid/common/mcmgmapper.hh>
@@ -190,7 +190,15 @@ public:
{
const auto wells = this->schedule().getWellsatEnd();
auto& eclState = static_cast<ParallelEclipseState&>(this->eclState());
FieldPropsDataHandle<Dune::CpGrid> handle(*grid_, eclState);
const EclipseGrid* eclGrid = nullptr;
if (grid_->comm().rank() == 0)
{
eclGrid = &this->eclState().getInputGrid();
}
PropsCentroidsDataHandle<Dune::CpGrid> handle(*grid_, eclState, eclGrid, this->centroids_,
cartesianIndexMapper());
defunctWellNames_ = std::get<1>(grid_->loadBalance(handle, edgeWeightsMethod, &wells, faceTrans.data()));
}
grid_->switchToDistributedView();
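
Condensed, the new call sequence looks roughly as follows. The variable names outside the diff (grid, eclState, centroids, cartMapper, wells, faceTrans, edgeWeightsMethod) stand in for the vanguard members; note that the EclipseGrid pointer is non-null only on rank 0, because only the root process holds the fully parsed input grid:

```cpp
// Sketch of the loadbalancing call with the new data handle (error handling omitted).
const Opm::EclipseGrid* eclGrid = nullptr;
if (grid.comm().rank() == 0)
    eclGrid = &eclState.getInputGrid();       // full input grid exists only on the root rank

Opm::PropsCentroidsDataHandle<Dune::CpGrid> handle(grid, eclState, eclGrid,
                                                   centroids, cartMapper);
auto defunctWells = std::get<1>(grid.loadBalance(handle, edgeWeightsMethod,
                                                 &wells, faceTrans.data()));
```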


@@ -149,47 +149,7 @@ public:
for (unsigned dimIdx = 0; dimIdx < dimWorld; ++dimIdx)
axisCentroids[dimIdx].resize(numElements);
std::vector<double> centroids;
#if HAVE_MPI
size_t cells = vanguard_.grid().numCells();
if (global && comm.size() > 1) {
std::vector<size_t> sizes(comm.size());
if (comm.rank() == 0) {
const auto& eclGrid = eclState.getInputGrid();
comm.gather(&cells, sizes.data(), 1, 0);
for (int i = 1; i < comm.size(); ++i) {
std::vector<int> cell_id(sizes[i]);
MPI_Recv(cell_id.data(), sizes[i], MPI_INT,
i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
centroids.resize(dimWorld * sizes[i]);
auto cIt = centroids.begin();
for (int idx : cell_id) {
const auto& centroid = eclGrid.getCellCenter(idx);
for (const auto& it : centroid)
*cIt++ = it;
}
MPI_Send(centroids.data(), dimWorld * sizes[i],
MPI_DOUBLE, i, 0, MPI_COMM_WORLD);
}
centroids.clear();
} else {
comm.gather(&cells, sizes.data(), 1, 0);
std::vector<int> cell_ids;
cell_ids.reserve(cells);
auto elemIt = gridView.template begin</*codim=*/ 0>();
const auto& elemEndIt = gridView.template end</*codim=*/ 0>();
for (; elemIt != elemEndIt; ++elemIt) {
const auto& elem = *elemIt;
cell_ids.push_back(cartMapper.cartesianIndex(elemMapper.index(elem)));
}
MPI_Send(cell_ids.data(), cells, MPI_INT, 0, 0, MPI_COMM_WORLD);
centroids.resize(cells * dimWorld);
MPI_Recv(centroids.data(), dimWorld * cells, MPI_DOUBLE,
0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
}
#endif
const std::vector<double>& centroids = vanguard_.cellCentroids();
auto elemIt = gridView.template begin</*codim=*/ 0>();
const auto& elemEndIt = gridView.template end</*codim=*/ 0>();
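
With the hand-rolled MPI exchange gone, EclTransmissibility only consumes vanguard_.cellCentroids(). A sketch of how a per-element centroid might then be picked, falling back to the global EclipseGrid when the vector is empty (i.e. in a sequential run); the helper name pickCentroid and the fallback path are assumptions, not shown in the diff:

```cpp
#include <array>
#include <cstddef>
#include <vector>

// Sketch: use the distributed centroid vector when present (parallel case),
// otherwise query the global input grid (sequential case).
template <class EclGrid>
std::array<double, 3> pickCentroid(const std::vector<double>& centroids,
                                   const EclGrid& eclGrid,
                                   std::size_t elemIdx,   // element-mapper index
                                   std::size_t cartIdx)   // cartesian index of the cell
{
    std::array<double, 3> result{};
    if (!centroids.empty()) {
        for (int dim = 0; dim < 3; ++dim)
            result[dim] = centroids[3 * elemIdx + dim];
    } else {
        const auto& c = eclGrid.getCellCenter(cartIdx);   // as used elsewhere in the diff
        for (int dim = 0; dim < 3; ++dim)
            result[dim] = c[dim];
    }
    return result;
}
```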


@@ -20,7 +20,6 @@
#define PARALLEL_ECLIPSE_STATE_HPP
#include <opm/parser/eclipse/EclipseState/EclipseState.hpp>
//#include <opm/simulators/utils/FieldPropsDataHandle.hpp>
#include <dune/common/parallel/mpihelper.hh>
namespace Opm {
@@ -41,7 +40,7 @@ public:
friend class ParallelEclipseState; //!< Friend so props can be setup.
//! \brief Friend to set up props
template<class Grid>
friend class FieldPropsDataHandle;
friend class PropsCentroidsDataHandle;
//! \brief Constructor.
//! \param manager The field property manager to wrap.
@@ -106,7 +105,7 @@ protected:
class ParallelEclipseState : public EclipseState {
//! \brief Friend to set up props
template<class Grid>
friend class FieldPropsDataHandle;
friend class PropsCentroidsDataHandle;
public:
//! \brief Default constructor.
ParallelEclipseState();
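
The renamed handle still relies on the same friendship mechanism: ParallelFieldPropsManager and ParallelEclipseState expose their otherwise private property storage only to the data handle. A rough sketch of that relationship (m_doubleProps matches a member name visible in the diff; the rest is illustrative, not the real class definition):

```cpp
#include <map>
#include <string>
#include <vector>

namespace Opm {

template <class Grid> class PropsCentroidsDataHandle;    // forward declaration

// Rough sketch of the friendship pattern used here.
class ParallelFieldPropsManager {
    template <class Grid>
    friend class PropsCentroidsDataHandle;                // may fill the maps below

    std::map<std::string, std::vector<int>>    m_intProps;    // assumed member name
    std::map<std::string, std::vector<double>> m_doubleProps; // distributed double properties
};

} // namespace Opm
```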


@@ -23,8 +23,8 @@
* \author Markus Blatt, OPM-OP AS
*/
#ifndef FIELDPROPS_DATAHANDLE_HPP
#define FIELDPROPS_DATAHANDLE_HPP
#ifndef PROPS_CENTROIDS_DATAHANDLE_HPP
#define PROPS_CENTROIDS_DATAHANDLE_HPP
#if HAVE_MPI
#include <opm/simulators/utils/ParallelEclipseState.hpp>
@@ -35,17 +35,18 @@
#include <dune/common/parallel/mpihelper.hh>
#include <unordered_map>
#include <iostream>
namespace Opm
{
/*!
* \brief A Data handle t communicate the field properties during load balance.
* \brief A Data handle to communicate the field properties and cell centroids during load balance.
* \tparam Grid The type of grid where the load balancing is happening.
* \todo Maybe specialize this for CpGrid to save some space, later.
*/
template<class Grid>
class FieldPropsDataHandle
: public Dune::CommDataHandleIF< FieldPropsDataHandle<Grid>, double>
class PropsCentroidsDataHandle
: public Dune::CommDataHandleIF< PropsCentroidsDataHandle<Grid>, double>
{
public:
//! \brief the data type we send (ints are converted to double)
@@ -55,8 +56,17 @@ public:
//! \param grid The grid where the loadbalancing is happening.
//! \param eclState The parallel eclipse state: its global field properties are
//!                 read on the root rank and the distributed ones are stored in it.
FieldPropsDataHandle(const Grid& grid, ParallelEclipseState& eclState)
: m_grid(grid), m_distributed_fieldProps(eclState.m_fieldProps)
//! \param eclGridOnRoot A pointer to the eclipse grid on rank zero,
//! nullptr otherwise.
//! \param centroids Array to store the centroids in upon destruction
//! of the object.
//! \param cartMapper The cartesian index mapper of the grid.
PropsCentroidsDataHandle(const Grid& grid, ParallelEclipseState& eclState,
const EclipseGrid* eclGridOnRoot,
std::vector<double>& centroids,
const typename Dune::CartesianIndexMapper<Grid>& cartMapper)
: m_grid(grid), m_distributed_fieldProps(eclState.m_fieldProps),
m_centroids(centroids)
{
// Scatter the keys
const auto& comm = Dune::MPIHelper::getCollectiveCommunication();
@@ -75,7 +85,8 @@ public:
comm.broadcast(buffer.data(), position, 0);
// copy data to persistent map based on local id
auto noData = m_intKeys.size() + m_doubleKeys.size();
m_no_data = m_intKeys.size() + m_doubleKeys.size() +
Grid::dimensionworld;
const auto& idSet = m_grid.localIdSet();
const auto& gridView = m_grid.levelGridView(0);
using ElementMapper =
@@ -87,13 +98,18 @@ public:
const auto& id = idSet.id(element);
auto index = elemMapper.index(element);
auto& data = elementData_[id];
data.reserve(noData);
data.reserve(m_no_data);
for(const auto& intKey : m_intKeys)
for (const auto& intKey : m_intKeys)
data.push_back(globalProps.get_int(intKey)[index]);
for(const auto& doubleKey : m_doubleKeys)
for (const auto& doubleKey : m_doubleKeys)
data.push_back(globalProps.get_double(doubleKey)[index]);
auto cartIndex = cartMapper.cartesianIndex(index);
const auto& center = eclGridOnRoot->getCellCenter(cartIndex);
for (int dim = 0; dim < Grid::dimensionworld; ++dim)
data.push_back(center[dim]);
}
}
else
@@ -105,12 +121,12 @@ public:
int position{};
Mpi::unpack(m_intKeys, buffer, position, comm);
Mpi::unpack(m_doubleKeys, buffer, position, comm);
m_no_data = m_intKeys.size() + m_doubleKeys.size() +
Grid::dimensionworld;
}
}
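
The message the handle sends per element is built in a fixed order: all int properties first, then all double properties, then the dimensionworld centroid coordinates, which is why m_no_data = m_intKeys.size() + m_doubleKeys.size() + Grid::dimensionworld. A hypothetical helper spelling out that ordering contract (illustrative only, not code from the patch):

```cpp
#include <array>
#include <vector>

// Hypothetical helper illustrating the per-element layout the constructor builds
// and the destructor later consumes in the same order.
std::vector<double> packElement(const std::vector<double>& intPropsAsDouble,
                                const std::vector<double>& doubleProps,
                                const std::array<double, 3>& centroid)
{
    std::vector<double> data;
    data.reserve(intPropsAsDouble.size() + doubleProps.size() + centroid.size());
    data.insert(data.end(), intPropsAsDouble.begin(), intPropsAsDouble.end()); // ints sent as doubles
    data.insert(data.end(), doubleProps.begin(), doubleProps.end());
    data.insert(data.end(), centroid.begin(), centroid.end());                 // x, y, z last
    return data;
}
```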
FieldPropsDataHandle(const FieldPropsDataHandle&) = delete;
~FieldPropsDataHandle()
~PropsCentroidsDataHandle()
{
// distributed grid is now correctly set up.
for(const auto& intKey : m_intKeys)
@@ -119,6 +135,8 @@ public:
for(const auto& doubleKey : m_doubleKeys)
m_distributed_fieldProps.m_doubleProps[doubleKey].resize(m_grid.size(0));
m_centroids.resize(m_grid.size(0) * Grid::dimensionworld);
// copy data from the persistent map to the field properties
const auto& idSet = m_grid.localIdSet();
const auto& gridView = m_grid.levelGridView(0);
@@ -139,6 +157,11 @@ public:
for(const auto& doubleKey : m_doubleKeys)
m_distributed_fieldProps.m_doubleProps[doubleKey][index] = data->second[counter++];
auto centroidIter = m_centroids.begin() + Grid::dimensionworld * index;
auto centroidIterEnd = centroidIter + Grid::dimensionworld;
for ( ; centroidIter != centroidIterEnd; ++centroidIter )
*centroidIter = data->second[counter++];
}
}
@@ -159,7 +182,7 @@ public:
template<class EntityType>
std::size_t size(const EntityType /* entity */)
{
return m_intKeys.size() + m_doubleKeys.size();
return m_no_data;
}
template<class BufferType, class EntityType>
@@ -174,7 +197,7 @@ public:
template<class BufferType, class EntityType>
void scatter(BufferType& buffer, const EntityType& e, std::size_t n)
{
assert(n == m_intKeys.size() + m_doubleKeys.size());
assert(n == m_no_data);
auto& array = rcvdElementData_[m_grid.localIdSet().id(e)];
array.resize(n);
for(auto& data: array)
@@ -194,11 +217,19 @@ private:
std::vector<std::string> m_doubleKeys;
/// \brief The data per element as a vector mapped from the local id.
std::unordered_map<typename LocalIdSet::IdType, std::vector<double> > elementData_;
/// \brief The data per element as a vector mapped from the local id.
/*! \brief The data received per element as a vector mapped from the local id.
*
* Needed because CpGrid is in violation of the requirement that local ids
* need to be persistent across grid modifications
*/
std::unordered_map<typename LocalIdSet::IdType, std::vector<double> > rcvdElementData_;
/// \brief The cell centroids of the distributed grid.
std::vector<double>& m_centroids;
/// \brief The amount of data to send for each element
std::size_t m_no_data;
};
} // end namespace Opm
#endif // HAVE_MPI
#endif // FIELDPROPS_DATAHANDLE_HPP
#endif // PROPS_CENTROIDS_DATAHANDLE_HPP
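
For context, anything passed to CpGrid::loadBalance has to model Dune::CommDataHandleIF, which is why the class above provides contains, fixedsize, size, gather and scatter. A minimal, self-contained skeleton of that interface (method names as in Dune 2.6/2.7; the trivial bodies are placeholders, not taken from this file):

```cpp
#include <cstddef>
#include <dune/grid/common/datahandleif.hh>

// Minimal sketch of a Dune data handle for cell (codim 0) data; DataType is
// double, mirroring the choice made in PropsCentroidsDataHandle.
struct MinimalCellHandle
    : public Dune::CommDataHandleIF<MinimalCellHandle, double>
{
    bool contains(int /*dim*/, int codim) const { return codim == 0; } // only cell data
    bool fixedsize(int /*dim*/, int /*codim*/) const { return true; }  // same count per cell

    template <class Entity>
    std::size_t size(const Entity& /*entity*/) const { return 1; }     // one double per cell

    template <class Buffer, class Entity>
    void gather(Buffer& buffer, const Entity& /*entity*/) const { buffer.write(0.0); }

    template <class Buffer, class Entity>
    void scatter(Buffer& buffer, const Entity& /*entity*/, std::size_t n)
    {
        double value;
        for (std::size_t i = 0; i < n; ++i)
            buffer.read(value);                                         // consume received data
    }
};
```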