Also distribute the cell centroids when load balancing CpGrid.

The centroids are attached to the cells as well and are now distributed
during CpGrid::loadBalance. To reflect this change, FieldPropsDataHandle
is renamed to PropsCentroidsDataHandle.
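
For orientation, the sketch below shows roughly what a cell data handle built on Dune::CommDataHandleIF can look like. It is a minimal, illustrative stand-in and not the actual PropsCentroidsDataHandle: the class and member names are made up, and the real handle additionally transfers the field properties and takes the centers from the input EclipseGrid on the I/O rank instead of from the element geometry (which is why an EclipseGrid pointer is passed in the diff below).

#include <dune/grid/common/datahandleif.hh>
#include <cstddef>
#include <vector>

// Illustrative only: communicates dimensionworld centroid coordinates per cell
// while CpGrid::loadBalance redistributes the grid. The real
// PropsCentroidsDataHandle also transfers the field properties and maps the
// received coordinates to the new local cell indices.
template <class Grid>
class CellCentroidsDataHandle
    : public Dune::CommDataHandleIF<CellCentroidsDataHandle<Grid>, double>
{
public:
    explicit CellCentroidsDataHandle(std::vector<double>& centroids)
        : centroids_(centroids)
    {}

    bool contains(int /*dim*/, int codim) const
    { return codim == 0; }            // only cell (codim 0) data travels

    bool fixedsize(int /*dim*/, int /*codim*/) const   // fixedSize() in newer Dune
    { return true; }                  // always dimensionworld doubles per cell

    template <class Entity>
    std::size_t size(const Entity&) const
    { return Grid::dimensionworld; }

    template <class Buffer, class Entity>
    void gather(Buffer& buffer, const Entity& element) const
    {
        // sending side: write the coordinates of the cell center
        const auto center = element.geometry().center();
        for (int i = 0; i < Grid::dimensionworld; ++i)
            buffer.write(center[i]);
    }

    template <class Buffer, class Entity>
    void scatter(Buffer& buffer, const Entity& /*element*/, std::size_t n)
    {
        // receiving side: read the coordinates; the real handle keys them by
        // the cell's global id and reorders them by local index afterwards
        for (std::size_t i = 0; i < n; ++i) {
            double coordinate;
            buffer.read(coordinate);
            centroids_.push_back(coordinate);
        }
    }

private:
    std::vector<double>& centroids_;  // flat storage, dimensionworld per cell
};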
Markus Blatt
2020-03-04 21:06:21 +01:00
parent 1b03b040a3
commit 4c962e61d1
6 changed files with 78 additions and 65 deletions

View File

@@ -533,6 +533,15 @@ public:
std::unordered_set<std::string> defunctWellNames() const
{ return std::unordered_set<std::string>(); }
/*!
* \brief Get the cell centroids for a distributed grid.
*
* Currently this is only non-empty for a load-balanced CpGrid.
*/
const std::vector<double>& cellCentroids() const
{
return centroids_;
}
protected:
void callImplementationInit()
{
@@ -610,6 +619,12 @@ private:
Opm::SummaryConfig* eclSummaryConfig_;
Dune::EdgeWeightMethod edgeWeightsMethod_;
protected:
/*! \brief The cell centroids after loadBalance was called.
* Empty otherwise. Used by EclTransmissibility.
*/
std::vector<double> centroids_;
};
template <class TypeTag>
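
Consumers can now query the vanguard for these values instead of rebuilding them. A minimal usage sketch, assuming the flat vector stores dimWorld coordinates per cell ordered by local element index (the vanguard and index variables here are illustrative):

const std::vector<double>& centroids = vanguard.cellCentroids();
if (!centroids.empty()) {                  // empty unless a CpGrid was load balanced
    const std::size_t elemIdx = 0;         // some local cell index
    const double* center = &centroids[elemIdx * dimWorld];
    // center[0] .. center[dimWorld - 1] are the coordinates of that cell's centroid
}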

View File

@@ -34,7 +34,7 @@
#include <opm/grid/CpGrid.hpp>
#include <opm/grid/cpgrid/GridHelpers.hpp>
#include <opm/simulators/utils/ParallelEclipseState.hpp>
#include <opm/simulators/utils/FieldPropsDataHandle.hpp>
#include <opm/simulators/utils/PropsCentroidsDataHandle.hpp>
#include <dune/grid/common/mcmgmapper.hh>
@@ -190,7 +190,15 @@ public:
{
const auto wells = this->schedule().getWellsatEnd();
auto& eclState = static_cast<ParallelEclipseState&>(this->eclState());
FieldPropsDataHandle<Dune::CpGrid> handle(*grid_, eclState);
const EclipseGrid* eclGrid = nullptr;
if (grid_->comm().rank() == 0)
{
eclGrid = &this->eclState().getInputGrid();
}
PropsCentroidsDataHandle<Dune::CpGrid> handle(*grid_, eclState, eclGrid, this->centroids_,
cartesianIndexMapper());
defunctWellNames_ = std::get<1>(grid_->loadBalance(handle, edgeWeightsMethod, &wells, faceTrans.data()));
}
grid_->switchToDistributedView();

View File

@@ -149,47 +149,7 @@ public:
for (unsigned dimIdx = 0; dimIdx < dimWorld; ++dimIdx)
axisCentroids[dimIdx].resize(numElements);
std::vector<double> centroids;
#if HAVE_MPI
size_t cells = vanguard_.grid().numCells();
if (global && comm.size() > 1) {
std::vector<size_t> sizes(comm.size());
if (comm.rank() == 0) {
const auto& eclGrid = eclState.getInputGrid();
comm.gather(&cells, sizes.data(), 1, 0);
for (int i = 1; i < comm.size(); ++i) {
std::vector<int> cell_id(sizes[i]);
MPI_Recv(cell_id.data(), sizes[i], MPI_INT,
i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
centroids.resize(dimWorld * sizes[i]);
auto cIt = centroids.begin();
for (int idx : cell_id) {
const auto& centroid = eclGrid.getCellCenter(idx);
for (const auto& it : centroid)
*cIt++ = it;
}
MPI_Send(centroids.data(), dimWorld * sizes[i],
MPI_DOUBLE, i, 0, MPI_COMM_WORLD);
}
centroids.clear();
} else {
comm.gather(&cells, sizes.data(), 1, 0);
std::vector<int> cell_ids;
cell_ids.reserve(cells);
auto elemIt = gridView.template begin</*codim=*/ 0>();
const auto& elemEndIt = gridView.template end</*codim=*/ 0>();
for (; elemIt != elemEndIt; ++elemIt) {
const auto& elem = *elemIt;
cell_ids.push_back(cartMapper.cartesianIndex(elemMapper.index(elem)));
}
MPI_Send(cell_ids.data(), cells, MPI_INT, 0, 0, MPI_COMM_WORLD);
centroids.resize(cells * dimWorld);
MPI_Recv(centroids.data(), dimWorld * cells, MPI_DOUBLE,
0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
}
#endif
const std::vector<double>& centroids = vanguard_.cellCentroids();
auto elemIt = gridView.template begin</*codim=*/ 0>();
const auto& elemEndIt = gridView.template end</*codim=*/ 0>();
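
The remainder of this loop is not part of the excerpt. As a hedged sketch of how the communicated centroids could be consumed here, assuming dimWorld consecutive doubles per cell and a fallback to the input grid's cell centers when the vector is empty (serial run), and reusing the names axisCentroids, elemMapper, cartMapper and eclState from the surrounding context:

// Illustrative continuation of the loop above: use the communicated centroid
// when one is available, otherwise fall back to the input grid's cell center.
for (; elemIt != elemEndIt; ++elemIt) {
    const auto& elem = *elemIt;
    const unsigned elemIdx = elemMapper.index(elem);

    if (!centroids.empty()) {
        // load-balanced run: dimWorld consecutive doubles per cell
        for (unsigned dimIdx = 0; dimIdx < dimWorld; ++dimIdx)
            axisCentroids[dimIdx][elemIdx] = centroids[elemIdx * dimWorld + dimIdx];
    } else {
        // serial run: take the cell center straight from the input grid
        const auto& center =
            eclState.getInputGrid().getCellCenter(cartMapper.cartesianIndex(elemIdx));
        for (unsigned dimIdx = 0; dimIdx < dimWorld; ++dimIdx)
            axisCentroids[dimIdx][elemIdx] = center[dimIdx];
    }
}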