Refactored how well information is exported after load balancing.

Previously, we exported an unordered set containing the names of all
wells that are not present in the local part of the grid.

As we envision wells that are distributed across multiple processes,
this information is no longer sufficient. We need to be able to set up
communication for each well, and for that we need to know which
processes handle the perforations of each well.

We now export the full list of well names, each paired with a boolean
indicating whether the well perforates local cells (a vector of pairs
of string and bool).
Markus Blatt 2020-09-22 14:12:15 +02:00
parent a4ea6e9658
commit 1d94357558
5 changed files with 46 additions and 31 deletions
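
To illustrate the new export format and the lookup it enables, here is a minimal, self-contained sketch. The free-standing helper and the example data are hypothetical (the real code queries simulator_.vanguard().parallelWells() from member functions), and the sketch assumes the exported vector is sorted by well name, which is what the std::lower_bound searches in the diffs below rely on.

#include <algorithm>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-in for the exported data: one entry per well in the
// model, sorted by name, where the bool says whether this process owns
// cells perforated by that well.
using ParallelWells = std::vector<std::pair<std::string, bool>>;

// Mirrors the lookup pattern used in the changed files: a well is treated
// as defunct on this process unless its entry is exactly {name, true}.
bool isDefunctParallelWell(const ParallelWells& wells, const std::string& name)
{
    const std::pair<std::string, bool> value{name, true};
    const auto candidate = std::lower_bound(wells.begin(), wells.end(), value);
    return candidate == wells.end() || *candidate != value;
}

int main()
{
    // Example data for one process: PROD1 perforates local cells, INJ1 does not.
    const ParallelWells wells{{"INJ1", false}, {"PROD1", true}};

    std::cout << std::boolalpha
              << isDefunctParallelWell(wells, "INJ1") << '\n'    // true
              << isDefunctParallelWell(wells, "PROD1") << '\n';  // false
}

Because every process receives the complete, identically ordered well list, each rank can answer the locality question with a binary search and can later use the same list to establish per-well communication.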

View File

@@ -579,15 +579,15 @@ public:
void equilCartesianCoordinate(unsigned cellIdx, std::array<int,3>& ijk) const
{ return asImp_().equilCartesianIndexMapper().cartesianCoordinate(cellIdx, ijk); }
/*!
* \brief Return the names of the wells which do not penetrate any cells on the local
* process.
* \brief Returns a vector of all well names, each paired with a boolean
* indicating whether the well perforates cells on the local process.
*
* This is a kludge around the fact that for distributed grids, not all wells are
* seen by all processes.
* Will only have usable values for CpGrid.
*/
std::unordered_set<std::string> defunctWellNames() const
{ return std::unordered_set<std::string>(); }
const std::vector<std::pair<std::string,bool>>& parallelWells() const
{ return parallelWells_; }
/*!
* \brief Get the cell centroids for a distributed grid.
@@ -683,6 +683,12 @@ protected:
* Empty otherwise. Used by EclTransmissibility.
*/
std::vector<double> centroids_;
/*! \brief Information about wells in parallel runs.
*
* For each well in the model there is an entry with its name
* and a boolean indicating whether it perforates local cells.
*/
std::vector<std::pair<std::string,bool>> parallelWells_;
};
template <class TypeTag>

View File

@@ -218,7 +218,7 @@ public:
PropsCentroidsDataHandle<Dune::CpGrid> handle(*grid_, eclState, eclGrid, this->centroids_,
cartesianIndexMapper());
defunctWellNames_ = std::get<1>(grid_->loadBalance(handle, edgeWeightsMethod, &wells, faceTrans.data(), ownersFirst));
this->parallelWells_ = std::get<1>(grid_->loadBalance(handle, edgeWeightsMethod, &wells, faceTrans.data(), ownersFirst));
}
catch(const std::bad_cast& e)
{
@@ -295,9 +295,6 @@ public:
return *equilCartesianIndexMapper_;
}
std::unordered_set<std::string> defunctWellNames() const
{ return defunctWellNames_; }
const EclTransmissibility<TypeTag>& globalTransmissibility() const
{
assert( globalTrans_ != nullptr );
@@ -361,7 +358,6 @@ protected:
std::unique_ptr<CartesianIndexMapper> equilCartesianIndexMapper_;
std::unique_ptr<EclTransmissibility<TypeTag> > globalTrans_;
std::unordered_set<std::string> defunctWellNames_;
int mpiRank;
};

View File

@@ -322,8 +322,7 @@ public:
for (const auto& well: schedule.getWells(reportStepNum)) {
// don't bother with wells not on this process
const auto& defunctWellNames = simulator_.vanguard().defunctWellNames();
if (defunctWellNames.find(well.name()) != defunctWellNames.end()) {
if (isDefunctParallelWell(well.name())) {
continue;
}
@@ -903,8 +902,7 @@ public:
for (const auto& well: schedule.getWells(reportStepNum)) {
// don't bother with wells not on this process
const auto& defunctWellNames = simulator_.vanguard().defunctWellNames();
if (defunctWellNames.find(well.name()) != defunctWellNames.end()) {
if (isDefunctParallelWell(well.name())) {
continue;
}
@@ -1332,8 +1330,7 @@ public:
for (const auto& wname: schedule.wellNames(reportStepNum)) {
// don't bother with wells not on this process
const auto& defunctWellNames = simulator_.vanguard().defunctWellNames();
if (defunctWellNames.find(wname) != defunctWellNames.end()) {
if (isDefunctParallelWell(wname)) {
continue;
}
@@ -1432,8 +1429,7 @@ public:
for (const auto& wname: schedule.wellNames(reportStepNum)) {
// don't bother with wells not on this process
const auto& defunctWellNames = simulator_.vanguard().defunctWellNames();
if (defunctWellNames.find(wname) != defunctWellNames.end()) {
if (isDefunctParallelWell(wname)) {
continue;
}
@@ -1561,8 +1557,7 @@ public:
for (const auto& wname : schedule.wellNames(reportStepNum)) {
// don't bother with wells not on this process
const auto& defunctWellNames = simulator_.vanguard().defunctWellNames();
if (defunctWellNames.find(wname) != defunctWellNames.end()) {
if (isDefunctParallelWell(wname)) {
continue;
}
@@ -1830,6 +1825,17 @@ public:
private:
bool isDefunctParallelWell(std::string wname) const
{
if (simulator_.gridView().comm().size()==1)
return false;
const auto& parallelWells = simulator_.vanguard().parallelWells();
std::pair<std::string,bool> value{wname, true};
auto candidate = std::lower_bound(parallelWells.begin(), parallelWells.end(),
value);
return candidate == parallelWells.end() || *candidate != value;
}
bool isIORank_() const
{
const auto& comm = simulator_.gridView().comm();

View File

@@ -280,6 +280,8 @@ namespace Opm {
std::vector<bool> is_cell_perforated_;
std::function<bool(const Well&)> is_shut_or_defunct_;
void initializeWellPerfData();
// create the well container

View File

@@ -25,6 +25,7 @@
#include <opm/core/props/phaseUsageFromDeck.hpp>
#include <utility>
#include <algorithm>
namespace Opm {
template<typename TypeTag>
@@ -53,6 +54,18 @@ namespace Opm {
const auto& cartDims = Opm::UgGridHelpers::cartDims(grid);
setupCartesianToCompressed_(Opm::UgGridHelpers::globalCell(grid),
cartDims[0]*cartDims[1]*cartDims[2]);
is_shut_or_defunct_ = [&ebosSimulator](const Well& well) {
if (well.getStatus() == Well::Status::SHUT)
return true;
if (ebosSimulator.gridView().comm().size() == 1)
return false;
std::pair<std::string, bool> value{well.name(), true}; // second entry is true only if the well perforates local cells
const auto& parallel_wells = ebosSimulator.vanguard().parallelWells();
auto candidate = std::lower_bound(parallel_wells.begin(), parallel_wells.end(),
value);
return candidate == parallel_wells.end() || *candidate != value;
};
}
template<typename TypeTag>
@@ -215,13 +228,9 @@ namespace Opm {
int globalNumWells = 0;
// Make wells_ecl_ contain only this partition's non-shut wells.
{
const auto& defunct_well_names = ebosSimulator_.vanguard().defunctWellNames();
auto is_shut_or_defunct = [&defunct_well_names](const Well& well) {
return (well.getStatus() == Well::Status::SHUT) || (defunct_well_names.find(well.name()) != defunct_well_names.end());
};
auto w = schedule().getWells(timeStepIdx);
globalNumWells = w.size();
w.erase(std::remove_if(w.begin(), w.end(), is_shut_or_defunct), w.end());
w.erase(std::remove_if(w.begin(), w.end(), is_shut_or_defunct_), w.end());
wells_ecl_.swap(w);
}
initializeWellPerfData();
@@ -529,13 +538,9 @@ namespace Opm {
int globalNumWells = 0;
// Make wells_ecl_ contain only this partition's non-shut wells.
{
const auto& defunct_well_names = ebosSimulator_.vanguard().defunctWellNames();
auto is_shut_or_defunct = [&defunct_well_names](const Well& well) {
return (well.getStatus() == Well::Status::SHUT) || (defunct_well_names.find(well.name()) != defunct_well_names.end());
};
auto w = schedule().getWells(report_step);
globalNumWells = w.size();
w.erase(std::remove_if(w.begin(), w.end(), is_shut_or_defunct), w.end());
w.erase(std::remove_if(w.begin(), w.end(), is_shut_or_defunct_), w.end());
wells_ecl_.swap(w);
}