Merge pull request #2802 from blattms/refactor-defunct-wells

Refactored how well information is exported after load balancing.
This commit is contained in:
Atgeirr Flø Rasmussen 2020-09-23 08:54:35 +02:00 committed by GitHub
commit 237b281f09
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 46 additions and 31 deletions

View File

@ -579,15 +579,15 @@ public:
void equilCartesianCoordinate(unsigned cellIdx, std::array<int,3>& ijk) const void equilCartesianCoordinate(unsigned cellIdx, std::array<int,3>& ijk) const
{ return asImp_().equilCartesianIndexMapper().cartesianCoordinate(cellIdx, ijk); } { return asImp_().equilCartesianIndexMapper().cartesianCoordinate(cellIdx, ijk); }
/*! /*!
* \brief Return the names of the wells which do not penetrate any cells on the local * \brief Returns vector with name and whether the well has locally perforated cells
* process. * for all wells.
* *
* This is a kludge around the fact that for distributed grids, not all wells are * Will only have usable values for CpGrid.
* seen by all proccesses.
*/ */
std::unordered_set<std::string> defunctWellNames() const const std::vector<std::pair<std::string,bool>>& parallelWells() const
{ return std::unordered_set<std::string>(); } { return parallelWells_; }
/*! /*!
* \brief Get the cell centroids for a distributed grid. * \brief Get the cell centroids for a distributed grid.
@ -683,6 +683,12 @@ protected:
* Empty otherwise. Used by EclTransmissibilty. * Empty otherwise. Used by EclTransmissibilty.
*/ */
std::vector<double> centroids_; std::vector<double> centroids_;
/*! \brief information about wells in parallel
*
* For each well in the model there is an entry with its name
* and a boolean indicating whether it perforates local cells.
*/
std::vector<std::pair<std::string,bool>> parallelWells_;
}; };
template <class TypeTag> template <class TypeTag>

View File

@ -218,7 +218,7 @@ public:
PropsCentroidsDataHandle<Dune::CpGrid> handle(*grid_, eclState, eclGrid, this->centroids_, PropsCentroidsDataHandle<Dune::CpGrid> handle(*grid_, eclState, eclGrid, this->centroids_,
cartesianIndexMapper()); cartesianIndexMapper());
defunctWellNames_ = std::get<1>(grid_->loadBalance(handle, edgeWeightsMethod, &wells, faceTrans.data(), ownersFirst)); this->parallelWells_ = std::get<1>(grid_->loadBalance(handle, edgeWeightsMethod, &wells, faceTrans.data(), ownersFirst));
} }
catch(const std::bad_cast& e) catch(const std::bad_cast& e)
{ {
@ -295,9 +295,6 @@ public:
return *equilCartesianIndexMapper_; return *equilCartesianIndexMapper_;
} }
std::unordered_set<std::string> defunctWellNames() const
{ return defunctWellNames_; }
const EclTransmissibility<TypeTag>& globalTransmissibility() const const EclTransmissibility<TypeTag>& globalTransmissibility() const
{ {
assert( globalTrans_ != nullptr ); assert( globalTrans_ != nullptr );
@ -361,7 +358,6 @@ protected:
std::unique_ptr<CartesianIndexMapper> equilCartesianIndexMapper_; std::unique_ptr<CartesianIndexMapper> equilCartesianIndexMapper_;
std::unique_ptr<EclTransmissibility<TypeTag> > globalTrans_; std::unique_ptr<EclTransmissibility<TypeTag> > globalTrans_;
std::unordered_set<std::string> defunctWellNames_;
int mpiRank; int mpiRank;
}; };

View File

@ -322,8 +322,7 @@ public:
for (const auto& well: schedule.getWells(reportStepNum)) { for (const auto& well: schedule.getWells(reportStepNum)) {
// don't bother with wells not on this process // don't bother with wells not on this process
const auto& defunctWellNames = simulator_.vanguard().defunctWellNames(); if (isDefunctParallelWell(well.name())) {
if (defunctWellNames.find(well.name()) != defunctWellNames.end()) {
continue; continue;
} }
@ -903,8 +902,7 @@ public:
for (const auto& well: schedule.getWells(reportStepNum)) { for (const auto& well: schedule.getWells(reportStepNum)) {
// don't bother with wells not on this process // don't bother with wells not on this process
const auto& defunctWellNames = simulator_.vanguard().defunctWellNames(); if (isDefunctParallelWell(well.name())) {
if (defunctWellNames.find(well.name()) != defunctWellNames.end()) {
continue; continue;
} }
@ -1332,8 +1330,7 @@ public:
for (const auto& wname: schedule.wellNames(reportStepNum)) { for (const auto& wname: schedule.wellNames(reportStepNum)) {
// don't bother with wells not on this process // don't bother with wells not on this process
const auto& defunctWellNames = simulator_.vanguard().defunctWellNames(); if (isDefunctParallelWell(wname)) {
if (defunctWellNames.find(wname) != defunctWellNames.end()) {
continue; continue;
} }
@ -1432,8 +1429,7 @@ public:
for (const auto& wname: schedule.wellNames(reportStepNum)) { for (const auto& wname: schedule.wellNames(reportStepNum)) {
// don't bother with wells not on this process // don't bother with wells not on this process
const auto& defunctWellNames = simulator_.vanguard().defunctWellNames(); if (isDefunctParallelWell(wname)) {
if (defunctWellNames.find(wname) != defunctWellNames.end()) {
continue; continue;
} }
@ -1561,8 +1557,7 @@ public:
for (const auto& wname : schedule.wellNames(reportStepNum)) { for (const auto& wname : schedule.wellNames(reportStepNum)) {
// don't bother with wells not on this process // don't bother with wells not on this process
const auto& defunctWellNames = simulator_.vanguard().defunctWellNames(); if (isDefunctParallelWell(wname)) {
if (defunctWellNames.find(wname) != defunctWellNames.end()) {
continue; continue;
} }
@ -1830,6 +1825,17 @@ public:
private: private:
/*! \brief Whether a well is defunct (not active) on this process.
 *
 * In a parallel run a well is defunct on this rank if it does not
 * perforate any locally owned cells; such wells are skipped when
 * writing per-well output. In a sequential run no well is defunct.
 *
 * \param wname Name of the well to look up.
 * \return true if the well should be skipped on this process.
 */
bool isDefunctParallelWell(std::string wname) const
{
    // Sequential run: the single process sees every well.
    if (simulator_.gridView().comm().size() == 1)
        return false;

    // parallelWells() holds {name, perforates-local-cells} pairs,
    // assumed sorted (the binary search relies on this). A well that
    // is active here is stored as {name, true}; anything else —
    // absent entirely or stored with false — is defunct on this rank.
    const auto& parallelWells = simulator_.vanguard().parallelWells();
    const std::pair<std::string, bool> activeEntry{std::move(wname), true};
    return !std::binary_search(parallelWells.begin(), parallelWells.end(),
                               activeEntry);
}
bool isIORank_() const bool isIORank_() const
{ {
const auto& comm = simulator_.gridView().comm(); const auto& comm = simulator_.gridView().comm();

View File

@ -280,6 +280,8 @@ namespace Opm {
std::vector<bool> is_cell_perforated_; std::vector<bool> is_cell_perforated_;
std::function<bool(const Well&)> is_shut_or_defunct_;
void initializeWellPerfData(); void initializeWellPerfData();
// create the well container // create the well container

View File

@ -25,6 +25,7 @@
#include <opm/core/props/phaseUsageFromDeck.hpp> #include <opm/core/props/phaseUsageFromDeck.hpp>
#include <utility> #include <utility>
#include <algorithm>
namespace Opm { namespace Opm {
template<typename TypeTag> template<typename TypeTag>
@ -53,6 +54,18 @@ namespace Opm {
const auto& cartDims = Opm::UgGridHelpers::cartDims(grid); const auto& cartDims = Opm::UgGridHelpers::cartDims(grid);
setupCartesianToCompressed_(Opm::UgGridHelpers::globalCell(grid), setupCartesianToCompressed_(Opm::UgGridHelpers::globalCell(grid),
cartDims[0]*cartDims[1]*cartDims[2]); cartDims[0]*cartDims[1]*cartDims[2]);
is_shut_or_defunct_ = [&ebosSimulator](const Well& well) {
if (well.getStatus() == Well::Status::SHUT)
return true;
if (ebosSimulator.gridView().comm().size() == 1)
return false;
std::pair<std::string, bool> value{well.name(), true}; // false indicate not active!
const auto& parallel_wells = ebosSimulator.vanguard().parallelWells();
auto candidate = std::lower_bound(parallel_wells.begin(), parallel_wells.end(),
value);
return candidate == parallel_wells.end() || *candidate != value;
};
} }
template<typename TypeTag> template<typename TypeTag>
@ -215,13 +228,9 @@ namespace Opm {
int globalNumWells = 0; int globalNumWells = 0;
// Make wells_ecl_ contain only this partition's non-shut wells. // Make wells_ecl_ contain only this partition's non-shut wells.
{ {
const auto& defunct_well_names = ebosSimulator_.vanguard().defunctWellNames();
auto is_shut_or_defunct = [&defunct_well_names](const Well& well) {
return (well.getStatus() == Well::Status::SHUT) || (defunct_well_names.find(well.name()) != defunct_well_names.end());
};
auto w = schedule().getWells(timeStepIdx); auto w = schedule().getWells(timeStepIdx);
globalNumWells = w.size(); globalNumWells = w.size();
w.erase(std::remove_if(w.begin(), w.end(), is_shut_or_defunct), w.end()); w.erase(std::remove_if(w.begin(), w.end(), is_shut_or_defunct_), w.end());
wells_ecl_.swap(w); wells_ecl_.swap(w);
} }
initializeWellPerfData(); initializeWellPerfData();
@ -529,13 +538,9 @@ namespace Opm {
int globalNumWells = 0; int globalNumWells = 0;
// Make wells_ecl_ contain only this partition's non-shut wells. // Make wells_ecl_ contain only this partition's non-shut wells.
{ {
const auto& defunct_well_names = ebosSimulator_.vanguard().defunctWellNames();
auto is_shut_or_defunct = [&defunct_well_names](const Well& well) {
return (well.getStatus() == Well::Status::SHUT) || (defunct_well_names.find(well.name()) != defunct_well_names.end());
};
auto w = schedule().getWells(report_step); auto w = schedule().getWells(report_step);
globalNumWells = w.size(); globalNumWells = w.size();
w.erase(std::remove_if(w.begin(), w.end(), is_shut_or_defunct), w.end()); w.erase(std::remove_if(w.begin(), w.end(), is_shut_or_defunct_), w.end());
wells_ecl_.swap(w); wells_ecl_.swap(w);
} }