Merge pull request #5609 from vkip/use_only_active_wells_in_partitioning

In partitioning, only account for wells that will be active at some point in time
Markus Blatt 2024-10-21 07:35:28 +02:00 committed by GitHub
commit afcfedb9f1
5 changed files with 82 additions and 1 deletion
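For orientation before the diffs: a minimal, self-contained sketch of the selection rule this change introduces. The Well struct, everActive flag and activeWellsAtEnd helper here are illustrative stand-ins only; the real change below calls Opm::Schedule::getActiveWellsAtEnd() guarded by (mpiSize > 1) || partitionJacobiBlocks.

    // Illustrative stand-in, not OPM code: only wells that are active at some
    // report step are handed to the partitioner; wells that never open are skipped.
    #include <iostream>
    #include <string>
    #include <vector>

    struct Well { std::string name; bool everActive; };

    std::vector<Well> activeWellsAtEnd(const std::vector<Well>& allWells)
    {
        std::vector<Well> result;
        for (const auto& w : allWells) {
            if (w.everActive) {
                result.push_back(w);
            }
        }
        return result;
    }

    int main()
    {
        const std::vector<Well> deck {{"PROD1", true}, {"INJ1", true}, {"DUMMY", false}};
        const bool needPartitioning = true; // stands in for (mpiSize > 1) || partitionJacobiBlocks

        const auto wells = needPartitioning ? activeWellsAtEnd(deck) : std::vector<Well>{};
        for (const auto& w : wells) {
            std::cout << w.name << '\n'; // prints PROD1 and INJ1, not DUMMY
        }
    }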

View File

@@ -435,6 +435,8 @@ public:
this->outputModule_->assignToSolution(localCellData);
}
// Collect RFT data on rank 0
this->outputModule_->accumulateRftDataParallel(simulator_.gridView().comm());
// Add cell data to perforations for RFT output
this->outputModule_->addRftDataToWells(localWellData, reportStepNum);
}

View File

@@ -35,6 +35,7 @@
#include <opm/input/eclipse/Schedule/Schedule.hpp>
#include <opm/input/eclipse/Schedule/Well/Well.hpp>
#include <opm/input/eclipse/Schedule/Well/WellConnections.hpp>
#include <opm/input/eclipse/EclipseState/Grid/LgrCollection.hpp>
#include <opm/simulators/utils/ParallelEclipseState.hpp>
@@ -194,7 +195,7 @@ doLoadBalance_(const Dune::EdgeWeightMethod edgeWeightsMethod,
}
const auto wells = ((mpiSize > 1) || partitionJacobiBlocks)
-   ? schedule.getWellsatEnd()
+   ? schedule.getActiveWellsAtEnd()
    : std::vector<Well>{};
const auto& possibleFutureConnections = schedule.getPossibleFutureConnections();
// Distribute the grid and switch to the distributed view.
@@ -207,6 +208,40 @@ doLoadBalance_(const Dune::EdgeWeightMethod edgeWeightsMethod,
eclState1, parallelWells);
}

// Add inactive wells to all ranks with connections (not solved, so OK even without distributed wells)
std::unordered_set<unsigned> cellOnRank;
const auto& global_cells = this->grid_->globalCell();
for (const auto cell : global_cells) cellOnRank.insert(cell);
const auto& comm = this->grid_->comm();
const auto nranks = comm.size();
const auto inactive_well_names = schedule.getInactiveWellNamesAtEnd();
std::size_t num_wells = inactive_well_names.size();
std::vector<int> well_on_rank(num_wells, 0);
std::size_t well_idx = 0;
for (const auto& well_name : inactive_well_names) {
    const auto& well = schedule.getWell(well_name, schedule.size() - 1);
    for (const auto& conn : well.getConnections()) {
        if (cellOnRank.count(conn.global_index()) > 0) {
            well_on_rank[well_idx] = 1;
            break;
        }
    }
    ++well_idx;
}

// Values from rank i will be at indices i*num_wells, i*num_wells + 1, ..., (i+1)*num_wells - 1
std::vector<int> well_on_rank_global(num_wells * nranks, 0);
comm.allgather(well_on_rank.data(), static_cast<int>(num_wells), well_on_rank_global.data());
well_idx = 0;
for (const auto& well_name : inactive_well_names) {
    for (int i = 0; i < nranks; ++i) {
        if (well_on_rank_global[i*num_wells + well_idx]) {
            parallelWells.emplace_back(well_name, i);
        }
    }
    ++well_idx;
}
std::sort(parallelWells.begin(), parallelWells.end());
// Calling Schedule::filterConnections would remove any perforated
// cells that exist only on other ranks even in the case of
// distributed wells. But we need all connections to figure out the
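As a side note on the gather layout used in the block above, here is a standalone sketch (not part of the patch; comm.allgather is replaced by plain concatenation so it runs serially): rank i's num_wells flags land at indices i*num_wells .. (i+1)*num_wells - 1, and every (well, rank) pair with a set flag is appended to the parallel-well list.

    // Standalone illustration of the allgather index layout and decoding loop.
    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    int main()
    {
        const std::vector<std::string> inactive_well_names {"W1", "W2", "W3"};
        const std::size_t num_wells = inactive_well_names.size();

        // Per-rank flags: 1 if the well perforates a cell owned by that rank.
        const std::vector<std::vector<int>> well_on_rank {
            {1, 0, 0},  // rank 0 owns a cell of W1
            {0, 1, 1},  // rank 1 owns cells of W2 and W3
        };
        const int nranks = static_cast<int>(well_on_rank.size());

        // Stand-in for comm.allgather(): concatenate the per-rank vectors, so
        // rank i's data occupies indices [i*num_wells, (i+1)*num_wells).
        std::vector<int> well_on_rank_global;
        for (const auto& flags : well_on_rank) {
            well_on_rank_global.insert(well_on_rank_global.end(), flags.begin(), flags.end());
        }

        // Same decoding as in the patch: emit (well name, rank) for every rank
        // that owns at least one perforated cell of the inactive well.
        std::vector<std::pair<std::string, int>> parallel_wells;
        for (std::size_t well_idx = 0; well_idx < num_wells; ++well_idx) {
            for (int i = 0; i < nranks; ++i) {
                if (well_on_rank_global[i*num_wells + well_idx]) {
                    parallel_wells.emplace_back(inactive_well_names[well_idx], i);
                }
            }
        }

        for (const auto& [name, rank] : parallel_wells) {
            std::cout << name << " -> rank " << rank << '\n'; // W1->0, W2->1, W3->1
        }
    }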

View File

@@ -391,6 +391,42 @@ outputFipAndResvLog(const Inplace& inplace,
    }
}

template<class FluidSystem>
void GenericOutputBlackoilModule<FluidSystem>::
accumulateRftDataParallel(const Parallel::Communication& comm) {
    if (comm.size() > 1) {
        collectRftMapOnRoot(oilConnectionPressures_, comm);
        collectRftMapOnRoot(waterConnectionSaturations_, comm);
        collectRftMapOnRoot(gasConnectionSaturations_, comm);
    }
}

template<class FluidSystem>
void GenericOutputBlackoilModule<FluidSystem>::
collectRftMapOnRoot(std::map<std::size_t, Scalar>& local_map, const Parallel::Communication& comm) {
    std::vector<std::pair<int, Scalar>> pairs(local_map.begin(), local_map.end());
    std::vector<std::pair<int, Scalar>> all_pairs;
    std::vector<int> offsets;
    std::tie(all_pairs, offsets) = Opm::gatherv(pairs, comm, 0);

    // Insert/update map values on root
    if (comm.rank() == 0) {
        for (auto i = static_cast<std::size_t>(offsets[1]); i < all_pairs.size(); ++i) {
            const auto& key_value = all_pairs[i];
            if (auto candidate = local_map.find(key_value.first); candidate != local_map.end()) {
                const Scalar prev_value = candidate->second;
                candidate->second = std::max(prev_value, key_value.second);
            } else {
                local_map[key_value.first] = key_value.second;
            }
        }
    }
}

template<class FluidSystem>
void GenericOutputBlackoilModule<FluidSystem>::
addRftDataToWells(data::Wells& wellDatas, std::size_t reportStepNum)
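A standalone sketch of the root-side merge in collectRftMapOnRoot above (the gathered pairs and offsets are hard-coded here in the layout Opm::gatherv would hand rank 0): pairs from the other ranks are folded into the root's map, keeping the maximum where a cell index already exists and inserting it otherwise.

    // Illustration of the merge semantics only; no MPI involved.
    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <map>
    #include <utility>
    #include <vector>

    int main()
    {
        using Scalar = double;

        // Root's local RFT values, keyed by global cell index.
        std::map<std::size_t, Scalar> local_map {{10, 250.0}, {11, 245.0}};

        // Gathered data on rank 0: all ranks' pairs back to back, with
        // offsets[r] marking where rank r's contribution starts.
        const std::vector<std::pair<int, Scalar>> all_pairs {
            {10, 250.0}, {11, 245.0},  // rank 0 (already present in local_map)
            {11, 248.0}, {12, 240.0},  // rank 1
        };
        const std::vector<int> offsets {0, 2, 4};

        // Same merge as the patch: start at offsets[1] to skip rank 0's own data.
        for (auto i = static_cast<std::size_t>(offsets[1]); i < all_pairs.size(); ++i) {
            const auto& [cell, value] = all_pairs[i];
            if (auto it = local_map.find(cell); it != local_map.end()) {
                it->second = std::max(it->second, value);
            } else {
                local_map[cell] = value;
            }
        }

        for (const auto& [cell, value] : local_map) {
            std::cout << "cell " << cell << ": " << value << '\n'; // 10:250, 11:248, 12:240
        }
    }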

View File

@@ -109,6 +109,8 @@ public:
void outputErrorLog(const Parallel::Communication& comm) const;

void accumulateRftDataParallel(const Parallel::Communication& comm);

void addRftDataToWells(data::Wells& wellDatas,
                       std::size_t reportStepNum);
@@ -381,6 +383,8 @@ protected:
virtual bool isDefunctParallelWell(std::string wname) const = 0;

void collectRftMapOnRoot(std::map<std::size_t, Scalar>& local_map, const Parallel::Communication& comm);

const EclipseState& eclState_;
const Schedule& schedule_;
const SummaryState& summaryState_;

View File

@@ -272,6 +272,7 @@ void WellState<Scalar>::init(const std::vector<Scalar>& cellPressures,
this->global_well_info = std::make_optional<GlobalWellInfo>(schedule,
                                                            report_step,
                                                            wells_ecl);
well_rates.clear();
for (const auto& wname : schedule.wellNames(report_step))
{
    well_rates.insert({wname, std::make_pair(false, std::vector<Scalar>(this->numPhases()))});
@@ -694,6 +695,9 @@ void WellState<Scalar>::initWellStateMSWell(const std::vector<Well>& wells_ecl,
for (int w = 0; w < nw; ++w) {
    const auto& well_ecl = wells_ecl[w];
    auto& ws = this->well(w);
    // If phase_rates has zero size, this is an inactive well that will never be solved
    if (ws.perf_data.phase_rates.size() == 0)
        continue;
    if (well_ecl.isMultiSegment()) {
        const WellSegments& segment_set = well_ecl.getSegments();