Mirror of https://github.com/OPM/opm-simulators.git (synced 2025-02-25 18:55:30 -06:00)
Info message, cmdline option and avoid splitting wells on restart (for now)
@@ -150,6 +150,7 @@ doLoadBalance_(const Dune::EdgeWeightMethod edgeWeightsMethod,
                const Dune::PartitionMethod partitionMethod,
                const bool serialPartitioning,
                const bool enableDistributedWells,
+               const bool allowSplittingInactiveWells,
                const double imbalanceTol,
                const GridView& gridView,
                const Schedule& schedule,
@@ -194,9 +195,15 @@ doLoadBalance_(const Dune::EdgeWeightMethod edgeWeightsMethod,
         }
     }
+
+    // Skipping inactive wells in partitioning currently does not play nice with restart..
+    const bool restart = eclState1.getInitConfig().restartRequested();
+    const bool split_inactive = (!restart && allowSplittingInactiveWells);
     const auto wells = ((mpiSize > 1) || partitionJacobiBlocks)
-        ? schedule.getActiveWellsAtEnd()
+        ? split_inactive
+              ? schedule.getActiveWellsAtEnd()
+              : schedule.getWellsatEnd()
         : std::vector<Well>{};
 
     const auto& possibleFutureConnections = schedule.getPossibleFutureConnections();
     // Distribute the grid and switch to the distributed view.
     if (mpiSize > 1) {
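For orientation, the nested ternary above decides which well list is handed to the partitioner: in a parallel run (or when partitioning Jacobi blocks), only the active wells are used when splitting of inactive wells is allowed and the run is not a restart; otherwise the full well list is passed, and a serial run passes none. Below is a minimal standalone sketch of that decision, using hypothetical names (WellSet, selectWellSet) rather than the actual OPM types.

// Standalone sketch of the well-selection logic in the hunk above.
// WellSet and selectWellSet are illustrative stand-ins, not OPM API.
#include <iostream>

enum class WellSet { None, ActiveOnly, All };

// Mirrors: wells = parallel ? (split_inactive ? active : all) : empty,
// where "parallel" stands for (mpiSize > 1) || partitionJacobiBlocks.
WellSet selectWellSet(bool parallelRun,
                      bool restartRequested,
                      bool allowSplittingInactiveWells)
{
    const bool split_inactive = !restartRequested && allowSplittingInactiveWells;
    return parallelRun ? (split_inactive ? WellSet::ActiveOnly : WellSet::All)
                       : WellSet::None;
}

int main()
{
    // On restart, inactive wells are never skipped, regardless of the option.
    std::cout << (selectWellSet(true, true, true) == WellSet::All) << '\n';         // 1
    // In a normal parallel run with the option enabled, only active wells are passed.
    std::cout << (selectWellSet(true, false, true) == WellSet::ActiveOnly) << '\n'; // 1
    // Serial runs pass no wells to the partitioner.
    std::cout << (selectWellSet(false, false, true) == WellSet::None) << '\n';      // 1
}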
@@ -209,36 +216,44 @@ doLoadBalance_(const Dune::EdgeWeightMethod edgeWeightsMethod,
     }
 
     // Add inactive wells to all ranks with connections (not solved, so OK even without distributed wells)
-    std::unordered_set<unsigned> cellOnRank;
-    const auto& global_cells = this->grid_->globalCell();
-    for (const auto cell : global_cells) cellOnRank.insert(cell);
-    const auto& comm = this->grid_->comm();
-    const auto nranks = comm.size();
-    const auto inactive_well_names = schedule.getInactiveWellNamesAtEnd();
-    std::size_t num_wells = inactive_well_names.size();
-    std::vector<int> well_on_rank(num_wells, 0);
-    std::size_t well_idx = 0;
-    for (const auto& well_name : inactive_well_names) {
-        const auto& well = schedule.getWell(well_name, schedule.size()-1);
-        for (const auto& conn: well.getConnections()) {
-            if (cellOnRank.count(conn.global_index()) > 0) {
-                well_on_rank[well_idx] = 1;
-                break;
-            }
-        }
-        ++well_idx;
-    }
-    // values from rank i will be at indices i*num_wells, i*num_wells + 1, ..., (i+1) * num_wells -1
-    std::vector<int> well_on_rank_global(num_wells * nranks, 0);
-    comm.allgather(well_on_rank.data(), static_cast<int>(num_wells), well_on_rank_global.data());
-    well_idx = 0;
-    for (const auto& well_name : inactive_well_names) {
-        for (int i=0; i<nranks; ++i) {
-            if (well_on_rank_global[i*num_wells + well_idx]) {
-                parallelWells.emplace_back(well_name, true);
-            }
-        }
-        ++well_idx;
-    }
+    if (split_inactive) {
+        std::unordered_set<unsigned> cellOnRank;
+        const auto& global_cells = this->grid_->globalCell();
+        for (const auto cell : global_cells) cellOnRank.insert(cell);
+        const auto inactive_well_names = schedule.getInactiveWellNamesAtEnd();
+        std::size_t num_wells = inactive_well_names.size();
+        std::vector<int> well_on_rank(num_wells, 0);
+        std::size_t well_idx = 0;
+        for (const auto& well_name : inactive_well_names) {
+            const auto& well = schedule.getWell(well_name, schedule.size()-1);
+            for (const auto& conn: well.getConnections()) {
+                if (cellOnRank.count(conn.global_index()) > 0) {
+                    well_on_rank[well_idx] = 1;
+                    parallelWells.emplace_back(well_name, true);
+                    break;
+                }
+            }
+            if (!well_on_rank[well_idx]) parallelWells.emplace_back(well_name, false);
+            ++well_idx;
+        }
+
+        // Provide information message
+        const auto& comm = this->grid_->comm();
+        const auto nranks = comm.size();
+        // values from rank i will be at indices i*num_wells, i*num_wells + 1, ..., (i+1) * num_wells -1
+        std::vector<int> well_on_rank_global(num_wells * nranks, 0);
+        comm.allgather(well_on_rank.data(), static_cast<int>(num_wells), well_on_rank_global.data());
+        if (comm.rank() == 0) {
+            well_idx = 0;
+            for (const auto& well_name : inactive_well_names) {
+                std::string msg = fmt::format("Well {} is inactive, with perforations on ranks: ", well_name);
+                for (int i=0; i<nranks; ++i) {
+                    if (well_on_rank_global[i*num_wells + well_idx]) msg += fmt::format("{} ", i);
+                }
+                OpmLog::info(msg);
+                ++well_idx;
+            }
+        }
+    }
     std::sort(parallelWells.begin(), parallelWells.end());
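Both the removed and the added variants of this hunk rely on the gathered layout noted in the code comment: after comm.allgather, the flags contributed by rank i occupy indices i*num_wells through (i+1)*num_wells - 1 of well_on_rank_global, so the flag for well w as seen by rank i sits at i*num_wells + w. Below is a minimal MPI-free sketch of that indexing with made-up well names and flags; only the loop structure mirrors the commit, nothing here is OPM API.

// Illustrative sketch of the allgather buffer layout used above.
// The MPI gather is replaced by a hand-built buffer for 3 ranks and 2 wells.
#include <iostream>
#include <string>
#include <vector>

int main()
{
    const int nranks = 3;
    const std::size_t num_wells = 2;
    const std::vector<std::string> inactive_well_names{"W-A", "W-B"};

    // Pretend per-rank flags (1 = the well perforates a cell owned by that rank):
    // rank 0 sees W-A, rank 1 sees W-B, rank 2 sees both.
    const std::vector<int> well_on_rank_global{
        1, 0,   // rank 0 contributes indices 0..1
        0, 1,   // rank 1 contributes indices 2..3
        1, 1    // rank 2 contributes indices 4..5
    };

    // Same indexing as the info-message loop in the commit.
    for (std::size_t well_idx = 0; well_idx < num_wells; ++well_idx) {
        std::string msg = "Well " + inactive_well_names[well_idx]
                        + " is inactive, with perforations on ranks: ";
        for (int i = 0; i < nranks; ++i) {
            if (well_on_rank_global[i * num_wells + well_idx]) {
                msg += std::to_string(i) + " ";
            }
        }
        std::cout << msg << '\n';
    }
}

Running this prints "Well W-A is inactive, with perforations on ranks: 0 2" and "Well W-B is inactive, with perforations on ranks: 1 2", matching the format of the new OpmLog::info message.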