Post-review update - reducing global communication and simplifying

Vegard Kippe 2024-10-16 22:23:28 +02:00
parent 6c4512be12
commit 9d9726920b
2 changed files with 22 additions and 27 deletions


@@ -214,24 +214,31 @@ doLoadBalance_(const Dune::EdgeWeightMethod edgeWeightsMethod,
         for (const auto cell : global_cells) cellOnRank.insert(cell);
         const auto& comm = this->grid_->comm();
         const auto nranks = comm.size();
         const auto rank = comm.rank();
         const auto inactive_well_names = schedule.getInactiveWellNamesAtEnd();
+        std::size_t num_wells = inactive_well_names.size();
+        std::vector<int> well_on_rank(num_wells, 0);
+        std::size_t well_idx = 0;
         for (const auto& well_name : inactive_well_names) {
             const auto& well = schedule.getWell(well_name, schedule.size()-1);
-            std::vector<int> well_on_rank(nranks, 0);
             for (const auto& conn: well.getConnections()) {
                 if (cellOnRank.count(conn.global_index()) > 0) {
-                    well_on_rank[rank] = 1;
+                    well_on_rank[well_idx] = 1;
                     break;
                 }
             }
-            std::vector<int> well_on_rank_global(nranks, 0);
-            comm.max(&well_on_rank[0], nranks);
+            ++well_idx;
+        }
+        // values from rank i will be at indices i*num_wells, i*num_wells + 1, ..., (i+1)*num_wells - 1
+        std::vector<int> well_on_rank_global(num_wells * nranks, 0);
+        comm.allgather(well_on_rank.data(), static_cast<int>(num_wells), well_on_rank_global.data());
+        well_idx = 0;
+        for (const auto& well_name : inactive_well_names) {
             for (int i=0; i<nranks; ++i) {
-                if (well_on_rank[i]) {
+                if (well_on_rank_global[i*num_wells + well_idx]) {
                     parallelWells.emplace_back(well_name, i);
                 }
             }
+            ++well_idx;
         }
         std::sort(parallelWells.begin(), parallelWells.end());
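The first hunk replaces one comm.max reduction per inactive well with a single allgather of a flat flag vector, so the number of collective calls no longer grows with the well count. Below is a minimal, self-contained sketch of that communication pattern in plain MPI; the well count and the "connection on this rank" test are hypothetical stand-ins for the Schedule lookups above, not OPM code:

#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank = 0, nranks = 1;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nranks);

    const int num_wells = 3;                    // hypothetical well count
    std::vector<int> well_on_rank(num_wells, 0);
    well_on_rank[rank % num_wells] = 1;         // stand-in for the connection test

    // One collective for all wells instead of num_wells reductions.
    // Rank i's flags land at indices [i*num_wells, (i+1)*num_wells).
    std::vector<int> well_on_rank_global(num_wells * nranks, 0);
    MPI_Allgather(well_on_rank.data(), num_wells, MPI_INT,
                  well_on_rank_global.data(), num_wells, MPI_INT,
                  MPI_COMM_WORLD);

    // Well w is present on rank i iff well_on_rank_global[i*num_wells + w] != 0,
    // which is exactly the indexing used in the patch above.
    if (rank == 0) {
        for (int i = 0; i < nranks; ++i)
            for (int w = 0; w < num_wells; ++w)
                if (well_on_rank_global[i*num_wells + w])
                    std::printf("well %d present on rank %d\n", w, i);
    }
    MPI_Finalize();
    return 0;
}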


@@ -405,33 +405,21 @@ template<class FluidSystem>
 void GenericOutputBlackoilModule<FluidSystem>::
 collectRftMapOnRoot(std::map<std::size_t, Scalar>& local_map, const Parallel::Communication& comm) {
-    const auto mapsize = local_map.size();
-    std::vector<int> keys;
-    std::vector<Scalar> values;
-    keys.reserve(mapsize);
-    values.reserve(mapsize);
-    for (const auto& [key, value] : local_map) {
-        keys.push_back(key);
-        values.push_back(value);
-    }
-    std::vector<int> all_keys;
-    std::vector<Scalar> all_values;
+    std::vector<std::pair<int, Scalar>> pairs(local_map.begin(), local_map.end());
+    std::vector<std::pair<int, Scalar>> all_pairs;
     std::vector<int> offsets;
-    std::tie(all_keys, offsets) = Opm::gatherv(keys, comm, 0);
-    std::tie(all_values, std::ignore) = Opm::gatherv(values, comm, 0);
-    assert(all_keys.size() == all_values.size());
+    std::tie(all_pairs, offsets) = Opm::gatherv(pairs, comm, 0);
     // Insert/update map values on root
     if (comm.rank() == 0) {
-        for (auto i=static_cast<std::size_t>(offsets[1]); i<all_keys.size(); ++i) {
-            const auto index = all_keys[i];
-            if (local_map.count(index)>0) {
-                const Scalar prev_value = local_map[index];
-                local_map[index] = std::max(prev_value, all_values[i]);
+        for (auto i=static_cast<std::size_t>(offsets[1]); i<all_pairs.size(); ++i) {
+            const auto& key_value = all_pairs[i];
+            if (auto candidate = local_map.find(key_value.first); candidate != local_map.end()) {
+                const Scalar prev_value = candidate->second;
+                candidate->second = std::max(prev_value, key_value.second);
             } else {
-                local_map[index] = all_values[i];
+                local_map[key_value.first] = key_value.second;
             }
         }
     }
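The second hunk halves the communication in collectRftMapOnRoot by gathering (key, value) pairs with one Opm::gatherv call instead of gathering keys and values separately, and the merge on root keeps the maximum value when a key is already present. The sketch below shows the same gather-pairs-then-merge idea in plain MPI, assuming a trivially copyable key/value struct sent as raw bytes; the count/offset exchange that Opm::gatherv presumably performs internally is spelled out here, and all names and data are illustrative:

#include <mpi.h>
#include <algorithm>
#include <cstdio>
#include <map>
#include <vector>

// Plain-aggregate stand-in for the (key, value) pairs; gathered as raw bytes.
struct KV { int key; double value; };

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank = 0, nranks = 1;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nranks);

    // Hypothetical per-rank data; keys deliberately overlap across ranks.
    std::map<int, double> local_map{{rank, 1.0 + rank}, {0, 0.5 * rank}};
    std::vector<KV> pairs;
    for (const auto& [k, v] : local_map) pairs.push_back({k, v});

    // Gather the per-rank byte counts, then the pairs themselves:
    // two collectives total, regardless of how many keys there are.
    const int nbytes = static_cast<int>(pairs.size() * sizeof(KV));
    std::vector<int> counts(nranks, 0), displs(nranks, 0);
    MPI_Gather(&nbytes, 1, MPI_INT, counts.data(), 1, MPI_INT, 0, MPI_COMM_WORLD);
    int total = 0;
    for (int i = 0; i < nranks; ++i) { displs[i] = total; total += counts[i]; }

    std::vector<KV> all_pairs(total / sizeof(KV));
    MPI_Gatherv(pairs.data(), nbytes, MPI_BYTE,
                all_pairs.data(), counts.data(), displs.data(), MPI_BYTE,
                0, MPI_COMM_WORLD);

    // Root merges, keeping the maximum value on duplicate keys, mirroring the
    // insert/update loop in the patch. (The patch skips rank 0's own chunk via
    // offsets[1]; re-merging it here is simply a no-op.)
    if (rank == 0) {
        for (const auto& kv : all_pairs) {
            auto [it, inserted] = local_map.emplace(kv.key, kv.value);
            if (!inserted) it->second = std::max(it->second, kv.value);
        }
        for (const auto& [k, v] : local_map) std::printf("%d -> %g\n", k, v);
    }
    MPI_Finalize();
    return 0;
}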