From 3c66b729e1051a6901c2e58cfdce4c4c67369076 Mon Sep 17 00:00:00 2001 From: Markus Blatt Date: Mon, 7 Dec 2020 16:04:57 +0100 Subject: [PATCH 1/5] Fixes perf rate initialization for distributed wells. --- opm/simulators/wells/ParallelWellInfo.hpp | 12 ++++++++++ .../wells/WellStateFullyImplicitBlackoil.hpp | 22 ++++++++++++------- 2 files changed, 26 insertions(+), 8 deletions(-) diff --git a/opm/simulators/wells/ParallelWellInfo.hpp b/opm/simulators/wells/ParallelWellInfo.hpp index 939e3d87c..62a29b6dd 100644 --- a/opm/simulators/wells/ParallelWellInfo.hpp +++ b/opm/simulators/wells/ParallelWellInfo.hpp @@ -27,6 +27,8 @@ #include #include +#include +#include namespace Opm { @@ -197,6 +199,16 @@ public: /// \brief Inidicate completion of reset of the ecl index information void endReset(); + /// \brief Sum all the values of the perforations + template + typename std::iterator_traits::value_type sumPerfValues(It begin, It end) + { + using V = typename std::iterator_traits::value_type; + /// \todo cater for overlap later. Currently only owner + auto local = std::accumulate(begin, end, V()); + return communication().sum(local); + } + /// \brief Free data of communication data structures. void clearCommunicateAbove(); private: diff --git a/opm/simulators/wells/WellStateFullyImplicitBlackoil.hpp b/opm/simulators/wells/WellStateFullyImplicitBlackoil.hpp index 36ef1d02b..f66aa9ea0 100644 --- a/opm/simulators/wells/WellStateFullyImplicitBlackoil.hpp +++ b/opm/simulators/wells/WellStateFullyImplicitBlackoil.hpp @@ -146,11 +146,12 @@ namespace Opm const auto& well_info = this->wellMap().at(wname); const int connpos = well_info[1]; const int num_perf_this_well = well_info[2]; + int global_num_perf_this_well = parallel_well_info[w]->communication().sum(num_perf_this_well); for (int perf = connpos; perf < connpos + num_perf_this_well; ++perf) { if (wells_ecl[w].getStatus() == Well::Status::OPEN) { for (int p = 0; p < np; ++p) { - perfphaserates_[np*perf + p] = wellRates()[np*w + p] / double(num_perf_this_well); + perfphaserates_[np*perf + p] = wellRates()[np*w + p] / double(global_num_perf_this_well); } } perfPress()[perf] = cellPressures[well_perf_data[w][perf-connpos].cell_index]; @@ -224,6 +225,10 @@ namespace Opm // perfPhaseRates const int oldPerf_idx_beg = (*it).second[ 1 ]; const int num_perf_old_well = (*it).second[ 2 ]; + int num_perf_changed = (num_perf_old_well != num_perf_this_well) ? 1 : 0; + num_perf_changed = parallel_well_info[w]->communication().sum(num_perf_changed); + bool global_num_perf_same = (num_perf_changed == 0); + // copy perforation rates when the number of perforations is equal, // otherwise initialize perfphaserates to well rates divided by the number of perforations. 
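The key change in this first patch is that per-perforation rates are now divided by the number of perforations of the whole well, summed over all ranks that own a piece of it, instead of only the local count. A minimal sketch of that idea, independent of the OPM classes (the Comm type and the initPerfPhaseRates helper below are illustrative stand-ins for ParallelWellInfo::communication() and the WellState initialization above, not part of the patch):

    #include <vector>

    // Illustrative stand-in for ParallelWellInfo::communication(): pretends the
    // well is split over several ranks and sums their perforation counts.
    struct Comm
    {
        std::vector<int> other_rank_counts; // perforation counts of the other ranks sharing the well
        int sum(int my_count) const
        {
            int total = my_count;
            for (int c : other_rank_counts)
                total += c;
            return total;
        }
    };

    // Spread the well rate of each phase evenly over *all* perforations of the
    // well, not just the ones stored on this rank.
    std::vector<double> initPerfPhaseRates(const std::vector<double>& well_rates, // one entry per phase
                                           int local_num_perf,
                                           const Comm& comm)
    {
        const int np = static_cast<int>(well_rates.size());
        const int global_num_perf = comm.sum(local_num_perf);
        std::vector<double> perf_rates(np * local_num_perf);
        for (int perf = 0; perf < local_num_perf; ++perf)
            for (int p = 0; p < np; ++p)
                perf_rates[np * perf + p] = well_rates[p] / double(global_num_perf);
        return perf_rates;
    }

With the global count in the denominator, summing the per-perforation rates over all ranks reproduces the well rate, which is also what the new sumPerfValues helper relies on for the solvent, polymer and brine well rates below.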
@@ -231,7 +236,7 @@ namespace Opm if (new_iter == this->wellMap().end()) throw std::logic_error("Fatal error in WellStateFullyImplicitBlackoil - could not find well: " + well.name()); int connpos = new_iter->second[1]; - if( num_perf_old_well == num_perf_this_well ) + if( global_num_perf_same ) { int old_perf_phase_idx = oldPerf_idx_beg *np; for (int perf_phase_idx = connpos*np; @@ -240,14 +245,15 @@ namespace Opm perfPhaseRates()[ perf_phase_idx ] = prevState->perfPhaseRates()[ old_perf_phase_idx ]; } } else { + int global_num_perf_this_well = parallel_well_info[w]->communication().sum(num_perf_this_well); for (int perf = connpos; perf < connpos + num_perf_this_well; ++perf) { for (int p = 0; p < np; ++p) { - perfPhaseRates()[np*perf + p] = wellRates()[np*newIndex + p] / double(num_perf_this_well); + perfPhaseRates()[np*perf + p] = wellRates()[np*newIndex + p] / double(global_num_perf_this_well); } } } // perfPressures - if( num_perf_old_well == num_perf_this_well ) + if( global_num_perf_same ) { int oldPerf_idx = oldPerf_idx_beg; for (int perf = connpos; perf < connpos + num_perf_this_well; ++perf, ++oldPerf_idx ) @@ -257,7 +263,7 @@ namespace Opm } // perfSolventRates if (pu.has_solvent) { - if( num_perf_old_well == num_perf_this_well ) + if( global_num_perf_same ) { int oldPerf_idx = oldPerf_idx_beg; for (int perf = connpos; perf < connpos + num_perf_this_well; ++perf, ++oldPerf_idx ) @@ -907,7 +913,7 @@ namespace Opm /// One rate pr well double solventWellRate(const int w) const { - return std::accumulate(&perfRateSolvent_[0] + first_perf_index_[w], &perfRateSolvent_[0] + first_perf_index_[w+1], 0.0); + return parallel_well_info_[w]->sumPerfValues(&perfRateSolvent_[0] + first_perf_index_[w], &perfRateSolvent_[0] + first_perf_index_[w+1]); } /// One rate pr well connection. @@ -916,7 +922,7 @@ namespace Opm /// One rate pr well double polymerWellRate(const int w) const { - return std::accumulate(&perfRatePolymer_[0] + first_perf_index_[w], &perfRatePolymer_[0] + first_perf_index_[w+1], 0.0); + return parallel_well_info_[w]->sumPerfValues(&perfRatePolymer_[0] + first_perf_index_[w], &perfRatePolymer_[0] + first_perf_index_[w+1]); } /// One rate pr well connection. @@ -925,7 +931,7 @@ namespace Opm /// One rate pr well double brineWellRate(const int w) const { - return std::accumulate(&perfRateBrine_[0] + first_perf_index_[w], &perfRateBrine_[0] + first_perf_index_[w+1], 0.0); + return parallel_well_info_[w]->sumPerfValues(&perfRateBrine_[0] + first_perf_index_[w], &perfRateBrine_[0] + first_perf_index_[w+1]); } std::vector& wellReservoirRates() From 35218bf0427010796799eca4b9f20af30eee913a Mon Sep 17 00:00:00 2001 From: Markus Blatt Date: Tue, 8 Dec 2020 19:02:33 +0100 Subject: [PATCH 2/5] Added possibility to communicate values from perforations below. --- .../wells/BlackoilWellModel_impl.hpp | 2 +- opm/simulators/wells/ParallelWellInfo.cpp | 88 ++++++++++++++----- opm/simulators/wells/ParallelWellInfo.hpp | 44 +++++++--- tests/test_parallelwellinfo.cpp | 68 +++++++++----- 4 files changed, 147 insertions(+), 55 deletions(-) diff --git a/opm/simulators/wells/BlackoilWellModel_impl.hpp b/opm/simulators/wells/BlackoilWellModel_impl.hpp index 02fac730e..9b395f5c4 100644 --- a/opm/simulators/wells/BlackoilWellModel_impl.hpp +++ b/opm/simulators/wells/BlackoilWellModel_impl.hpp @@ -467,7 +467,7 @@ namespace Opm { // Clear the communication data structures for above values. 
for(auto&& pinfo : local_parallel_well_info_) { - pinfo->clearCommunicateAbove(); + pinfo->clearCommunicateAboveBelow(); } } diff --git a/opm/simulators/wells/ParallelWellInfo.cpp b/opm/simulators/wells/ParallelWellInfo.cpp index 5e8964406..03b879623 100644 --- a/opm/simulators/wells/ParallelWellInfo.cpp +++ b/opm/simulators/wells/ParallelWellInfo.cpp @@ -43,13 +43,13 @@ struct CommPolicy namespace Opm { -CommunicateAbove::CommunicateAbove([[maybe_unused]] const Communication& comm) +CommunicateAboveBelow::CommunicateAboveBelow([[maybe_unused]] const Communication& comm) #if HAVE_MPI : comm_(comm), interface_(comm_) #endif {} -void CommunicateAbove::clear() +void CommunicateAboveBelow::clear() { #if HAVE_MPI above_indices_ = {}; @@ -60,7 +60,7 @@ void CommunicateAbove::clear() count_ = 0; } -void CommunicateAbove::beginReset() +void CommunicateAboveBelow::beginReset() { clear(); #if HAVE_MPI @@ -72,7 +72,7 @@ void CommunicateAbove::beginReset() #endif } -void CommunicateAbove::endReset() +void CommunicateAboveBelow::endReset() { #if HAVE_MPI if (comm_.size() > 1) @@ -104,9 +104,9 @@ struct CopyGatherScatter }; -std::vector CommunicateAbove::communicate(double first_above, - const double* current, - std::size_t size) +std::vector CommunicateAboveBelow::communicateAbove(double first_above, + const double* current, + std::size_t size) { std::vector above(size, first_above); @@ -131,10 +131,37 @@ std::vector CommunicateAbove::communicate(double first_above, } return above; } +std::vector CommunicateAboveBelow::communicateBelow(double last_below, + const double* current, + std::size_t size) +{ + std::vector below(size, last_below); -void CommunicateAbove::pushBackEclIndex([[maybe_unused]] int above, - [[maybe_unused]] int current, - [[maybe_unused]] bool isOwner) +#if HAVE_MPI + if (comm_.size() > 1) + { + auto belowData = below.data(); + // Ugly const_cast needed as my compiler says, that + // passing const double*& and double* as parameter is + // incompatible with function decl template backward(Data&, const Data&) + // That would need the first argument to be double* const& + communicator_.backward(belowData, const_cast(current)); + } + else +#endif + { + if (below.size() > 1) + { + // No comunication needed, just copy. 
+ std::copy(current+1, current + below.size(), below.begin()); + } + } + return below; +} + +void CommunicateAboveBelow::pushBackEclIndex([[maybe_unused]] int above, + [[maybe_unused]] int current, + [[maybe_unused]] bool isOwner) { #if HAVE_MPI if (comm_.size() > 1) @@ -175,8 +202,8 @@ ParallelWellInfo::ParallelWellInfo(const std::string& name, : name_(name), hasLocalCells_ (hasLocalCells), isOwner_(true), rankWithFirstPerf_(-1), comm_(new Communication(Dune::MPIHelper::getLocalCommunicator())), - commAbove_(new CommunicateAbove(*comm_)) - {} + commAboveBelow_(new CommunicateAboveBelow(*comm_)) +{} ParallelWellInfo::ParallelWellInfo(const std::pair& well_info, [[maybe_unused]] Communication allComm) @@ -191,7 +218,7 @@ ParallelWellInfo::ParallelWellInfo(const std::pair& well_info, #else comm_.reset(new Communication(Dune::MPIHelper::getLocalCommunicator())); #endif - commAbove_.reset(new CommunicateAbove(*comm_)); + commAboveBelow_.reset(new CommunicateAboveBelow(*comm_)); isOwner_ = (comm_->rank() == 0); } @@ -207,38 +234,53 @@ void ParallelWellInfo::communicateFirstPerforation(bool hasFirst) void ParallelWellInfo::pushBackEclIndex(int above, int current) { - commAbove_->pushBackEclIndex(above, current); + commAboveBelow_->pushBackEclIndex(above, current); } void ParallelWellInfo::beginReset() { - commAbove_->beginReset(); + commAboveBelow_->beginReset(); } void ParallelWellInfo::endReset() { - commAbove_->beginReset(); + commAboveBelow_->beginReset(); } -void ParallelWellInfo::clearCommunicateAbove() +void ParallelWellInfo::clearCommunicateAboveBelow() { - commAbove_->clear(); + commAboveBelow_->clear(); } std::vector ParallelWellInfo::communicateAboveValues(double zero_value, const double* current_values, std::size_t size) const { - return commAbove_->communicate(zero_value, current_values, - size); + return commAboveBelow_->communicateAbove(zero_value, current_values, + size); } std::vector ParallelWellInfo::communicateAboveValues(double zero_value, const std::vector& current_values) const { - return commAbove_->communicate(zero_value, current_values.data(), - current_values.size()); + return commAboveBelow_->communicateAbove(zero_value, current_values.data(), + current_values.size()); +} + +std::vector ParallelWellInfo::communicateBelowValues(double last_value, + const double* current_values, + std::size_t size) const +{ + return commAboveBelow_->communicateBelow(last_value, current_values, + size); +} + +std::vector ParallelWellInfo::communicateBelowValues(double last_value, + const std::vector& current_values) const +{ + return commAboveBelow_->communicateBelow(last_value, current_values.data(), + current_values.size()); } bool operator<(const ParallelWellInfo& well1, const ParallelWellInfo& well2) @@ -294,7 +336,7 @@ bool operator!=(const ParallelWellInfo& well, const std::pair } CheckDistributedWellConnections::CheckDistributedWellConnections(const Well& well, - const ParallelWellInfo& info) + const ParallelWellInfo& info) : well_(well), pwinfo_(info) { foundConnections_.resize(well.getConnections().size(), 0); diff --git a/opm/simulators/wells/ParallelWellInfo.hpp b/opm/simulators/wells/ParallelWellInfo.hpp index 62a29b6dd..1fbbcdaa0 100644 --- a/opm/simulators/wells/ParallelWellInfo.hpp +++ b/opm/simulators/wells/ParallelWellInfo.hpp @@ -33,9 +33,9 @@ namespace Opm { -/// \brief Class to facilitate getting values associated with the above perforation +/// \brief Class to facilitate getting values associated with the above/below perforation /// -class CommunicateAbove +class 
CommunicateAboveBelow
 {
     enum Attribute {
         owner=1, overlap=2
     };
@@ -53,7 +53,7 @@ class CommunicateAbove
 #endif
 
 public:
-    explicit CommunicateAbove(const Communication& comm);
+    explicit CommunicateAboveBelow(const Communication& comm);
     /// \brief Adds information about original index of the perforations in ECL Schedule.
     ///
     /// \warning Theses indices need to be push in the same order as they
@@ -81,10 +81,18 @@ public:
     /// \param current C-array of the values at the perforations
     /// \param size The size of the C-array and the returned vector
     /// \return a vector containing the values for the perforation above.
-    std::vector communicate(double first_value,
-                            const double* current,
-                            std::size_t size);
+    std::vector communicateAbove(double first_value,
+                                 const double* current,
+                                 std::size_t size);
+    /// \brief Creates an array of values for the perforation below.
+    /// \param last_value Value to use for below of the last perforation
+    /// \param current C-array of the values at the perforations
+    /// \param size The size of the C-array and the returned vector
+    /// \return a vector containing the values for the perforation below.
+    std::vector communicateBelow(double last_value,
+                                 const double* current,
+                                 std::size_t size);
 private:
 #if HAVE_MPI
     Communication comm_;
@@ -157,15 +165,31 @@ public:
     /// \param current C-array of the values at the perforations
     /// \param size The size of the C-array and the returned vector
     /// \return a vector containing the values for the perforation above.
-    std::vector communicateAboveValues(double zero_value,
+    std::vector communicateAboveValues(double first_value,
                                        const double* current,
                                        std::size_t size) const;
 
     /// \brief Creates an array of values for the perforation above.
     /// \param first_value Value to use for above of the first perforation
     /// \param current vector of current values
-    std::vector communicateAboveValues(double zero_value,
+    std::vector communicateAboveValues(double first_value,
                                        const std::vector& current) const;
+
+    /// \brief Creates an array of values for the perforation below.
+    /// \param last_value Value to use for below of the last perforation
+    /// \param current C-array of the values at the perforations
+    /// \param size The size of the C-array and the returned vector
+    /// \return a vector containing the values for the perforation below.
+    std::vector communicateBelowValues(double last_value,
+                                       const double* current,
+                                       std::size_t size) const;
+
+    /// \brief Creates an array of values for the perforation below.
+    /// \param last_value Value to use for below of the last perforation
+    /// \param current vector of current values
+    std::vector communicateBelowValues(double last_value,
+                                       const std::vector& current) const;
+
     /// \brief Adds information about the ecl indices of the perforations.
     ///
     /// \warning Theses indices need to be push in the same order as they
@@ -210,7 +234,7 @@ public:
     }
 
     /// \brief Free data of communication data structures.
-    void clearCommunicateAbove();
+    void clearCommunicateAboveBelow();
 private:
 
     /// \brief Deleter that also frees custom MPI communicators
     std::unique_ptr comm_;
     /// \brief used to communicate the values for the perforation above.
- std::unique_ptr commAbove_; + std::unique_ptr commAboveBelow_; }; /// \brief Class checking that all connections are on active cells diff --git a/tests/test_parallelwellinfo.cpp b/tests/test_parallelwellinfo.cpp index 0cfe06469..8e6fa2766 100644 --- a/tests/test_parallelwellinfo.cpp +++ b/tests/test_parallelwellinfo.cpp @@ -153,64 +153,76 @@ BOOST_AUTO_TEST_CASE(ParallelWellComparison) } -BOOST_AUTO_TEST_CASE(CommunicateAboveSelf) +BOOST_AUTO_TEST_CASE(CommunicateAboveBelowSelf) { auto comm = Dune::MPIHelper::getLocalCommunicator(); - Opm::CommunicateAbove commAbove{ comm }; + Opm::CommunicateAboveBelow commAboveBelow{ comm }; for(std::size_t count=0; count < 2; ++count) { std::vector eclIndex = {0, 1, 2, 3, 7 , 8, 10, 11}; std::vector current(eclIndex.size()); std::transform(eclIndex.begin(), eclIndex.end(), current.begin(), [](double v){ return 1+10.0*v;}); - commAbove.beginReset(); + commAboveBelow.beginReset(); for (std::size_t i = 0; i < current.size(); ++i) { if (i==0) - commAbove.pushBackEclIndex(-1, eclIndex[i]); + commAboveBelow.pushBackEclIndex(-1, eclIndex[i]); else - commAbove.pushBackEclIndex(eclIndex[i-1], eclIndex[i]); + commAboveBelow.pushBackEclIndex(eclIndex[i-1], eclIndex[i]); } - commAbove.endReset(); - auto above = commAbove.communicate(-10.0, current.data(), current.size()); + commAboveBelow.endReset(); + auto above = commAboveBelow.communicateAbove(-10.0, current.data(), current.size()); BOOST_CHECK(above[0]==-10.0); BOOST_CHECK(above.size() == current.size()); auto a = above.begin()+1; std::for_each(current.begin(), current.begin() + (current.size()-1), [&a](double v){ BOOST_CHECK(*(a++) == v);}); + auto below = commAboveBelow.communicateBelow(-10.0, current.data(), current.size()); + BOOST_CHECK(below.back() == -10.0); + BOOST_CHECK(below.size() == current.size()); + auto b = below.begin(); + std::for_each(current.begin()+1, current.end(), + [&b](double v){ BOOST_CHECK(*(b++) == v);}); } } -BOOST_AUTO_TEST_CASE(CommunicateAboveSelf1) +BOOST_AUTO_TEST_CASE(CommunicateAboveBelowSelf1) { auto comm = Dune::MPIHelper::getLocalCommunicator(); - Opm::CommunicateAbove commAbove{ comm }; + Opm::CommunicateAboveBelow commAboveBelow{ comm }; for(std::size_t count=0; count < 2; ++count) { std::vector eclIndex = {0}; std::vector current(eclIndex.size()); std::transform(eclIndex.begin(), eclIndex.end(), current.begin(), [](double v){ return 1+10.0*v;}); - commAbove.beginReset(); + commAboveBelow.beginReset(); for (std::size_t i = 0; i < current.size(); ++i) { if (i==0) - commAbove.pushBackEclIndex(-1, eclIndex[i]); + commAboveBelow.pushBackEclIndex(-1, eclIndex[i]); else - commAbove.pushBackEclIndex(eclIndex[i-1], eclIndex[i]); + commAboveBelow.pushBackEclIndex(eclIndex[i-1], eclIndex[i]); } - commAbove.endReset(); - auto above = commAbove.communicate(-10.0, current.data(), current.size()); + commAboveBelow.endReset(); + auto above = commAboveBelow.communicateAbove(-10.0, current.data(), current.size()); BOOST_CHECK(above[0]==-10.0); BOOST_CHECK(above.size() == current.size()); auto a = above.begin()+1; std::for_each(current.begin(), current.begin() + (current.size()-1), [&a](double v){ BOOST_CHECK(*(a++) == v);}); + auto below = commAboveBelow.communicateBelow(-10.0, current.data(), current.size()); + BOOST_CHECK(below.back() == -10.0); + BOOST_CHECK(below.size() == current.size()); + auto b = below.begin(); + std::for_each(current.begin()+1, current.end(), + [&b](double v){ BOOST_CHECK(*(b++) == v);}); } } -BOOST_AUTO_TEST_CASE(CommunicateAboveParalle) 
+BOOST_AUTO_TEST_CASE(CommunicateAboveBelowParallel) { using MPIComm = typename Dune::MPIHelper::MPICommunicator; #if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7) @@ -220,7 +232,7 @@ BOOST_AUTO_TEST_CASE(CommunicateAboveParalle) #endif auto comm = Communication(Dune::MPIHelper::getCommunicator()); - Opm::CommunicateAbove commAbove{ comm }; + Opm::CommunicateAboveBelow commAboveBelow{ comm }; for(std::size_t count=0; count < 2; ++count) { std::vector globalEclIndex = {0, 1, 2, 3, 7 , 8, 10, 11}; @@ -244,23 +256,23 @@ BOOST_AUTO_TEST_CASE(CommunicateAboveParalle) std::vector current(3); - commAbove.beginReset(); + commAboveBelow.beginReset(); for (std::size_t i = 0; i < current.size(); ++i) { auto gi = comm.rank() + comm.size() * i; if (gi==0) { - commAbove.pushBackEclIndex(-1, globalEclIndex[gi]); + commAboveBelow.pushBackEclIndex(-1, globalEclIndex[gi]); } else { - commAbove.pushBackEclIndex(globalEclIndex[gi-1], globalEclIndex[gi]); + commAboveBelow.pushBackEclIndex(globalEclIndex[gi-1], globalEclIndex[gi]); } current[i] = globalCurrent[gi]; } - commAbove.endReset(); - auto above = commAbove.communicate(-10.0, current.data(), current.size()); + commAboveBelow.endReset(); + auto above = commAboveBelow.communicateAbove(-10.0, current.data(), current.size()); if (comm.rank() == 0) BOOST_CHECK(above[0]==-10.0); @@ -274,5 +286,19 @@ BOOST_AUTO_TEST_CASE(CommunicateAboveParalle) BOOST_CHECK(above[i]==globalCurrent[gi-1]); } } + auto below = commAboveBelow.communicateBelow(-10.0, current.data(), current.size()); + if (comm.rank() == comm.size() - 1) + BOOST_CHECK(below.back() == -10.0); + + BOOST_CHECK(below.size() == current.size()); + + for (std::size_t i = 0; i < current.size(); ++i) + { + auto gi = comm.rank() + comm.size() * i; + if (gi < globalCurrent.size() - 1) + { + BOOST_CHECK(below[i]==globalCurrent[gi+1]); + } + } } } From c0c1897ea93a6ffc3d2fb3abd81778d1dc52daa1 Mon Sep 17 00:00:00 2001 From: Markus Blatt Date: Tue, 8 Dec 2020 21:40:21 +0100 Subject: [PATCH 3/5] Fix computeConnectionPressureDelta for distributed wells. As this is as sequential (ordering matters!) as it can get we need to communicate all perforations, do the partial sum with them and save result back to the local perforations. --- opm/simulators/wells/ParallelWellInfo.hpp | 85 +++++++++++++++++++++- opm/simulators/wells/StandardWell_impl.hpp | 2 +- 2 files changed, 84 insertions(+), 3 deletions(-) diff --git a/opm/simulators/wells/ParallelWellInfo.hpp b/opm/simulators/wells/ParallelWellInfo.hpp index 1fbbcdaa0..4d501721f 100644 --- a/opm/simulators/wells/ParallelWellInfo.hpp +++ b/opm/simulators/wells/ParallelWellInfo.hpp @@ -24,6 +24,7 @@ #include #include +#include #include #include @@ -93,9 +94,76 @@ public: std::vector communicateBelow(double first_value, const double* current, std::size_t size); -private: + + /// \brief Do a (in place) partial sum on values attached to all perforations. + /// + /// For distributed wells this may include perforations stored elsewhere. + /// The result is stored in ther range given as the parameters + /// \param begin The start of the range + /// \param ebd The end of the range + /// \tparam RAIterator The type og random access iterator + template + void partialSumPerfValues(RAIterator begin, RAIterator end) const + { + if (this->comm_.size() < 2) + { + std::partial_sum(begin, end, begin); + } + else + { #if HAVE_MPI + // The global index used in the index set current_indices + // is the index of the perforation in ECL Schedule definition. 
+ // This is assumed to give the topological order that is used + // when doing the partial sum. + // allgather the index of the perforation in ECL schedule and the value. + using Value = typename std::iterator_traits::value_type; + std::vector sizes(comm_.size()); + std::vector displ(comm_.size(), 0); + using GlobalIndex = typename IndexSet::IndexPair::GlobalIndex; + using Pair = std::pair; + std::vector my_pairs; + my_pairs.reserve(current_indices_.size()); + for (const auto& pair: current_indices_) + { + if (pair.local().attribute() == owner) + { + my_pairs.emplace_back(pair.global(), *(begin+(static_cast(pair.local())))); + } + } + int mySize = my_pairs.size(); + comm_.allgather(&mySize, 1, sizes.data()); + std::partial_sum(sizes.begin(), sizes.end(), displ.begin()+1); + std::vector global_pairs(displ.back()); + comm_.allgatherv(my_pairs.data(), my_pairs.size(), global_pairs.data(), sizes.data(), displ.data()); + // sort the complete range to get the correct ordering + std::sort(global_pairs.begin(), global_pairs.end(), + [](const Pair& p1, const Pair& p2){ return p1.first < p2.first; } ); + std::vector sums(global_pairs.size()); + std::transform(global_pairs.begin(), global_pairs.end(), sums.begin(), + [](const Pair& p) { return p.second; }); + std::partial_sum(sums.begin(), sums.end(),sums.begin()); + // assign the values (both ranges are sorted by the ecl index) + auto global_pair = global_pairs.begin(); + for (const auto& pair: current_indices_) + { + global_pair = std::lower_bound(global_pair, global_pairs.end(), + pair.global(), + [](const Pair& val1, const GlobalIndex& val2) + { return val1.first < val2; }); + assert(global_pair != global_pairs.end()); + assert(global_pair->first == pair.global()); + *(begin + static_cast(pair.local())) = sums[global_pair - global_pairs.begin()]; + } +#else + OPM_THROW(std::logic_error, "In a sequential run the size of the communicator should be 1!"); +#endif + } + } + +private: Communication comm_; +#if HAVE_MPI /// \brief Mapping of the local well index to ecl index IndexSet current_indices_; /// \brief Mapping of the above well index to ecl index @@ -225,7 +293,7 @@ public: /// \brief Sum all the values of the perforations template - typename std::iterator_traits::value_type sumPerfValues(It begin, It end) + typename std::iterator_traits::value_type sumPerfValues(It begin, It end) const { using V = typename std::iterator_traits::value_type; /// \todo cater for overlap later. Currently only owner @@ -233,6 +301,19 @@ public: return communication().sum(local); } + /// \brief Do a (in place) partial sum on values attached to all perforations. + /// + /// For distributed wells this may include perforations stored elsewhere. + /// The result is stored in ther range given as the parameters + /// \param begin The start of the range + /// \param ebd The end of the range + /// \tparam RAIterator The type og random access iterator + template + void partialSumPerfValues(RAIterator begin, RAIterator end) const + { + commAboveBelow_->partialSumPerfValues(begin, end); + } + /// \brief Free data of communication data structures. void clearCommunicateAboveBelow(); private: diff --git a/opm/simulators/wells/StandardWell_impl.hpp b/opm/simulators/wells/StandardWell_impl.hpp index fe8439033..ab94038c9 100644 --- a/opm/simulators/wells/StandardWell_impl.hpp +++ b/opm/simulators/wells/StandardWell_impl.hpp @@ -2142,7 +2142,7 @@ namespace Opm // This accumulation must be done per well. 
const auto beg = perf_pressure_diffs_.begin();
         const auto end = perf_pressure_diffs_.end();
-        std::partial_sum(beg, end, beg);
+        this->parallel_well_info_.partialSumPerfValues(beg, end);
     }
 

From d7ae211729cf3af7acaaf494af5eb8a86187db9c Mon Sep 17 00:00:00 2001
From: Markus Blatt
Date: Wed, 9 Dec 2020 15:37:03 +0100
Subject: [PATCH 4/5] Also test ParallelWellInfo with an MPI run if possible.

---
 CMakeLists.txt | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 12bfa5868..b0b08f806 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -212,6 +212,17 @@ opm_add_test(test_gatherdeferredlogger
     4 ${PROJECT_BINARY_DIR}
   )
 
+opm_add_test(test_parallelwellinfo_mpi
+  DEPENDS "opmsimulators"
+  LIBRARIES opmsimulators ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY}
+  SOURCES
+    tests/test_parallelwellinfo.cpp
+  CONDITION
+    MPI_FOUND AND Boost_UNIT_TEST_FRAMEWORK_FOUND
+  DRIVER_ARGS
+    4 ${PROJECT_BINARY_DIR}
+  )
+
 include(OpmBashCompletion)
 
 if (NOT BUILD_FLOW)

From b4ed86313e9e516205afb03b4b32cfe2a0265e97 Mon Sep 17 00:00:00 2001
From: Markus Blatt
Date: Fri, 11 Dec 2020 11:24:51 +0100
Subject: [PATCH 5/5] Use random access to simplify the code during the
 partial sum for distributed wells.

---
 opm/simulators/wells/ParallelWellInfo.hpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/opm/simulators/wells/ParallelWellInfo.hpp b/opm/simulators/wells/ParallelWellInfo.hpp
index 4d501721f..b1c202315 100644
--- a/opm/simulators/wells/ParallelWellInfo.hpp
+++ b/opm/simulators/wells/ParallelWellInfo.hpp
@@ -128,7 +128,7 @@ public:
             {
                 if (pair.local().attribute() == owner)
                 {
-                    my_pairs.emplace_back(pair.global(), *(begin+(static_cast(pair.local()))));
+                    my_pairs.emplace_back(pair.global(), begin[pair.local()]);
                 }
                 int mySize = my_pairs.size();
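For reference, the accumulation scheme that PATCH 3/5 introduces in partialSumPerfValues can be summarized without any MPI machinery. In the real code the gathered pairs come from comm_.allgatherv and the global indices from the Dune index set; the function and variable names below are illustrative only, a sketch of the gather-sort-partial_sum-scatter idea under those assumptions rather than the actual implementation:

    #include <algorithm>
    #include <cassert>
    #include <numeric>
    #include <utility>
    #include <vector>

    // (global ECL connection index, value at that perforation)
    using Pair = std::pair<int, double>;

    // 'gathered' plays the role of the allgatherv result: the pairs of *all*
    // ranks sharing the well. The local values are overwritten with the running
    // sums taken in global order, which is what computeConnectionPressureDelta
    // needs for the perforation pressure differences.
    void partialSumInGlobalOrder(std::vector<Pair>& gathered,        // pairs collected from every rank
                                 const std::vector<int>& my_globals, // global indices of my perforations
                                 std::vector<double>& my_values)     // my local values, updated in place
    {
        // Sort by global index to recover the perforation order along the well.
        std::sort(gathered.begin(), gathered.end(),
                  [](const Pair& a, const Pair& b) { return a.first < b.first; });

        // Running sum over the complete, ordered well.
        std::vector<double> sums(gathered.size());
        std::transform(gathered.begin(), gathered.end(), sums.begin(),
                       [](const Pair& p) { return p.second; });
        std::partial_sum(sums.begin(), sums.end(), sums.begin());

        // Copy the result back to the locally stored perforations.
        for (std::size_t i = 0; i < my_globals.size(); ++i) {
            auto pos = std::lower_bound(gathered.begin(), gathered.end(), my_globals[i],
                                        [](const Pair& p, int g) { return p.first < g; });
            assert(pos != gathered.end() && pos->first == my_globals[i]);
            my_values[i] = sums[pos - gathered.begin()];
        }
    }

Because the pairs are keyed by the ECL connection index, sorting them recovers the topological order of the perforations, so every rank ends up with exactly the values a purely sequential std::partial_sum over the whole well would have produced.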