diff --git a/CMakeLists.txt b/CMakeLists.txt
index 12bfa5868..b0b08f806 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -212,6 +212,17 @@ opm_add_test(test_gatherdeferredlogger
              4
              ${PROJECT_BINARY_DIR}
              )
+opm_add_test(test_parallelwellinfo_mpi
+             DEPENDS "opmsimulators"
+             LIBRARIES opmsimulators ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY}
+             SOURCES
+             tests/test_parallelwellinfo.cpp
+             CONDITION
+             MPI_FOUND AND Boost_UNIT_TEST_FRAMEWORK_FOUND
+             DRIVER_ARGS
+             4
+             ${PROJECT_BINARY_DIR}
+             )
+
 include(OpmBashCompletion)
 
 if (NOT BUILD_FLOW)
diff --git a/opm/simulators/wells/BlackoilWellModel_impl.hpp b/opm/simulators/wells/BlackoilWellModel_impl.hpp
index 6533ff457..b9bda118b 100644
--- a/opm/simulators/wells/BlackoilWellModel_impl.hpp
+++ b/opm/simulators/wells/BlackoilWellModel_impl.hpp
@@ -467,7 +467,7 @@ namespace Opm {
         // Clear the communication data structures for above values.
         for(auto&& pinfo : local_parallel_well_info_)
         {
-            pinfo->clearCommunicateAbove();
+            pinfo->clearCommunicateAboveBelow();
         }
     }
diff --git a/opm/simulators/wells/ParallelWellInfo.cpp b/opm/simulators/wells/ParallelWellInfo.cpp
index 5e8964406..03b879623 100644
--- a/opm/simulators/wells/ParallelWellInfo.cpp
+++ b/opm/simulators/wells/ParallelWellInfo.cpp
@@ -43,13 +43,13 @@ struct CommPolicy
 namespace Opm
 {
 
-CommunicateAbove::CommunicateAbove([[maybe_unused]] const Communication& comm)
+CommunicateAboveBelow::CommunicateAboveBelow([[maybe_unused]] const Communication& comm)
 #if HAVE_MPI
     : comm_(comm), interface_(comm_)
 #endif
 {}
 
-void CommunicateAbove::clear()
+void CommunicateAboveBelow::clear()
 {
 #if HAVE_MPI
     above_indices_ = {};
@@ -60,7 +60,7 @@ void CommunicateAbove::clear()
     count_ = 0;
 }
 
-void CommunicateAbove::beginReset()
+void CommunicateAboveBelow::beginReset()
 {
     clear();
 #if HAVE_MPI
@@ -72,7 +72,7 @@ void CommunicateAbove::beginReset()
 #endif
 }
 
-void CommunicateAbove::endReset()
+void CommunicateAboveBelow::endReset()
 {
 #if HAVE_MPI
     if (comm_.size() > 1)
@@ -104,9 +104,9 @@ struct CopyGatherScatter
 };
 
 
-std::vector<double> CommunicateAbove::communicate(double first_above,
-                                                  const double* current,
-                                                  std::size_t size)
+std::vector<double> CommunicateAboveBelow::communicateAbove(double first_above,
+                                                            const double* current,
+                                                            std::size_t size)
 {
     std::vector<double> above(size, first_above);
 
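
For orientation: with a single process, the above/below exchange degenerates to shifting the perforation values by one position, seeded with the caller-supplied boundary value; only the distributed case routes the shift through the Dune communication interface built in endReset(). A minimal standalone sketch of that serial fallback (hypothetical values, standard library only):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Serial behaviour of communicateAbove/communicateBelow: entry i receives
    // the value of perforation i-1 (resp. i+1); the first (resp. last) entry
    // receives the seed value passed by the caller.
    std::vector<double> aboveSerial(double first_above, const std::vector<double>& current)
    {
        std::vector<double> above(current.size(), first_above);
        if (above.size() > 1)
            std::copy(current.begin(), current.end() - 1, above.begin() + 1);
        return above;
    }

    std::vector<double> belowSerial(double last_below, const std::vector<double>& current)
    {
        std::vector<double> below(current.size(), last_below);
        if (below.size() > 1)
            std::copy(current.begin() + 1, current.end(), below.begin());
        return below;
    }

    int main()
    {
        const std::vector<double> current = {1.0, 2.0, 3.0};
        assert((aboveSerial(-10.0, current) == std::vector<double>{-10.0, 1.0, 2.0}));
        assert((belowSerial(-10.0, current) == std::vector<double>{2.0, 3.0, -10.0}));
    }
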
@@ -131,10 +131,37 @@ std::vector<double> CommunicateAbove::communicate(double first_above,
     }
     return above;
 }
+std::vector<double> CommunicateAboveBelow::communicateBelow(double last_below,
+                                                            const double* current,
+                                                            std::size_t size)
+{
+    std::vector<double> below(size, last_below);
 
-void CommunicateAbove::pushBackEclIndex([[maybe_unused]] int above,
-                                        [[maybe_unused]] int current,
-                                        [[maybe_unused]] bool isOwner)
+#if HAVE_MPI
+    if (comm_.size() > 1)
+    {
+        auto belowData = below.data();
+        // Ugly const_cast needed because the compiler complains that passing
+        // const double*& and double* as parameters is incompatible with the
+        // declaration template<class Data> backward(Data&, const Data&);
+        // that would need the first argument to be double* const&.
+        communicator_.backward(belowData, const_cast<double*>(current));
+    }
+    else
+#endif
+    {
+        if (below.size() > 1)
+        {
+            // No communication needed, just copy.
+            std::copy(current+1, current + below.size(), below.begin());
+        }
+    }
+    return below;
+}
+
+void CommunicateAboveBelow::pushBackEclIndex([[maybe_unused]] int above,
+                                             [[maybe_unused]] int current,
+                                             [[maybe_unused]] bool isOwner)
 {
 #if HAVE_MPI
     if (comm_.size() > 1)
@@ -175,8 +202,8 @@ ParallelWellInfo::ParallelWellInfo(const std::string& name,
     : name_(name), hasLocalCells_ (hasLocalCells),
       isOwner_(true), rankWithFirstPerf_(-1),
       comm_(new Communication(Dune::MPIHelper::getLocalCommunicator())),
-      commAbove_(new CommunicateAbove(*comm_))
-    {}
+      commAboveBelow_(new CommunicateAboveBelow(*comm_))
+{}
 
 ParallelWellInfo::ParallelWellInfo(const std::pair<std::string, bool>& well_info,
                                    [[maybe_unused]] Communication allComm)
@@ -191,7 +218,7 @@ ParallelWellInfo::ParallelWellInfo(const std::pair<std::string, bool>& well_info,
 #else
     comm_.reset(new Communication(Dune::MPIHelper::getLocalCommunicator()));
 #endif
-    commAbove_.reset(new CommunicateAbove(*comm_));
+    commAboveBelow_.reset(new CommunicateAboveBelow(*comm_));
     isOwner_ = (comm_->rank() == 0);
 }
 
@@ -207,38 +234,53 @@ void ParallelWellInfo::communicateFirstPerforation(bool hasFirst)
 
 void ParallelWellInfo::pushBackEclIndex(int above, int current)
 {
-    commAbove_->pushBackEclIndex(above, current);
+    commAboveBelow_->pushBackEclIndex(above, current);
 }
 
 void ParallelWellInfo::beginReset()
 {
-    commAbove_->beginReset();
+    commAboveBelow_->beginReset();
 }
 
 void ParallelWellInfo::endReset()
 {
-    commAbove_->beginReset();
+    commAboveBelow_->beginReset();
 }
 
-void ParallelWellInfo::clearCommunicateAbove()
+void ParallelWellInfo::clearCommunicateAboveBelow()
 {
-    commAbove_->clear();
+    commAboveBelow_->clear();
 }
 
 std::vector<double> ParallelWellInfo::communicateAboveValues(double zero_value,
                                                              const double* current_values,
                                                              std::size_t size) const
 {
-    return commAbove_->communicate(zero_value, current_values,
-                                   size);
+    return commAboveBelow_->communicateAbove(zero_value, current_values,
+                                             size);
 }
 
 std::vector<double> ParallelWellInfo::communicateAboveValues(double zero_value,
                                                              const std::vector<double>& current_values) const
 {
-    return commAbove_->communicate(zero_value, current_values.data(),
-                                   current_values.size());
+    return commAboveBelow_->communicateAbove(zero_value, current_values.data(),
+                                             current_values.size());
+}
+
+std::vector<double> ParallelWellInfo::communicateBelowValues(double last_value,
+                                                             const double* current_values,
+                                                             std::size_t size) const
+{
+    return commAboveBelow_->communicateBelow(last_value, current_values,
+                                             size);
+}
+
+std::vector<double> ParallelWellInfo::communicateBelowValues(double last_value,
+                                                             const std::vector<double>& current_values) const
+{
+    return commAboveBelow_->communicateBelow(last_value, current_values.data(),
+                                             current_values.size());
 }
 
 bool operator<(const ParallelWellInfo& well1, const ParallelWellInfo& well2)
@@ -294,7 +336,7 @@ bool operator!=(const ParallelWellInfo& well, const std::pair<std::string, bool>& pair)
 }
 
 CheckDistributedWellConnections::CheckDistributedWellConnections(const Well& well,
-                                                                     const ParallelWellInfo& info)
+                                                                 const ParallelWellInfo& info)
     : well_(well), pwinfo_(info)
 {
     foundConnections_.resize(well.getConnections().size(), 0);
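
A usage sketch for the public wrappers added above (the function names and signatures are those from this patch; the well setup and numbers are hypothetical, and pwinfo must already have had its ECL indices registered between beginReset() and endReset()):

    #include <vector>
    #include <opm/simulators/wells/ParallelWellInfo.hpp>

    void neighbourPressures(const Opm::ParallelWellInfo& pwinfo)
    {
        // Local perforation pressures in local order (hypothetical values).
        std::vector<double> perf_pressure = {210.0e5, 220.0e5, 230.0e5};
        const double bhp = 200.0e5; // hypothetical seed for the first perforation
        // above[i] is the pressure of the perforation above perforation i,
        // even when that neighbour lives on another process.
        std::vector<double> above = pwinfo.communicateAboveValues(bhp, perf_pressure);
        // below[i] is the pressure of the perforation below; the last one gets 0.0.
        std::vector<double> below = pwinfo.communicateBelowValues(0.0, perf_pressure);
    }
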
diff --git a/opm/simulators/wells/ParallelWellInfo.hpp b/opm/simulators/wells/ParallelWellInfo.hpp
index 939e3d87c..b1c202315 100644
--- a/opm/simulators/wells/ParallelWellInfo.hpp
+++ b/opm/simulators/wells/ParallelWellInfo.hpp
@@ -24,16 +24,19 @@
 #include
 #include
+#include
 #include
 #include
+#include
+#include
 
 namespace Opm
 {
 
-/// \brief Class to facilitate getting values associated with the above perforation
+/// \brief Class to facilitate getting values associated with the above/below perforation
 ///
-class CommunicateAbove
+class CommunicateAboveBelow
 {
     enum Attribute {
         owner=1, overlap=2
@@ -51,7 +54,7 @@ class CommunicateAbove
 #endif
 
 public:
-    explicit CommunicateAbove(const Communication& comm);
+    explicit CommunicateAboveBelow(const Communication& comm);
     /// \brief Adds information about original index of the perforations in ECL Schedule.
     ///
    /// \warning Theses indices need to be push in the same order as they
@@ -79,13 +82,88 @@ public:
     /// \param current C-array of the values at the perforations
     /// \param size The size of the C-array and the returned vector
     /// \return a vector containing the values for the perforation above.
-    std::vector<double> communicate(double first_value,
-                                    const double* current,
-                                    std::size_t size);
+    std::vector<double> communicateAbove(double first_value,
+                                         const double* current,
+                                         std::size_t size);
+
+    /// \brief Creates an array of values for the perforation below.
+    /// \param last_below Value to use for below of the last perforation
+    /// \param current C-array of the values at the perforations
+    /// \param size The size of the C-array and the returned vector
+    /// \return a vector containing the values for the perforation below.
+    std::vector<double> communicateBelow(double last_below,
+                                         const double* current,
+                                         std::size_t size);
+
+    /// \brief Do a (in place) partial sum on values attached to all perforations.
+    ///
+    /// For distributed wells this may include perforations stored elsewhere.
+    /// The result is stored in the range given by the parameters.
+    /// \param begin The start of the range
+    /// \param end The end of the range
+    /// \tparam RAIterator The type of the random access iterator
+    template<class RAIterator>
+    void partialSumPerfValues(RAIterator begin, RAIterator end) const
+    {
+        if (this->comm_.size() < 2)
+        {
+            std::partial_sum(begin, end, begin);
+        }
+        else
+        {
+#if HAVE_MPI
+            // The global index used in the index set current_indices
+            // is the index of the perforation in ECL Schedule definition.
+            // This is assumed to give the topological order that is used
+            // when doing the partial sum.
+            // allgather the index of the perforation in ECL schedule and the value.
+            using Value = typename std::iterator_traits<RAIterator>::value_type;
+            std::vector<int> sizes(comm_.size());
+            std::vector<int> displ(comm_.size() + 1, 0);
+            using GlobalIndex = typename IndexSet::IndexPair::GlobalIndex;
+            using Pair = std::pair<GlobalIndex, Value>;
+            std::vector<Pair> my_pairs;
+            my_pairs.reserve(current_indices_.size());
+            for (const auto& pair: current_indices_)
+            {
+                if (pair.local().attribute() == owner)
+                {
+                    my_pairs.emplace_back(pair.global(), begin[pair.local()]);
+                }
+            }
+            int mySize = my_pairs.size();
+            comm_.allgather(&mySize, 1, sizes.data());
+            std::partial_sum(sizes.begin(), sizes.end(), displ.begin()+1);
+            std::vector<Pair> global_pairs(displ.back());
+            comm_.allgatherv(my_pairs.data(), my_pairs.size(), global_pairs.data(), sizes.data(), displ.data());
+            // sort the complete range to get the correct ordering
+            std::sort(global_pairs.begin(), global_pairs.end(),
+                      [](const Pair& p1, const Pair& p2){ return p1.first < p2.first; } );
+            std::vector<Value> sums(global_pairs.size());
+            std::transform(global_pairs.begin(), global_pairs.end(), sums.begin(),
+                           [](const Pair& p) { return p.second; });
+            std::partial_sum(sums.begin(), sums.end(), sums.begin());
+            // assign the values (both ranges are sorted by the ecl index)
+            auto global_pair = global_pairs.begin();
+            for (const auto& pair: current_indices_)
+            {
+                global_pair = std::lower_bound(global_pair, global_pairs.end(),
+                                               pair.global(),
+                                               [](const Pair& val1, const GlobalIndex& val2)
+                                               { return val1.first < val2; });
+                assert(global_pair != global_pairs.end());
+                assert(global_pair->first == pair.global());
+                *(begin + static_cast<std::size_t>(pair.local())) = sums[global_pair - global_pairs.begin()];
+            }
+#else
+            OPM_THROW(std::logic_error, "In a sequential run the size of the communicator should be 1!");
+#endif
+        }
+    }
 
 private:
-#if HAVE_MPI
     Communication comm_;
+#if HAVE_MPI
     /// \brief Mapping of the local well index to ecl index
     IndexSet current_indices_;
     /// \brief Mapping of the above well index to ecl index
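
The core of partialSumPerfValues above is a gather-sort-scan pipeline: each rank contributes (ECL index, value) pairs for the perforations it owns, the gathered pairs are ordered by ECL index to recover the perforation order along the well bore, a prefix sum is taken over that ordering, and each rank reads back the sums for its own indices. A serial sketch of the same pipeline (the allgatherv step is simulated by concatenating two hypothetical per-rank contributions):

    #include <algorithm>
    #include <cassert>
    #include <numeric>
    #include <utility>
    #include <vector>

    int main()
    {
        using Pair = std::pair<int, double>; // (ECL perforation index, value)
        // Hypothetical owned pairs of two ranks; together they form one well.
        std::vector<Pair> rank0 = {{0, 1.0}, {2, 3.0}};
        std::vector<Pair> rank1 = {{1, 2.0}, {3, 4.0}};
        // "allgatherv": every rank ends up with the concatenation.
        std::vector<Pair> global_pairs = rank0;
        global_pairs.insert(global_pairs.end(), rank1.begin(), rank1.end());
        // Sort by ECL index to restore the order along the well bore.
        std::sort(global_pairs.begin(), global_pairs.end(),
                  [](const Pair& a, const Pair& b) { return a.first < b.first; });
        // Prefix sum over the ordered values.
        std::vector<double> sums(global_pairs.size());
        std::transform(global_pairs.begin(), global_pairs.end(), sums.begin(),
                       [](const Pair& p) { return p.second; });
        std::partial_sum(sums.begin(), sums.end(), sums.begin());
        assert((sums == std::vector<double>{1.0, 3.0, 6.0, 10.0}));
        // Each rank would now look up its own indices (std::lower_bound) in
        // global_pairs and copy the matching entries of 'sums' back.
    }
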
@@ -155,15 +233,31 @@ public:
     /// \param current C-array of the values at the perforations
     /// \param size The size of the C-array and the returned vector
     /// \return a vector containing the values for the perforation above.
-    std::vector<double> communicateAboveValues(double zero_value,
+    std::vector<double> communicateAboveValues(double first_value,
                                                const double* current,
                                                std::size_t size) const;
 
     /// \brief Creates an array of values for the perforation above.
     /// \param first_value Value to use for above of the first perforation
     /// \param current vector of current values
-    std::vector<double> communicateAboveValues(double zero_value,
+    std::vector<double> communicateAboveValues(double first_value,
                                                const std::vector<double>& current) const;
+
+    /// \brief Creates an array of values for the perforation below.
+    /// \param last_value Value to use for below of the last perforation
+    /// \param current C-array of the values at the perforations
+    /// \param size The size of the C-array and the returned vector
+    /// \return a vector containing the values for the perforation below.
+    std::vector<double> communicateBelowValues(double last_value,
+                                               const double* current,
+                                               std::size_t size) const;
+
+    /// \brief Creates an array of values for the perforation below.
+    /// \param last_value Value to use for below of the last perforation
+    /// \param current vector of current values
+    std::vector<double> communicateBelowValues(double last_value,
+                                               const std::vector<double>& current) const;
+
+    /// \brief Adds information about the ecl indices of the perforations.
     ///
    /// \warning Theses indices need to be push in the same order as they
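
A side note on the sizes/displacements bookkeeping used with allgatherv in partialSumPerfValues: the gathered per-rank counts are turned into receive offsets by a prefix sum into a vector with one extra leading zero, whose last entry is then the total number of gathered pairs. A standalone illustration (hypothetical counts):

    #include <cassert>
    #include <numeric>
    #include <vector>

    int main()
    {
        std::vector<int> sizes = {2, 0, 3};          // hypothetical per-rank pair counts
        std::vector<int> displ(sizes.size() + 1, 0); // note the extra leading slot
        std::partial_sum(sizes.begin(), sizes.end(), displ.begin() + 1);
        assert((displ == std::vector<int>{0, 2, 2, 5}));
        // displ[r] is where rank r's data starts; displ.back() is the total size.
    }
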
@@ -197,8 +291,31 @@ public:
     /// \brief Inidicate completion of reset of the ecl index information
     void endReset();
 
+    /// \brief Sum all the values of the perforations
+    template<typename It>
+    typename std::iterator_traits<It>::value_type sumPerfValues(It begin, It end) const
+    {
+        using V = typename std::iterator_traits<It>::value_type;
+        /// \todo cater for overlap later. Currently only owner
+        auto local = std::accumulate(begin, end, V());
+        return communication().sum(local);
+    }
+
+    /// \brief Do a (in place) partial sum on values attached to all perforations.
+    ///
+    /// For distributed wells this may include perforations stored elsewhere.
+    /// The result is stored in the range given by the parameters.
+    /// \param begin The start of the range
+    /// \param end The end of the range
+    /// \tparam RAIterator The type of the random access iterator
+    template<class RAIterator>
+    void partialSumPerfValues(RAIterator begin, RAIterator end) const
+    {
+        commAboveBelow_->partialSumPerfValues(begin, end);
+    }
+
     /// \brief Free data of communication data structures.
-    void clearCommunicateAbove();
+    void clearCommunicateAboveBelow();
 
 private:
     /// \brief Deleter that also frees custom MPI communicators
@@ -222,7 +339,7 @@ private:
     std::unique_ptr<Communication, DestroyComm> comm_;
 
     /// \brief used to communicate the values for the perforation above.
-    std::unique_ptr<CommunicateAbove> commAbove_;
+    std::unique_ptr<CommunicateAboveBelow> commAboveBelow_;
 };
 
 /// \brief Class checking that all connections are on active cells
diff --git a/opm/simulators/wells/StandardWell_impl.hpp b/opm/simulators/wells/StandardWell_impl.hpp
index fe8439033..ab94038c9 100644
--- a/opm/simulators/wells/StandardWell_impl.hpp
+++ b/opm/simulators/wells/StandardWell_impl.hpp
@@ -2142,7 +2142,7 @@ namespace Opm
         // This accumulation must be done per well.
         const auto beg = perf_pressure_diffs_.begin();
         const auto end = perf_pressure_diffs_.end();
-        std::partial_sum(beg, end, beg);
+        this->parallel_well_info_.partialSumPerfValues(beg, end);
     }
diff --git a/opm/simulators/wells/WellStateFullyImplicitBlackoil.hpp b/opm/simulators/wells/WellStateFullyImplicitBlackoil.hpp
index 21142a44e..227de193f 100644
--- a/opm/simulators/wells/WellStateFullyImplicitBlackoil.hpp
+++ b/opm/simulators/wells/WellStateFullyImplicitBlackoil.hpp
@@ -146,11 +146,12 @@
             const auto& well_info = this->wellMap().at(wname);
             const int connpos = well_info[1];
             const int num_perf_this_well = well_info[2];
+            int global_num_perf_this_well = parallel_well_info[w]->communication().sum(num_perf_this_well);
             for (int perf = connpos; perf < connpos + num_perf_this_well; ++perf) {
                 if (wells_ecl[w].getStatus() == Well::Status::OPEN) {
                     for (int p = 0; p < np; ++p) {
-                        perfphaserates_[np*perf + p] = wellRates()[np*w + p] / double(num_perf_this_well);
+                        perfphaserates_[np*perf + p] = wellRates()[np*w + p] / double(global_num_perf_this_well);
                     }
                 }
                 perfPress()[perf] = cellPressures[well_perf_data[w][perf-connpos].cell_index];
@@ -224,6 +225,10 @@
                     // perfPhaseRates
                     const int oldPerf_idx_beg = (*it).second[ 1 ];
                     const int num_perf_old_well = (*it).second[ 2 ];
+                    int num_perf_changed = (num_perf_old_well != num_perf_this_well) ? 1 : 0;
+                    num_perf_changed = parallel_well_info[w]->communication().sum(num_perf_changed);
+                    bool global_num_perf_same = (num_perf_changed == 0);
+
                     // copy perforation rates when the number of perforations is equal,
                     // otherwise initialize perfphaserates to well rates divided by the number of perforations.
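
Both additions above turn a local property into a global one with a single sum reduction: the global perforation count is the sum of the local counts, and "did the perforation count change anywhere?" is encoded as a 0/1 flag whose global sum is zero exactly when no rank saw a change. A sketch of that logic with the reduction stubbed out (hypothetical helper globalSum; the real code uses ParallelWellInfo::communication().sum()):

    #include <cassert>
    #include <numeric>
    #include <vector>

    // Hypothetical stand-in for communication().sum(): the "ranks" are
    // simulated by a vector holding each rank's local value.
    int globalSum(const std::vector<int>& local_values)
    {
        return std::accumulate(local_values.begin(), local_values.end(), 0);
    }

    int main()
    {
        // Perforation counts of one well, distributed over three ranks.
        std::vector<int> local_num_perf = {2, 3, 1};
        int global_num_perf = globalSum(local_num_perf);
        assert(global_num_perf == 6); // divisor for the per-perforation rates
        // Change flags: 1 where the local count differs from the previous state.
        std::vector<int> changed = {0, 0, 0};
        bool global_num_perf_same = (globalSum(changed) == 0);
        assert(global_num_perf_same);
    }
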
@@ -231,7 +236,7 @@
                     if (new_iter == this->wellMap().end())
                         throw std::logic_error("Fatal error in WellStateFullyImplicitBlackoil - could not find well: " + well.name());
                     int connpos = new_iter->second[1];
-                    if( num_perf_old_well == num_perf_this_well )
+                    if( global_num_perf_same )
                     {
                         int old_perf_phase_idx = oldPerf_idx_beg *np;
                         for (int perf_phase_idx = connpos*np;
@@ -240,14 +245,15 @@
                             perfPhaseRates()[ perf_phase_idx ] = prevState->perfPhaseRates()[ old_perf_phase_idx ];
                         }
                     } else {
+                        int global_num_perf_this_well = parallel_well_info[w]->communication().sum(num_perf_this_well);
                         for (int perf = connpos; perf < connpos + num_perf_this_well; ++perf) {
                             for (int p = 0; p < np; ++p) {
-                                perfPhaseRates()[np*perf + p] = wellRates()[np*newIndex + p] / double(num_perf_this_well);
+                                perfPhaseRates()[np*perf + p] = wellRates()[np*newIndex + p] / double(global_num_perf_this_well);
                             }
                         }
                     }
 
                     // perfPressures
-                    if( num_perf_old_well == num_perf_this_well )
+                    if( global_num_perf_same )
                     {
                         int oldPerf_idx = oldPerf_idx_beg;
                         for (int perf = connpos; perf < connpos + num_perf_this_well; ++perf, ++oldPerf_idx )
@@ -257,7 +263,7 @@
                     }
                     // perfSolventRates
                     if (pu.has_solvent) {
-                        if( num_perf_old_well == num_perf_this_well )
+                        if( global_num_perf_same )
                         {
                             int oldPerf_idx = oldPerf_idx_beg;
                             for (int perf = connpos; perf < connpos + num_perf_this_well; ++perf, ++oldPerf_idx )
@@ -907,7 +913,7 @@
         /// One rate pr well
         double solventWellRate(const int w) const
         {
-            return std::accumulate(&perfRateSolvent_[0] + first_perf_index_[w], &perfRateSolvent_[0] + first_perf_index_[w+1], 0.0);
+            return parallel_well_info_[w]->sumPerfValues(&perfRateSolvent_[0] + first_perf_index_[w], &perfRateSolvent_[0] + first_perf_index_[w+1]);
         }
 
         /// One rate pr well connection.
@@ -916,7 +922,7 @@
         /// One rate pr well
         double polymerWellRate(const int w) const
         {
-            return std::accumulate(&perfRatePolymer_[0] + first_perf_index_[w], &perfRatePolymer_[0] + first_perf_index_[w+1], 0.0);
+            return parallel_well_info_[w]->sumPerfValues(&perfRatePolymer_[0] + first_perf_index_[w], &perfRatePolymer_[0] + first_perf_index_[w+1]);
         }
 
         /// One rate pr well connection.
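
The solvent/polymer/brine rate changes here all route through sumPerfValues, which is a local std::accumulate followed by a global sum; per the \todo in the header, overlap perforations are not yet treated specially. A sketch of the arithmetic with two hypothetical ranks:

    #include <cassert>
    #include <numeric>
    #include <vector>

    int main()
    {
        // Hypothetical per-perforation solvent rates on two ranks.
        std::vector<double> rank0 = {1.0, 2.0};
        std::vector<double> rank1 = {3.0};
        // Local accumulate on each rank ...
        double local0 = std::accumulate(rank0.begin(), rank0.end(), 0.0);
        double local1 = std::accumulate(rank1.begin(), rank1.end(), 0.0);
        // ... followed by communication().sum() across ranks.
        double well_rate = local0 + local1;
        assert(well_rate == 6.0);
    }
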
@@ -925,7 +931,7 @@
         /// One rate pr well
         double brineWellRate(const int w) const
         {
-            return std::accumulate(&perfRateBrine_[0] + first_perf_index_[w], &perfRateBrine_[0] + first_perf_index_[w+1], 0.0);
+            return parallel_well_info_[w]->sumPerfValues(&perfRateBrine_[0] + first_perf_index_[w], &perfRateBrine_[0] + first_perf_index_[w+1]);
         }
 
         std::vector<double>& wellReservoirRates()
diff --git a/tests/test_parallelwellinfo.cpp b/tests/test_parallelwellinfo.cpp
index 0cfe06469..8e6fa2766 100644
--- a/tests/test_parallelwellinfo.cpp
+++ b/tests/test_parallelwellinfo.cpp
@@ -153,64 +153,76 @@ BOOST_AUTO_TEST_CASE(ParallelWellComparison)
 }
 
 
-BOOST_AUTO_TEST_CASE(CommunicateAboveSelf)
+BOOST_AUTO_TEST_CASE(CommunicateAboveBelowSelf)
 {
     auto comm = Dune::MPIHelper::getLocalCommunicator();
-    Opm::CommunicateAbove commAbove{ comm };
+    Opm::CommunicateAboveBelow commAboveBelow{ comm };
     for(std::size_t count=0; count < 2; ++count)
     {
         std::vector<int> eclIndex = {0, 1, 2, 3, 7 , 8, 10, 11};
         std::vector<double> current(eclIndex.size());
         std::transform(eclIndex.begin(), eclIndex.end(), current.begin(),
                        [](double v){ return 1+10.0*v;});
-        commAbove.beginReset();
+        commAboveBelow.beginReset();
         for (std::size_t i = 0; i < current.size(); ++i)
         {
             if (i==0)
-                commAbove.pushBackEclIndex(-1, eclIndex[i]);
+                commAboveBelow.pushBackEclIndex(-1, eclIndex[i]);
             else
-                commAbove.pushBackEclIndex(eclIndex[i-1], eclIndex[i]);
+                commAboveBelow.pushBackEclIndex(eclIndex[i-1], eclIndex[i]);
         }
-        commAbove.endReset();
-        auto above = commAbove.communicate(-10.0, current.data(), current.size());
+        commAboveBelow.endReset();
+        auto above = commAboveBelow.communicateAbove(-10.0, current.data(), current.size());
         BOOST_CHECK(above[0]==-10.0);
         BOOST_CHECK(above.size() == current.size());
         auto a = above.begin()+1;
         std::for_each(current.begin(), current.begin() + (current.size()-1),
                       [&a](double v){ BOOST_CHECK(*(a++) == v);});
+        auto below = commAboveBelow.communicateBelow(-10.0, current.data(), current.size());
+        BOOST_CHECK(below.back() == -10.0);
+        BOOST_CHECK(below.size() == current.size());
+        auto b = below.begin();
+        std::for_each(current.begin()+1, current.end(),
+                      [&b](double v){ BOOST_CHECK(*(b++) == v);});
     }
 }
 
-BOOST_AUTO_TEST_CASE(CommunicateAboveSelf1)
+BOOST_AUTO_TEST_CASE(CommunicateAboveBelowSelf1)
 {
     auto comm = Dune::MPIHelper::getLocalCommunicator();
-    Opm::CommunicateAbove commAbove{ comm };
+    Opm::CommunicateAboveBelow commAboveBelow{ comm };
     for(std::size_t count=0; count < 2; ++count)
     {
         std::vector<int> eclIndex = {0};
         std::vector<double> current(eclIndex.size());
         std::transform(eclIndex.begin(), eclIndex.end(), current.begin(),
                        [](double v){ return 1+10.0*v;});
-        commAbove.beginReset();
+        commAboveBelow.beginReset();
         for (std::size_t i = 0; i < current.size(); ++i)
        {
             if (i==0)
-                commAbove.pushBackEclIndex(-1, eclIndex[i]);
+                commAboveBelow.pushBackEclIndex(-1, eclIndex[i]);
             else
-                commAbove.pushBackEclIndex(eclIndex[i-1], eclIndex[i]);
+                commAboveBelow.pushBackEclIndex(eclIndex[i-1], eclIndex[i]);
         }
-        commAbove.endReset();
-        auto above = commAbove.communicate(-10.0, current.data(), current.size());
+        commAboveBelow.endReset();
+        auto above = commAboveBelow.communicateAbove(-10.0, current.data(), current.size());
         BOOST_CHECK(above[0]==-10.0);
         BOOST_CHECK(above.size() == current.size());
         auto a = above.begin()+1;
         std::for_each(current.begin(), current.begin() + (current.size()-1),
                       [&a](double v){ BOOST_CHECK(*(a++) == v);});
+        auto below = commAboveBelow.communicateBelow(-10.0, current.data(), current.size());
+        BOOST_CHECK(below.back() == -10.0);
+        BOOST_CHECK(below.size() == current.size());
+        auto b = below.begin();
+        std::for_each(current.begin()+1, current.end(),
+                      [&b](double v){ BOOST_CHECK(*(b++) == v);});
     }
 }
 
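
In the parallel test that follows, perforations are dealt out round-robin: local index i on rank r corresponds to global index gi = r + size * i, so consecutive global perforations always sit on different ranks and every above/below exchange crosses a process boundary. A quick standalone check of that mapping (hypothetical rank count):

    #include <cassert>

    int main()
    {
        const int size = 3; // hypothetical number of ranks
        // Rank r owns global indices r, r + size, r + 2*size, ...
        for (int rank = 0; rank < size; ++rank)
            for (int i = 0; i < 3; ++i)
            {
                int gi = rank + size * i;
                assert(gi % size == rank); // inverse mapping: rank = gi % size
                assert(gi / size == i);    //                  i    = gi / size
            }
    }
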
-BOOST_AUTO_TEST_CASE(CommunicateAboveParalle)
+BOOST_AUTO_TEST_CASE(CommunicateAboveBelowParallel)
 {
     using MPIComm = typename Dune::MPIHelper::MPICommunicator;
 #if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
     using Communication = Dune::Communication<MPIComm>;
 #else
     using Communication = Dune::CollectiveCommunication<MPIComm>;
 #endif
     auto comm = Communication(Dune::MPIHelper::getCommunicator());
@@ -220,7 +232,7 @@
 
-    Opm::CommunicateAbove commAbove{ comm };
+    Opm::CommunicateAboveBelow commAboveBelow{ comm };
     for(std::size_t count=0; count < 2; ++count)
     {
         std::vector<int> globalEclIndex = {0, 1, 2, 3, 7 , 8, 10, 11};
@@ -244,23 +256,23 @@
         std::vector<double> current(3);
 
-        commAbove.beginReset();
+        commAboveBelow.beginReset();
         for (std::size_t i = 0; i < current.size(); ++i)
         {
             auto gi = comm.rank() + comm.size() * i;
 
             if (gi==0)
             {
-                commAbove.pushBackEclIndex(-1, globalEclIndex[gi]);
+                commAboveBelow.pushBackEclIndex(-1, globalEclIndex[gi]);
             }
             else
             {
-                commAbove.pushBackEclIndex(globalEclIndex[gi-1], globalEclIndex[gi]);
+                commAboveBelow.pushBackEclIndex(globalEclIndex[gi-1], globalEclIndex[gi]);
             }
             current[i] = globalCurrent[gi];
         }
 
-        commAbove.endReset();
-        auto above = commAbove.communicate(-10.0, current.data(), current.size());
+        commAboveBelow.endReset();
+        auto above = commAboveBelow.communicateAbove(-10.0, current.data(), current.size());
 
         if (comm.rank() == 0)
             BOOST_CHECK(above[0]==-10.0);
@@ -274,5 +286,19 @@
                 BOOST_CHECK(above[i]==globalCurrent[gi-1]);
             }
         }
+        auto below = commAboveBelow.communicateBelow(-10.0, current.data(), current.size());
+        if (comm.rank() == comm.size() - 1)
+            BOOST_CHECK(below.back() == -10.0);
+
+        BOOST_CHECK(below.size() == current.size());
+
+        for (std::size_t i = 0; i < current.size(); ++i)
+        {
+            auto gi = comm.rank() + comm.size() * i;
+            if (gi < globalCurrent.size() - 1)
+            {
+                BOOST_CHECK(below[i]==globalCurrent[gi+1]);
+            }
+        }
     }
 }
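
Relatedly, the StandardWell change earlier replaces a plain std::partial_sum over perf_pressure_diffs_ with partialSumPerfValues; for a well confined to one process the two coincide, which is exactly the serial branch shown in the header. A minimal illustration of that in-place scan (hypothetical pressure differences):

    #include <cassert>
    #include <numeric>
    #include <vector>

    int main()
    {
        // Hypothetical neighbour-to-neighbour pressure differences along a well.
        std::vector<double> perf_pressure_diffs = {0.5, 0.25, 0.25};
        // In the serial case partialSumPerfValues reduces to this in-place scan,
        // turning per-segment differences into cumulative offsets from the top.
        std::partial_sum(perf_pressure_diffs.begin(), perf_pressure_diffs.end(),
                         perf_pressure_diffs.begin());
        assert((perf_pressure_diffs == std::vector<double>{0.5, 0.75, 1.0}));
    }
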