Fix computeConnectionPressureDelta for distributed wells.

As this is as sequential as it can get (the ordering of the perforations matters!), we need to
communicate all perforations, do the partial sum over them, and save the
result back to the local perforations.
Markus Blatt 2020-12-08 21:40:21 +01:00
parent 35218bf042
commit c0c1897ea9
2 changed files with 84 additions and 3 deletions
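
A minimal single-process sketch of the approach described in the commit message (an illustration only, not code from this patch; the helper name prefixSumByEclIndex and the plain (ECL index, value) pair layout are made up): each rank contributes pairs for the perforations it owns, all pairs are gathered on every rank, sorted by their ECL (Schedule) index to recover the topological order along the well, partially summed in that order, and the sums are then written back to the locally stored perforations.

#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

// Hypothetical helper: 'gathered' stands for the result of gathering
// (ECL index, value) pairs from all ranks, in arbitrary order.
std::vector<std::pair<int, double>>
prefixSumByEclIndex(std::vector<std::pair<int, double>> gathered)
{
    // Sorting by the ECL index restores the perforation order along the
    // well; that order is what makes the partial sum meaningful.
    std::sort(gathered.begin(), gathered.end(),
              [](const auto& a, const auto& b) { return a.first < b.first; });
    // In-place prefix sum over the values in that order.
    for (std::size_t i = 1; i < gathered.size(); ++i) {
        gathered[i].second += gathered[i - 1].second;
    }
    return gathered;
}

Each rank then looks up the entries for its own perforations in the sorted, summed sequence and copies the values back, which is what the lower_bound loop in the patch below does.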


@@ -24,6 +24,7 @@
#include <dune/common/parallel/plocalindex.hh>
#include <dune/istl/owneroverlapcopy.hh>
#include <opm/common/ErrorMacros.hpp>
#include <opm/parser/eclipse/EclipseState/Schedule/Well/Well.hpp>
#include <memory>
@@ -93,9 +94,76 @@ public:
std::vector<double> communicateBelow(double first_value,
const double* current,
std::size_t size);
/// \brief Do an (in-place) partial sum on values attached to all perforations.
///
/// For distributed wells this may include perforations stored elsewhere.
/// The result is stored in the range given by the parameters.
/// \param begin The start of the range
/// \param end The end of the range
/// \tparam RAIterator The type of random access iterator
template<class RAIterator>
void partialSumPerfValues(RAIterator begin, RAIterator end) const
{
    if (this->comm_.size() < 2)
    {
        std::partial_sum(begin, end, begin);
    }
    else
    {
#if HAVE_MPI
        // The global index used in the index set current_indices_
        // is the index of the perforation in the ECL Schedule definition.
        // This is assumed to give the topological order that is used
        // when doing the partial sum.
        // Allgather the ECL Schedule index of each owned perforation together with its value.
        using Value = typename std::iterator_traits<RAIterator>::value_type;
        std::vector<int> sizes(comm_.size());
        // Displacements for allgatherv: one leading zero, displ.back() holds the total count.
        std::vector<int> displ(comm_.size() + 1, 0);
        using GlobalIndex = typename IndexSet::IndexPair::GlobalIndex;
        using Pair = std::pair<GlobalIndex,Value>;
        std::vector<Pair> my_pairs;
        my_pairs.reserve(current_indices_.size());
        for (const auto& pair : current_indices_)
        {
            if (pair.local().attribute() == owner)
            {
                my_pairs.emplace_back(pair.global(), *(begin + static_cast<const int&>(pair.local())));
            }
        }
        int mySize = my_pairs.size();
        comm_.allgather(&mySize, 1, sizes.data());
        std::partial_sum(sizes.begin(), sizes.end(), displ.begin() + 1);
        std::vector<Pair> global_pairs(displ.back());
        comm_.allgatherv(my_pairs.data(), my_pairs.size(), global_pairs.data(), sizes.data(), displ.data());
        // Sort the complete range to get the correct ordering.
        std::sort(global_pairs.begin(), global_pairs.end(),
                  [](const Pair& p1, const Pair& p2){ return p1.first < p2.first; });
        std::vector<Value> sums(global_pairs.size());
        std::transform(global_pairs.begin(), global_pairs.end(), sums.begin(),
                       [](const Pair& p) { return p.second; });
        std::partial_sum(sums.begin(), sums.end(), sums.begin());
        // Assign the values (both ranges are sorted by the ECL index).
        auto global_pair = global_pairs.begin();
        for (const auto& pair : current_indices_)
        {
            global_pair = std::lower_bound(global_pair, global_pairs.end(),
                                           pair.global(),
                                           [](const Pair& val1, const GlobalIndex& val2)
                                           { return val1.first < val2; });
            assert(global_pair != global_pairs.end());
            assert(global_pair->first == pair.global());
            *(begin + static_cast<const int&>(pair.local())) = sums[global_pair - global_pairs.begin()];
        }
#else
        OPM_THROW(std::logic_error, "In a sequential run the size of the communicator should be 1!");
#endif
    }
}
private:
Communication comm_;
#if HAVE_MPI
/// \brief Mapping of the local well index to ecl index
IndexSet current_indices_;
/// \brief Mapping of the above well index to ecl index
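
A note on the counts-to-displacements step in the hunk above, since it is the only slightly subtle part of the MPI path: the allgathered per-rank sizes are partially summed into a displacement vector that carries one extra leading zero, so that displ[i] is the offset at which rank i's pairs land in global_pairs and displ.back() is the total number of gathered pairs. A minimal self-contained illustration with made-up counts for three ranks:

#include <cassert>
#include <numeric>
#include <vector>

int main()
{
    // Made-up numbers of owned perforations per rank, as the allgather of
    // mySize would produce them on three ranks.
    std::vector<int> sizes{2, 3, 1};
    // One extra element so the leading zero stays in place and the last
    // entry ends up holding the total count.
    std::vector<int> displ(sizes.size() + 1, 0);
    std::partial_sum(sizes.begin(), sizes.end(), displ.begin() + 1);
    assert((displ == std::vector<int>{0, 2, 5, 6}));
    // Rank 0 writes its pairs at offset 0, rank 1 at offset 2, rank 2 at
    // offset 5, and 6 pairs are gathered in total.
}

These are exactly the arguments allgatherv needs: a receive buffer of displ.back() pairs, the per-rank counts, and the per-rank offsets.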
@@ -225,7 +293,7 @@ public:
/// \brief Sum all the values of the perforations
template<typename It>
-typename std::iterator_traits<It>::value_type sumPerfValues(It begin, It end)
+typename std::iterator_traits<It>::value_type sumPerfValues(It begin, It end) const
{
using V = typename std::iterator_traits<It>::value_type;
/// \todo cater for overlap later. Currently only owner
@@ -233,6 +301,19 @@ public:
return communication().sum(local);
}
/// \brief Do an (in-place) partial sum on values attached to all perforations.
///
/// For distributed wells this may include perforations stored elsewhere.
/// The result is stored in the range given by the parameters.
/// \param begin The start of the range
/// \param end The end of the range
/// \tparam RAIterator The type of random access iterator
template<class RAIterator>
void partialSumPerfValues(RAIterator begin, RAIterator end) const
{
    commAboveBelow_->partialSumPerfValues(begin, end);
}
/// \brief Free data of communication data structures.
void clearCommunicateAboveBelow();
private:


@@ -2142,7 +2142,7 @@ namespace Opm
// This accumulation must be done per well.
const auto beg = perf_pressure_diffs_.begin();
const auto end = perf_pressure_diffs_.end();
-std::partial_sum(beg, end, beg);
+this->parallel_well_info_.partialSumPerfValues(beg, end);
}
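
As a closing illustration of what the replaced call computes (made-up numbers; perf_pressure_diffs_ is only referenced by name): std::partial_sum accumulates the per-connection pressure-difference contributions along the well, so each entry ends up holding the total difference from the start of the range. With a single process the new partialSumPerfValues call reduces to exactly this; with distributed perforations it produces the same result via the gather and sort described above.

#include <numeric>
#include <vector>

int main()
{
    // Made-up per-connection increments for one well (a stand-in for
    // perf_pressure_diffs_); the actual values and units do not matter here.
    std::vector<double> diffs{1.0, 0.5, 0.25};
    // The accumulation the well code performs, per well.
    std::partial_sum(diffs.begin(), diffs.end(), diffs.begin());
    // diffs is now {1.0, 1.5, 1.75}: each entry holds the accumulated
    // pressure difference up to and including that connection.
}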