Merge pull request #4434 from akva2/parwelmorepriv

ParallelWellinfo: make some more templates private
Merged by Markus Blatt on 2023-02-09 16:51:32 +01:00 (committed by GitHub)
commit 8b7c87ba52
6 changed files with 118 additions and 89 deletions
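
The change follows the usual C++ technique for keeping member templates out of a header: declare the template in the class, define it in the .cpp, and explicitly instantiate the handful of types that are actually needed. A minimal sketch of that pattern, using hypothetical names (Widget, firstValue) rather than the actual OPM classes:

// widget.hpp -- hypothetical, simplified illustration of the pattern
#include <vector>

class Widget
{
public:
    // Only declared here; the definition lives in widget.cpp.
    template<class T>
    T firstValue(const std::vector<T>& values) const;
};

// widget.cpp
template<class T>
T Widget::firstValue(const std::vector<T>& values) const
{
    return values.empty() ? T{} : values.front();
}

// Explicit instantiations: only these types are usable by callers.
template int Widget::firstValue<int>(const std::vector<int>&) const;
template double Widget::firstValue<double>(const std::vector<double>&) const;

Callers can then only use the instantiated types, and changing the template body no longer forces a rebuild of everything that includes the header, which is why several includes move from the header into the .cpp files below.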

View File

@@ -23,6 +23,8 @@
#include <config.h>
#include <opm/simulators/wells/BlackoilWellModelConstraints.hpp>
#include <opm/common/ErrorMacros.hpp>
#include <opm/input/eclipse/Schedule/Schedule.hpp>
#include <opm/simulators/wells/BlackoilWellModelGeneric.hpp>

View File

@@ -24,6 +24,8 @@
#include <opm/simulators/wells/BlackoilWellModelGeneric.hpp>
#include <opm/common/ErrorMacros.hpp>
#include <opm/output/data/GuideRateValue.hpp>
#include <opm/output/data/Groups.hpp>
#include <opm/output/data/Wells.hpp>

View File

@@ -23,6 +23,10 @@
#include <opm/input/eclipse/Schedule/Well/Well.hpp>
#include <opm/input/eclipse/Schedule/Well/WellConnections.hpp>
#include <cassert>
#include <iterator>
#include <numeric>
namespace Dune
{
#if HAVE_MPI
@@ -232,6 +236,68 @@ int CommunicateAboveBelow::endReset()
    return num_local_perfs_;
}
template<class RAIterator>
void CommunicateAboveBelow::partialSumPerfValues(RAIterator begin, RAIterator end) const
{
    if (this->comm_.size() < 2)
    {
        std::partial_sum(begin, end, begin);
    }
    else
    {
#if HAVE_MPI
        // The global index used in the index set current_indices
        // is the index of the perforation in ECL Schedule definition.
        // This is assumed to give the topological order that is used
        // when doing the partial sum.
        // allgather the index of the perforation in ECL schedule and the value.
        using Value = typename std::iterator_traits<RAIterator>::value_type;
        std::vector<int> sizes(comm_.size());
        std::vector<int> displ(comm_.size() + 1, 0);
        using GlobalIndex = typename IndexSet::IndexPair::GlobalIndex;
        using Pair = std::pair<GlobalIndex,Value>;
        std::vector<Pair> my_pairs;
        my_pairs.reserve(current_indices_.size());
        for (const auto& pair: current_indices_)
        {
            if (pair.local().attribute() == owner)
            {
                my_pairs.emplace_back(pair.global(), begin[pair.local()]);
            }
        }
        int mySize = my_pairs.size();
        comm_.allgather(&mySize, 1, sizes.data());
        std::partial_sum(sizes.begin(), sizes.end(), displ.begin()+1);
        std::vector<Pair> global_pairs(displ.back());
        comm_.allgatherv(my_pairs.data(), my_pairs.size(), global_pairs.data(), sizes.data(), displ.data());
        // sort the complete range to get the correct ordering
        std::sort(global_pairs.begin(), global_pairs.end(),
                  [](const Pair& p1, const Pair& p2){ return p1.first < p2.first; } );
        std::vector<Value> sums(global_pairs.size());
        std::transform(global_pairs.begin(), global_pairs.end(), sums.begin(),
                       [](const Pair& p) { return p.second; });
        std::partial_sum(sums.begin(), sums.end(), sums.begin());
        // assign the values (both ranges are sorted by the ecl index)
        auto global_pair = global_pairs.begin();
        for (const auto& pair: current_indices_)
        {
            global_pair = std::lower_bound(global_pair, global_pairs.end(),
                                           pair.global(),
                                           [](const Pair& val1, const GlobalIndex& val2)
                                           { return val1.first < val2; });
            assert(global_pair != global_pairs.end());
            assert(global_pair->first == pair.global());
            begin[pair.local()] = sums[global_pair - global_pairs.begin()];
        }
#else
        OPM_THROW(std::logic_error, "In a sequential run the size of the communicator should be 1!");
#endif
    }
}

using dIter = typename std::vector<double>::iterator;
template void CommunicateAboveBelow::partialSumPerfValues<dIter>(dIter begin, dIter end) const;
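
The comments above spell out the parallel strategy: every rank gathers the (ECL Schedule index, value) pairs of the perforations owned on each rank, sorts the combined list by that global index, runs a partial sum over the sorted values, and finally copies the summed values back to its local perforations. A communication-free sketch of just that sort-and-sum core, on hypothetical data, looks like this:

// Sketch (assumption: no MPI, hypothetical data) of the core idea above:
// order values by their global (ECL Schedule) index, then partial-sum them
// in that order.
#include <algorithm>
#include <numeric>
#include <utility>
#include <vector>

int main()
{
    using Pair = std::pair<int, double>;            // (global index, value)
    std::vector<Pair> pairs{{2, 3.0}, {0, 1.0}, {1, 2.0}};

    std::sort(pairs.begin(), pairs.end(),
              [](const Pair& a, const Pair& b) { return a.first < b.first; });

    std::vector<double> sums(pairs.size());
    std::transform(pairs.begin(), pairs.end(), sums.begin(),
                   [](const Pair& p) { return p.second; });
    std::partial_sum(sums.begin(), sums.end(), sums.begin());
    // sums is now {1.0, 3.0, 6.0}, ordered by global index.
    // The real code then maps each sum back to the owning local perforation
    // via std::lower_bound on the sorted (index, value) pairs.
    return 0;
}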
struct CopyGatherScatter
{
static const double& gather(const double* a, std::size_t i)
@@ -409,12 +475,49 @@ void ParallelWellInfo::endReset()
local_num_perfs));
}
template<typename It>
typename It::value_type
ParallelWellInfo::sumPerfValues(It begin, It end) const
{
    using V = typename It::value_type;
    /// \todo cater for overlap later. Currently only owner
    auto local = std::accumulate(begin, end, V());
    return communication().sum(local);
}

using cdIter = typename std::vector<double>::const_iterator;
template typename cdIter::value_type ParallelWellInfo::sumPerfValues<cdIter>(cdIter,cdIter) const;
template typename dIter::value_type ParallelWellInfo::sumPerfValues<dIter>(dIter,dIter) const;

void ParallelWellInfo::clear()
{
    commAboveBelow_->clear();
    globalPerfCont_.reset();
}
template<class T>
T ParallelWellInfo::broadcastFirstPerforationValue(const T& t) const
{
    T res = t;
    if (rankWithFirstPerf_ >= 0) {
#ifndef NDEBUG
        assert(rankWithFirstPerf_ < comm_->size());
        // At least on some OpenMPI versions this broadcast might interfere
        // with other communication if there are bugs
        comm_->barrier();
#endif
        comm_->broadcast(&res, 1, rankWithFirstPerf_);
#ifndef NDEBUG
        comm_->barrier();
#endif
    }
    return res;
}

template int ParallelWellInfo::broadcastFirstPerforationValue<int>(const int&) const;
template double ParallelWellInfo::broadcastFirstPerforationValue<double>(const double&) const;
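
broadcastFirstPerforationValue sends the value held by the rank owning the first perforation to all ranks; in debug builds the broadcast is fenced with barriers so that a mismatched collective is caught immediately instead of silently interfering with later communication. A stand-alone sketch of that guarded-broadcast pattern, written against plain MPI rather than the Dune/OPM communication wrapper used above:

// Stand-alone sketch (assumption: plain MPI, hypothetical function name
// broadcast_from) of broadcasting a value from a designated root rank,
// with debug barriers fencing the collective.
#include <cassert>
#include <mpi.h>

double broadcast_from(double value, int root, MPI_Comm comm)
{
    int size = 0;
    MPI_Comm_size(comm, &size);
    if (root >= 0) {                 // only broadcast if some rank owns the value
#ifndef NDEBUG
        assert(root < size);
        MPI_Barrier(comm);           // surface a mismatched collective early
#endif
        MPI_Bcast(&value, 1, MPI_DOUBLE, root, comm);
#ifndef NDEBUG
        MPI_Barrier(comm);
#endif
    }
    return value;
}

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    double v = (rank == 0) ? 42.0 : 0.0;
    v = broadcast_from(v, /*root=*/0, MPI_COMM_WORLD);
    MPI_Finalize();
    return 0;
}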
std::vector<double> ParallelWellInfo::communicateAboveValues(double zero_value,
                                                             const double* current_values,
                                                             std::size_t size) const

View File

@@ -17,20 +17,17 @@
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef OPM_PARALLELWELLINFO_HEADER_INCLUDED
#define OPM_PARALLELWELLINFO_HEADER_INCLUDED
#include <dune/common/version.hh>
#include <dune/common/parallel/communicator.hh>
#include <dune/common/parallel/interface.hh>
#include <dune/common/parallel/mpihelper.hh>
#include <dune/common/parallel/plocalindex.hh>
#include <dune/istl/owneroverlapcopy.hh>
#include <dune/common/parallel/remoteindices.hh>
#include <opm/simulators/utils/ParallelCommunication.hpp>
#include <opm/common/ErrorMacros.hpp>
#include <memory>
#include <iterator>
#include <numeric>
namespace Opm
{
@@ -56,7 +53,7 @@ public:
#if HAVE_MPI
using RI = Dune::RemoteIndices<IndexSet>;
#endif
explicit CommunicateAboveBelow(const Parallel::Communication& comm);
/// \brief Adds information about original index of the perforations in ECL Schedule.
///
@@ -107,63 +104,7 @@ public:
/// \param end The end of the range
/// \tparam RAIterator The type of random access iterator
template<class RAIterator>
void partialSumPerfValues(RAIterator begin, RAIterator end) const
{
    if (this->comm_.size() < 2)
    {
        std::partial_sum(begin, end, begin);
    }
    else
    {
#if HAVE_MPI
        // The global index used in the index set current_indices
        // is the index of the perforation in ECL Schedule definition.
        // This is assumed to give the topological order that is used
        // when doing the partial sum.
        // allgather the index of the perforation in ECL schedule and the value.
        using Value = typename std::iterator_traits<RAIterator>::value_type;
        std::vector<int> sizes(comm_.size());
        std::vector<int> displ(comm_.size() + 1, 0);
        using GlobalIndex = typename IndexSet::IndexPair::GlobalIndex;
        using Pair = std::pair<GlobalIndex,Value>;
        std::vector<Pair> my_pairs;
        my_pairs.reserve(current_indices_.size());
        for (const auto& pair: current_indices_)
        {
            if (pair.local().attribute() == owner)
            {
                my_pairs.emplace_back(pair.global(), begin[pair.local()]);
            }
        }
        int mySize = my_pairs.size();
        comm_.allgather(&mySize, 1, sizes.data());
        std::partial_sum(sizes.begin(), sizes.end(), displ.begin()+1);
        std::vector<Pair> global_pairs(displ.back());
        comm_.allgatherv(my_pairs.data(), my_pairs.size(), global_pairs.data(), sizes.data(), displ.data());
        // sort the complete range to get the correct ordering
        std::sort(global_pairs.begin(), global_pairs.end(),
                  [](const Pair& p1, const Pair& p2){ return p1.first < p2.first; } );
        std::vector<Value> sums(global_pairs.size());
        std::transform(global_pairs.begin(), global_pairs.end(), sums.begin(),
                       [](const Pair& p) { return p.second; });
        std::partial_sum(sums.begin(), sums.end(), sums.begin());
        // assign the values (both ranges are sorted by the ecl index)
        auto global_pair = global_pairs.begin();
        for (const auto& pair: current_indices_)
        {
            global_pair = std::lower_bound(global_pair, global_pairs.end(),
                                           pair.global(),
                                           [](const Pair& val1, const GlobalIndex& val2)
                                           { return val1.first < val2; });
            assert(global_pair != global_pairs.end());
            assert(global_pair->first == pair.global());
            begin[pair.local()] = sums[global_pair - global_pairs.begin()];
        }
#else
        OPM_THROW(std::logic_error, "In a sequential run the size of the communicator should be 1!");
#endif
    }
}
void partialSumPerfValues(RAIterator begin, RAIterator end) const;
/// \brief Get index set for the local perforations.
const IndexSet& getIndexSet() const;
@@ -270,23 +211,7 @@ public:
/// is not initialized, and no broadcast is performed. In this case the argument
/// is returned unmodified.
template<class T>
T broadcastFirstPerforationValue(const T& t) const
{
    T res = t;
    if (rankWithFirstPerf_ >= 0) {
#ifndef NDEBUG
        assert(rankWithFirstPerf_ < comm_->size());
        // At least on some OpenMPI versions this broadcast might interfere
        // with other communication if there are bugs
        comm_->barrier();
#endif
        comm_->broadcast(&res, 1, rankWithFirstPerf_);
#ifndef NDEBUG
        comm_->barrier();
#endif
    }
    return res;
}
T broadcastFirstPerforationValue(const T& t) const;
/// \brief Creates an array of values for the perforation above.
/// \param first_value Value to use for above of the first perforation
@@ -353,13 +278,7 @@ public:
/// \brief Sum all the values of the perforations
template<typename It>
typename std::iterator_traits<It>::value_type sumPerfValues(It begin, It end) const
{
    using V = typename std::iterator_traits<It>::value_type;
    /// \todo cater for overlap later. Currently only owner
    auto local = std::accumulate(begin, end, V());
    return communication().sum(local);
}
typename It::value_type sumPerfValues(It begin, It end) const;
/// \brief Do a (in place) partial sum on values attached to all perforations.
///

View File

@@ -35,6 +35,7 @@
#include <opm/simulators/wells/WellInterfaceIndices.hpp>
#include <opm/simulators/wells/WellState.hpp>
#include <numeric>
#include <sstream>
namespace Opm

View File

@@ -22,6 +22,8 @@
#include <config.h>
#include <opm/simulators/wells/WellInterfaceGeneric.hpp>
#include <opm/common/ErrorMacros.hpp>
#include <opm/input/eclipse/Schedule/Well/WellBrineProperties.hpp>
#include <opm/input/eclipse/Schedule/Well/WellConnections.hpp>
#include <opm/input/eclipse/Schedule/Well/WellFoamProperties.hpp>