Merge pull request #2976 from blattms/stdwell-comm-works-on-spe9
Make distributed standard well work on SPE9.
@@ -212,6 +212,17 @@ opm_add_test(test_gatherdeferredlogger
                4 ${PROJECT_BINARY_DIR}
              )
 
+opm_add_test(test_parallelwellinfo_mpi
+             DEPENDS "opmsimulators"
+             LIBRARIES opmsimulators ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY}
+             SOURCES
+               tests/test_parallelwellinfo.cpp
+             CONDITION
+               MPI_FOUND AND Boost_UNIT_TEST_FRAMEWORK_FOUND
+             DRIVER_ARGS
+               4 ${PROJECT_BINARY_DIR}
+)
+
 include(OpmBashCompletion)
 
 if (NOT BUILD_FLOW)
@@ -467,7 +467,7 @@ namespace Opm {
             // Clear the communication data structures for above values.
             for(auto&& pinfo : local_parallel_well_info_)
             {
-                pinfo->clearCommunicateAbove();
+                pinfo->clearCommunicateAboveBelow();
             }
         }
 
@@ -43,13 +43,13 @@ struct CommPolicy<double*>
 
 namespace Opm
 {
-CommunicateAbove::CommunicateAbove([[maybe_unused]] const Communication& comm)
+CommunicateAboveBelow::CommunicateAboveBelow([[maybe_unused]] const Communication& comm)
 #if HAVE_MPI
     : comm_(comm), interface_(comm_)
 #endif
 {}
 
-void CommunicateAbove::clear()
+void CommunicateAboveBelow::clear()
 {
 #if HAVE_MPI
     above_indices_ = {};
@@ -60,7 +60,7 @@ void CommunicateAbove::clear()
     count_ = 0;
 }
 
-void CommunicateAbove::beginReset()
+void CommunicateAboveBelow::beginReset()
 {
     clear();
 #if HAVE_MPI
@@ -72,7 +72,7 @@ void CommunicateAbove::beginReset()
 #endif
 }
 
-void CommunicateAbove::endReset()
+void CommunicateAboveBelow::endReset()
 {
 #if HAVE_MPI
     if (comm_.size() > 1)
@@ -104,7 +104,7 @@ struct CopyGatherScatter
 };
 
 
-std::vector<double> CommunicateAbove::communicate(double first_above,
+std::vector<double> CommunicateAboveBelow::communicateAbove(double first_above,
                                                   const double* current,
                                                   std::size_t size)
 {
@@ -131,8 +131,35 @@ std::vector<double> CommunicateAbove::communicate(double first_above,
     }
     return above;
 }
+std::vector<double> CommunicateAboveBelow::communicateBelow(double last_below,
+                                                            const double* current,
+                                                            std::size_t size)
+{
+    std::vector<double> below(size, last_below);
+#if HAVE_MPI
+    if (comm_.size() > 1)
+    {
+        auto belowData = below.data();
+        // Ugly const_cast needed as my compiler says, that
+        // passing const double*& and double* as parameter is
+        // incompatible with function decl template<Data> backward(Data&, const Data&)
+        // That would need the first argument to be double* const&
+        communicator_.backward<CopyGatherScatter>(belowData, const_cast<double*>(current));
+    }
+    else
+#endif
+    {
+        if (below.size() > 1)
+        {
+            // No communication needed, just copy.
+            std::copy(current+1, current + below.size(), below.begin());
+        }
+    }
+    return below;
+}
 
-void CommunicateAbove::pushBackEclIndex([[maybe_unused]] int above,
+void CommunicateAboveBelow::pushBackEclIndex([[maybe_unused]] int above,
                                         [[maybe_unused]] int current,
                                         [[maybe_unused]] bool isOwner)
 {
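For intuition: on a single process the two communication routines just shift the perforation values by one position. A minimal sketch of that semantics, using hypothetical helper names (shiftAbove/shiftBelow are not part of the OPM API):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// above[i] receives the value of perforation i-1, below[i] the value of
// perforation i+1; the boundary slot gets the supplied fill value.
std::vector<double> shiftAbove(double first_above, const double* current, std::size_t size)
{
    std::vector<double> above(size, first_above);
    if (size > 1)
        std::copy(current, current + size - 1, above.begin() + 1);
    return above;
}

std::vector<double> shiftBelow(double last_below, const double* current, std::size_t size)
{
    std::vector<double> below(size, last_below);
    if (size > 1)
        std::copy(current + 1, current + size, below.begin());
    return below;
}

int main()
{
    const std::vector<double> v{1.0, 2.0, 3.0};
    const auto above = shiftAbove(-10.0, v.data(), v.size());
    const auto below = shiftBelow(-10.0, v.data(), v.size());
    assert(above[0] == -10.0 && above[1] == 1.0 && above[2] == 2.0);
    assert(below[0] == 2.0 && below[1] == 3.0 && below[2] == -10.0);
    return 0;
}

In a parallel run the same shift is realized by Dune's forward/backward communication over the interface built from the ecl indices, as the #if HAVE_MPI branch above shows.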
@@ -175,8 +202,8 @@ ParallelWellInfo::ParallelWellInfo(const std::string& name,
     : name_(name), hasLocalCells_ (hasLocalCells),
       isOwner_(true), rankWithFirstPerf_(-1),
       comm_(new Communication(Dune::MPIHelper::getLocalCommunicator())),
-      commAbove_(new CommunicateAbove(*comm_))
+      commAboveBelow_(new CommunicateAboveBelow(*comm_))
 {}
 
 ParallelWellInfo::ParallelWellInfo(const std::pair<std::string,bool>& well_info,
                                    [[maybe_unused]] Communication allComm)
@@ -191,7 +218,7 @@ ParallelWellInfo::ParallelWellInfo(const std::pair<std::string,bool>& well_info,
 #else
     comm_.reset(new Communication(Dune::MPIHelper::getLocalCommunicator()));
 #endif
-    commAbove_.reset(new CommunicateAbove(*comm_));
+    commAboveBelow_.reset(new CommunicateAboveBelow(*comm_));
     isOwner_ = (comm_->rank() == 0);
 }
 
@@ -207,37 +234,52 @@ void ParallelWellInfo::communicateFirstPerforation(bool hasFirst)
 
 void ParallelWellInfo::pushBackEclIndex(int above, int current)
 {
-    commAbove_->pushBackEclIndex(above, current);
+    commAboveBelow_->pushBackEclIndex(above, current);
 }
 
 void ParallelWellInfo::beginReset()
 {
-    commAbove_->beginReset();
+    commAboveBelow_->beginReset();
 }
 
 
 void ParallelWellInfo::endReset()
 {
-    commAbove_->beginReset();
+    commAboveBelow_->beginReset();
 }
 
-void ParallelWellInfo::clearCommunicateAbove()
+void ParallelWellInfo::clearCommunicateAboveBelow()
 {
-    commAbove_->clear();
+    commAboveBelow_->clear();
 }
 
 std::vector<double> ParallelWellInfo::communicateAboveValues(double zero_value,
                                                              const double* current_values,
                                                              std::size_t size) const
 {
-    return commAbove_->communicate(zero_value, current_values,
-                                   size);
+    return commAboveBelow_->communicateAbove(zero_value, current_values,
+                                             size);
 }
 
 std::vector<double> ParallelWellInfo::communicateAboveValues(double zero_value,
                                                              const std::vector<double>& current_values) const
 {
-    return commAbove_->communicate(zero_value, current_values.data(),
-                                   current_values.size());
+    return commAboveBelow_->communicateAbove(zero_value, current_values.data(),
+                                             current_values.size());
 }
 
+std::vector<double> ParallelWellInfo::communicateBelowValues(double last_value,
+                                                             const double* current_values,
+                                                             std::size_t size) const
+{
+    return commAboveBelow_->communicateBelow(last_value, current_values,
+                                             size);
+}
+
+std::vector<double> ParallelWellInfo::communicateBelowValues(double last_value,
+                                                             const std::vector<double>& current_values) const
+{
+    return commAboveBelow_->communicateBelow(last_value, current_values.data(),
+                                             current_values.size());
+}
+
@@ -24,16 +24,19 @@
 #include <dune/common/parallel/plocalindex.hh>
 #include <dune/istl/owneroverlapcopy.hh>
 
+#include <opm/common/ErrorMacros.hpp>
 #include <opm/parser/eclipse/EclipseState/Schedule/Well/Well.hpp>
 
 #include <memory>
+#include <iterator>
+#include <numeric>
 
 namespace Opm
 {
 
-/// \brief Class to facilitate getting values associated with the above perforation
+/// \brief Class to facilitate getting values associated with the above/below perforation
 ///
-class CommunicateAbove
+class CommunicateAboveBelow
 {
     enum Attribute {
         owner=1, overlap=2
@@ -51,7 +54,7 @@ class CommunicateAbove
 #endif
 
 public:
-    explicit CommunicateAbove(const Communication& comm);
+    explicit CommunicateAboveBelow(const Communication& comm);
     /// \brief Adds information about original index of the perforations in ECL Schedule.
     ///
     /// \warning These indices need to be pushed in the same order as they
@@ -79,13 +82,88 @@ public:
     /// \param current C-array of the values at the perforations
     /// \param size The size of the C-array and the returned vector
     /// \return a vector containing the values for the perforation above.
-    std::vector<double> communicate(double first_value,
-                                    const double* current,
-                                    std::size_t size);
+    std::vector<double> communicateAbove(double first_value,
+                                         const double* current,
+                                         std::size_t size);
+
+    /// \brief Creates an array of values for the perforation below.
+    /// \param last_value Value to use for below of the last perforation
+    /// \param current C-array of the values at the perforations
+    /// \param size The size of the C-array and the returned vector
+    /// \return a vector containing the values for the perforation below.
+    std::vector<double> communicateBelow(double last_value,
+                                         const double* current,
+                                         std::size_t size);
+
+    /// \brief Do a (in place) partial sum on values attached to all perforations.
+    ///
+    /// For distributed wells this may include perforations stored elsewhere.
+    /// The result is stored in the range given as the parameters.
+    /// \param begin The start of the range
+    /// \param end The end of the range
+    /// \tparam RAIterator The type of random access iterator
+    template<class RAIterator>
+    void partialSumPerfValues(RAIterator begin, RAIterator end) const
+    {
+        if (this->comm_.size() < 2)
+        {
+            std::partial_sum(begin, end, begin);
+        }
+        else
+        {
+#if HAVE_MPI
+            // The global index used in the index set current_indices
+            // is the index of the perforation in ECL Schedule definition.
+            // This is assumed to give the topological order that is used
+            // when doing the partial sum.
+            // allgather the index of the perforation in ECL schedule and the value.
+            using Value = typename std::iterator_traits<RAIterator>::value_type;
+            std::vector<int> sizes(comm_.size());
+            std::vector<int> displ(comm_.size() + 1, 0);
+            using GlobalIndex = typename IndexSet::IndexPair::GlobalIndex;
+            using Pair = std::pair<GlobalIndex,Value>;
+            std::vector<Pair> my_pairs;
+            my_pairs.reserve(current_indices_.size());
+            for (const auto& pair: current_indices_)
+            {
+                if (pair.local().attribute() == owner)
+                {
+                    my_pairs.emplace_back(pair.global(), begin[pair.local()]);
+                }
+            }
+            int mySize = my_pairs.size();
+            comm_.allgather(&mySize, 1, sizes.data());
+            std::partial_sum(sizes.begin(), sizes.end(), displ.begin()+1);
+            std::vector<Pair> global_pairs(displ.back());
+            comm_.allgatherv(my_pairs.data(), my_pairs.size(), global_pairs.data(), sizes.data(), displ.data());
+            // sort the complete range to get the correct ordering
+            std::sort(global_pairs.begin(), global_pairs.end(),
+                      [](const Pair& p1, const Pair& p2){ return p1.first < p2.first; } );
+            std::vector<Value> sums(global_pairs.size());
+            std::transform(global_pairs.begin(), global_pairs.end(), sums.begin(),
+                           [](const Pair& p) { return p.second; });
+            std::partial_sum(sums.begin(), sums.end(), sums.begin());
+            // assign the values (both ranges are sorted by the ecl index)
+            auto global_pair = global_pairs.begin();
+            for (const auto& pair: current_indices_)
+            {
+                global_pair = std::lower_bound(global_pair, global_pairs.end(),
+                                               pair.global(),
+                                               [](const Pair& val1, const GlobalIndex& val2)
+                                               { return val1.first < val2; });
+                assert(global_pair != global_pairs.end());
+                assert(global_pair->first == pair.global());
+                *(begin + static_cast<const int&>(pair.local())) = sums[global_pair - global_pairs.begin()];
+            }
+#else
+            OPM_THROW(std::logic_error, "In a sequential run the size of the communicator should be 1!");
+#endif
+        }
+    }
 
 private:
     Communication comm_;
 #if HAVE_MPI
     /// \brief Mapping of the local well index to ecl index
     IndexSet current_indices_;
     /// \brief Mapping of the above well index to ecl index
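The distributed partial sum above gathers every rank's (ecl index, value) pairs, orders them globally, scans once, and writes each rank's slots back. A single-process sketch of that gather-sort-scan-scatter idea, with made-up data standing in for the MPI allgatherv:

#include <algorithm>
#include <cassert>
#include <numeric>
#include <utility>
#include <vector>

int main()
{
    using Pair = std::pair<int, double>; // (ecl index, value)

    // Two hypothetical ranks, each owning interleaved perforations of one
    // distributed well (illustrative data, not the OPM API).
    std::vector<std::vector<Pair>> ranks = {
        {{0, 1.0}, {2, 3.0}},  // rank 0
        {{1, 2.0}, {3, 4.0}},  // rank 1
    };

    // "allgather" all pairs and sort by ecl index to recover the bore order.
    std::vector<Pair> global;
    for (const auto& r : ranks)
        global.insert(global.end(), r.begin(), r.end());
    std::sort(global.begin(), global.end());

    // Scan the values once in global order.
    std::vector<double> sums(global.size());
    std::transform(global.begin(), global.end(), sums.begin(),
                   [](const Pair& p) { return p.second; });
    std::partial_sum(sums.begin(), sums.end(), sums.begin());

    // Write each rank's slots back via a lookup on the ecl index.
    for (auto& r : ranks)
        for (auto& p : r)
        {
            auto it = std::lower_bound(global.begin(), global.end(), p.first,
                                       [](const Pair& a, int idx) { return a.first < idx; });
            p.second = sums[it - global.begin()];
        }

    // Values 1,2,3,4 in bore order give partial sums 1,3,6,10.
    assert(ranks[0][0].second == 1.0 && ranks[0][1].second == 6.0);
    assert(ranks[1][0].second == 3.0 && ranks[1][1].second == 10.0);
    return 0;
}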
@@ -155,15 +233,31 @@ public:
     /// \param current C-array of the values at the perforations
     /// \param size The size of the C-array and the returned vector
     /// \return a vector containing the values for the perforation above.
-    std::vector<double> communicateAboveValues(double zero_value,
+    std::vector<double> communicateAboveValues(double first_value,
                                                const double* current,
                                                std::size_t size) const;
 
     /// \brief Creates an array of values for the perforation above.
     /// \param first_value Value to use for above of the first perforation
     /// \param current vector of current values
-    std::vector<double> communicateAboveValues(double zero_value,
+    std::vector<double> communicateAboveValues(double first_value,
                                                const std::vector<double>& current) const;
 
+    /// \brief Creates an array of values for the perforation below.
+    /// \param last_value Value to use for below of the last perforation
+    /// \param current C-array of the values at the perforations
+    /// \param size The size of the C-array and the returned vector
+    /// \return a vector containing the values for the perforation below.
+    std::vector<double> communicateBelowValues(double last_value,
+                                               const double* current,
+                                               std::size_t size) const;
+
+    /// \brief Creates an array of values for the perforation below.
+    /// \param last_value Value to use for below of the last perforation
+    /// \param current vector of current values
+    std::vector<double> communicateBelowValues(double last_value,
+                                               const std::vector<double>& current) const;
+
     /// \brief Adds information about the ecl indices of the perforations.
     ///
     /// \warning These indices need to be pushed in the same order as they
@@ -197,8 +291,31 @@ public:
     /// \brief Indicate completion of reset of the ecl index information
     void endReset();
 
+    /// \brief Sum all the values of the perforations
+    template<typename It>
+    typename std::iterator_traits<It>::value_type sumPerfValues(It begin, It end) const
+    {
+        using V = typename std::iterator_traits<It>::value_type;
+        /// \todo cater for overlap later. Currently only owner
+        auto local = std::accumulate(begin, end, V());
+        return communication().sum(local);
+    }
+
+    /// \brief Do a (in place) partial sum on values attached to all perforations.
+    ///
+    /// For distributed wells this may include perforations stored elsewhere.
+    /// The result is stored in the range given as the parameters.
+    /// \param begin The start of the range
+    /// \param end The end of the range
+    /// \tparam RAIterator The type of random access iterator
+    template<class RAIterator>
+    void partialSumPerfValues(RAIterator begin, RAIterator end) const
+    {
+        commAboveBelow_->partialSumPerfValues(begin, end);
+    }
+
     /// \brief Free data of communication data structures.
-    void clearCommunicateAbove();
+    void clearCommunicateAboveBelow();
 private:
 
     /// \brief Deleter that also frees custom MPI communicators
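sumPerfValues is a local accumulate followed by a global sum over the well's communicator. A minimal sketch of the semantics, assuming a single-rank (self) communicator where the global sum is the identity:

#include <cassert>
#include <numeric>
#include <vector>

int main()
{
    // Made-up per-perforation rates stored on this rank.
    const std::vector<double> perf_rates{0.5, 1.5, 2.0};
    const double local = std::accumulate(perf_rates.begin(), perf_rates.end(), 0.0);
    // In a real run communication().sum(local) adds the other ranks' parts;
    // with a self communicator the global sum is just the local value.
    const double global = local;
    assert(global == 4.0);
    return 0;
}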
@@ -222,7 +339,7 @@ private:
     std::unique_ptr<Communication, DestroyComm> comm_;
 
     /// \brief used to communicate the values for the perforation above.
-    std::unique_ptr<CommunicateAbove> commAbove_;
+    std::unique_ptr<CommunicateAboveBelow> commAboveBelow_;
 };
 
 /// \brief Class checking that all connections are on active cells
 
@@ -2142,7 +2142,7 @@ namespace Opm
         // This accumulation must be done per well.
         const auto beg = perf_pressure_diffs_.begin();
         const auto end = perf_pressure_diffs_.end();
-        std::partial_sum(beg, end, beg);
+        this->parallel_well_info_.partialSumPerfValues(beg, end);
     }
 
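In a sequential run the new call is equivalent to the std::partial_sum it replaces (the comm_.size() < 2 branch shown in the header above); a tiny check of that equivalence with made-up pressure differences:

#include <cassert>
#include <numeric>
#include <vector>

int main()
{
    // Made-up connection pressure differences along the well bore.
    std::vector<double> dp{10.0, 5.0, 2.0};
    // With one process partialSumPerfValues falls back to std::partial_sum:
    std::partial_sum(dp.begin(), dp.end(), dp.begin());
    assert(dp[0] == 10.0 && dp[1] == 15.0 && dp[2] == 17.0);
    return 0;
}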
@@ -146,11 +146,12 @@ namespace Opm
             const auto& well_info = this->wellMap().at(wname);
             const int connpos = well_info[1];
             const int num_perf_this_well = well_info[2];
+            int global_num_perf_this_well = parallel_well_info[w]->communication().sum(num_perf_this_well);
 
             for (int perf = connpos; perf < connpos + num_perf_this_well; ++perf) {
                 if (wells_ecl[w].getStatus() == Well::Status::OPEN) {
                     for (int p = 0; p < np; ++p) {
-                        perfphaserates_[np*perf + p] = wellRates()[np*w + p] / double(num_perf_this_well);
+                        perfphaserates_[np*perf + p] = wellRates()[np*w + p] / double(global_num_perf_this_well);
                     }
                 }
                 perfPress()[perf] = cellPressures[well_perf_data[w][perf-connpos].cell_index];
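The point of the global count: each rank stores only its local perforations, so dividing by the local count would split the same well rate differently on every rank. A worked example with a hypothetical two-rank layout:

#include <cassert>

int main()
{
    // Hypothetical distributed well: rank 0 stores 3 perforations, rank 1
    // stores 2. communication().sum() turns the local counts into the
    // global count of 5, so both ranks divide the well rate by 5 instead
    // of by 3 and 2 respectively.
    const int local_perfs[2] = {3, 2};
    const int global_perfs = local_perfs[0] + local_perfs[1]; // what the MPI sum yields
    const double well_rate = 1000.0;
    const double per_perf = well_rate / double(global_perfs);
    assert(global_perfs == 5 && per_perf == 200.0);
    return 0;
}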
@@ -224,6 +225,10 @@ namespace Opm
                     // perfPhaseRates
                     const int oldPerf_idx_beg = (*it).second[ 1 ];
                     const int num_perf_old_well = (*it).second[ 2 ];
+                    int num_perf_changed = (num_perf_old_well != num_perf_this_well) ? 1 : 0;
+                    num_perf_changed = parallel_well_info[w]->communication().sum(num_perf_changed);
+                    bool global_num_perf_same = (num_perf_changed == 0);
+
                     // copy perforation rates when the number of perforations is equal,
                     // otherwise initialize perfphaserates to well rates divided by the number of perforations.
 
@@ -231,7 +236,7 @@ namespace Opm
                     if (new_iter == this->wellMap().end())
                         throw std::logic_error("Fatal error in WellStateFullyImplicitBlackoil - could not find well: " + well.name());
                     int connpos = new_iter->second[1];
-                    if( num_perf_old_well == num_perf_this_well )
+                    if( global_num_perf_same )
                     {
                         int old_perf_phase_idx = oldPerf_idx_beg *np;
                         for (int perf_phase_idx = connpos*np;
@@ -240,14 +245,15 @@ namespace Opm
                             perfPhaseRates()[ perf_phase_idx ] = prevState->perfPhaseRates()[ old_perf_phase_idx ];
                         }
                     } else {
+                        int global_num_perf_this_well = parallel_well_info[w]->communication().sum(num_perf_this_well);
                         for (int perf = connpos; perf < connpos + num_perf_this_well; ++perf) {
                             for (int p = 0; p < np; ++p) {
-                                perfPhaseRates()[np*perf + p] = wellRates()[np*newIndex + p] / double(num_perf_this_well);
+                                perfPhaseRates()[np*perf + p] = wellRates()[np*newIndex + p] / double(global_num_perf_this_well);
                             }
                         }
                     }
                     // perfPressures
-                    if( num_perf_old_well == num_perf_this_well )
+                    if( global_num_perf_same )
                     {
                         int oldPerf_idx = oldPerf_idx_beg;
                         for (int perf = connpos; perf < connpos + num_perf_this_well; ++perf, ++oldPerf_idx )
@@ -257,7 +263,7 @@ namespace Opm
                     }
                     // perfSolventRates
                     if (pu.has_solvent) {
-                        if( num_perf_old_well == num_perf_this_well )
+                        if( global_num_perf_same )
                         {
                             int oldPerf_idx = oldPerf_idx_beg;
                             for (int perf = connpos; perf < connpos + num_perf_this_well; ++perf, ++oldPerf_idx )
@@ -907,7 +913,7 @@ namespace Opm
 
         /// One rate per well
         double solventWellRate(const int w) const {
-            return std::accumulate(&perfRateSolvent_[0] + first_perf_index_[w], &perfRateSolvent_[0] + first_perf_index_[w+1], 0.0);
+            return parallel_well_info_[w]->sumPerfValues(&perfRateSolvent_[0] + first_perf_index_[w], &perfRateSolvent_[0] + first_perf_index_[w+1]);
         }
 
         /// One rate per well connection.
@@ -916,7 +922,7 @@ namespace Opm
 
         /// One rate per well
         double polymerWellRate(const int w) const {
-            return std::accumulate(&perfRatePolymer_[0] + first_perf_index_[w], &perfRatePolymer_[0] + first_perf_index_[w+1], 0.0);
+            return parallel_well_info_[w]->sumPerfValues(&perfRatePolymer_[0] + first_perf_index_[w], &perfRatePolymer_[0] + first_perf_index_[w+1]);
         }
 
         /// One rate per well connection.
@@ -925,7 +931,7 @@ namespace Opm
 
         /// One rate per well
         double brineWellRate(const int w) const {
-            return std::accumulate(&perfRateBrine_[0] + first_perf_index_[w], &perfRateBrine_[0] + first_perf_index_[w+1], 0.0);
+            return parallel_well_info_[w]->sumPerfValues(&perfRateBrine_[0] + first_perf_index_[w], &perfRateBrine_[0] + first_perf_index_[w+1]);
         }
 
         std::vector<double>& wellReservoirRates()
 
@@ -153,64 +153,76 @@ BOOST_AUTO_TEST_CASE(ParallelWellComparison)
 
 }
 
-BOOST_AUTO_TEST_CASE(CommunicateAboveSelf)
+BOOST_AUTO_TEST_CASE(CommunicateAboveBelowSelf)
 {
     auto comm = Dune::MPIHelper::getLocalCommunicator();
-    Opm::CommunicateAbove commAbove{ comm };
+    Opm::CommunicateAboveBelow commAboveBelow{ comm };
     for(std::size_t count=0; count < 2; ++count)
     {
         std::vector<int> eclIndex = {0, 1, 2, 3, 7 , 8, 10, 11};
         std::vector<double> current(eclIndex.size());
         std::transform(eclIndex.begin(), eclIndex.end(), current.begin(),
                        [](double v){ return 1+10.0*v;});
-        commAbove.beginReset();
+        commAboveBelow.beginReset();
         for (std::size_t i = 0; i < current.size(); ++i)
         {
             if (i==0)
-                commAbove.pushBackEclIndex(-1, eclIndex[i]);
+                commAboveBelow.pushBackEclIndex(-1, eclIndex[i]);
             else
-                commAbove.pushBackEclIndex(eclIndex[i-1], eclIndex[i]);
+                commAboveBelow.pushBackEclIndex(eclIndex[i-1], eclIndex[i]);
         }
-        commAbove.endReset();
-        auto above = commAbove.communicate(-10.0, current.data(), current.size());
+        commAboveBelow.endReset();
+        auto above = commAboveBelow.communicateAbove(-10.0, current.data(), current.size());
         BOOST_CHECK(above[0]==-10.0);
         BOOST_CHECK(above.size() == current.size());
         auto a = above.begin()+1;
         std::for_each(current.begin(), current.begin() + (current.size()-1),
                       [&a](double v){ BOOST_CHECK(*(a++) == v);});
+        auto below = commAboveBelow.communicateBelow(-10.0, current.data(), current.size());
+        BOOST_CHECK(below.back() == -10.0);
+        BOOST_CHECK(below.size() == current.size());
+        auto b = below.begin();
+        std::for_each(current.begin()+1, current.end(),
+                      [&b](double v){ BOOST_CHECK(*(b++) == v);});
     }
 }
 
 
-BOOST_AUTO_TEST_CASE(CommunicateAboveSelf1)
+BOOST_AUTO_TEST_CASE(CommunicateAboveBelowSelf1)
 {
     auto comm = Dune::MPIHelper::getLocalCommunicator();
-    Opm::CommunicateAbove commAbove{ comm };
+    Opm::CommunicateAboveBelow commAboveBelow{ comm };
     for(std::size_t count=0; count < 2; ++count)
    {
         std::vector<int> eclIndex = {0};
         std::vector<double> current(eclIndex.size());
         std::transform(eclIndex.begin(), eclIndex.end(), current.begin(),
                        [](double v){ return 1+10.0*v;});
-        commAbove.beginReset();
+        commAboveBelow.beginReset();
         for (std::size_t i = 0; i < current.size(); ++i)
        {
             if (i==0)
-                commAbove.pushBackEclIndex(-1, eclIndex[i]);
+                commAboveBelow.pushBackEclIndex(-1, eclIndex[i]);
             else
-                commAbove.pushBackEclIndex(eclIndex[i-1], eclIndex[i]);
+                commAboveBelow.pushBackEclIndex(eclIndex[i-1], eclIndex[i]);
        }
-        commAbove.endReset();
-        auto above = commAbove.communicate(-10.0, current.data(), current.size());
+        commAboveBelow.endReset();
+        auto above = commAboveBelow.communicateAbove(-10.0, current.data(), current.size());
         BOOST_CHECK(above[0]==-10.0);
         BOOST_CHECK(above.size() == current.size());
         auto a = above.begin()+1;
         std::for_each(current.begin(), current.begin() + (current.size()-1),
                       [&a](double v){ BOOST_CHECK(*(a++) == v);});
+        auto below = commAboveBelow.communicateBelow(-10.0, current.data(), current.size());
+        BOOST_CHECK(below.back() == -10.0);
+        BOOST_CHECK(below.size() == current.size());
+        auto b = below.begin();
+        std::for_each(current.begin()+1, current.end(),
+                      [&b](double v){ BOOST_CHECK(*(b++) == v);});
    }
 }
 
-BOOST_AUTO_TEST_CASE(CommunicateAboveParalle)
+BOOST_AUTO_TEST_CASE(CommunicateAboveBelowParallel)
 {
     using MPIComm = typename Dune::MPIHelper::MPICommunicator;
 #if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
@@ -220,7 +232,7 @@ BOOST_AUTO_TEST_CASE(CommunicateAboveParalle)
 #endif
     auto comm = Communication(Dune::MPIHelper::getCommunicator());
 
-    Opm::CommunicateAbove commAbove{ comm };
+    Opm::CommunicateAboveBelow commAboveBelow{ comm };
     for(std::size_t count=0; count < 2; ++count)
     {
         std::vector<int> globalEclIndex = {0, 1, 2, 3, 7 , 8, 10, 11};
@@ -244,23 +256,23 @@ BOOST_AUTO_TEST_CASE(CommunicateAboveParalle)
 
         std::vector<double> current(3);
 
-        commAbove.beginReset();
+        commAboveBelow.beginReset();
         for (std::size_t i = 0; i < current.size(); ++i)
         {
             auto gi = comm.rank() + comm.size() * i;
 
             if (gi==0)
             {
-                commAbove.pushBackEclIndex(-1, globalEclIndex[gi]);
+                commAboveBelow.pushBackEclIndex(-1, globalEclIndex[gi]);
             }
             else
             {
-                commAbove.pushBackEclIndex(globalEclIndex[gi-1], globalEclIndex[gi]);
+                commAboveBelow.pushBackEclIndex(globalEclIndex[gi-1], globalEclIndex[gi]);
             }
             current[i] = globalCurrent[gi];
         }
-        commAbove.endReset();
-        auto above = commAbove.communicate(-10.0, current.data(), current.size());
+        commAboveBelow.endReset();
+        auto above = commAboveBelow.communicateAbove(-10.0, current.data(), current.size());
         if (comm.rank() == 0)
             BOOST_CHECK(above[0]==-10.0);
 
@@ -274,5 +286,19 @@ BOOST_AUTO_TEST_CASE(CommunicateAboveParalle)
                 BOOST_CHECK(above[i]==globalCurrent[gi-1]);
             }
         }
+        auto below = commAboveBelow.communicateBelow(-10.0, current.data(), current.size());
+        if (comm.rank() == comm.size() - 1)
+            BOOST_CHECK(below.back() == -10.0);
+
+        BOOST_CHECK(below.size() == current.size());
+
+        for (std::size_t i = 0; i < current.size(); ++i)
+        {
+            auto gi = comm.rank() + comm.size() * i;
+            if (gi < globalCurrent.size() - 1)
+            {
+                BOOST_CHECK(below[i]==globalCurrent[gi+1]);
+            }
+        }
     }
 }