Mirror of https://github.com/OPM/opm-simulators.git (synced 2024-11-21 16:57:25 -06:00)
consistently use ParallelCommunication.hpp for communication definition
This commit is contained in: parent a322a3062f, commit 2c0ff6f81e
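Before this change, several classes declared their own communicator type behind DUNE version checks (Dune::Communication on DUNE >= 2.7, Dune::CollectiveCommunication otherwise). The diff below replaces those per-class typedefs with the single Opm::Parallel::Communication alias from opm/simulators/utils/ParallelCommunication.hpp. As a reading aid for the hunks, here is a minimal sketch of what that shared header is assumed to centralize; it mirrors the removed blocks and is not the verbatim header:

// Sketch only: the alias assumed to live in
// opm/simulators/utils/ParallelCommunication.hpp (the real header may differ).
#include <dune/common/version.hh>
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
#include <dune/common/parallel/communication.hh>
#else
#include <dune/common/parallel/collectivecommunication.hh>
#endif
#include <dune/common/parallel/mpihelper.hh>

namespace Opm {
namespace Parallel {

    // One MPI communicator type for the whole code base.
    using MPIComm = typename Dune::MPIHelper::MPICommunicator;

#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
    using Communication = Dune::Communication<MPIComm>;
#else
    using Communication = Dune::CollectiveCommunication<MPIComm>;
#endif

} // namespace Parallel
} // namespace Opm

With this alias in one place, the DUNE version check no longer has to be repeated in every class that needs a communicator.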
@@ -553,7 +553,7 @@ Inplace EclGenericOutputBlackoilModule<FluidSystem,Scalar>::
 outputFipLog(std::map<std::string, double>& miscSummaryData,
              std::map<std::string, std::vector<double>>& regionData,
              const bool substep,
-             const Comm& comm)
+             const Parallel::Communication& comm)
 {
     auto inplace = this->accumulateRegionSums(comm);
     if (comm.rank() != 0)
@@ -574,7 +574,7 @@ Inplace EclGenericOutputBlackoilModule<FluidSystem,Scalar>::
 outputFipresvLog(std::map<std::string, double>& miscSummaryData,
                  std::map<std::string, std::vector<double>>& regionData,
                  const bool substep,
-                 const Comm& comm)
+                 const Parallel::Communication& comm)
 {
     auto inplace = this->accumulateRegionSums(comm);
     if (comm.rank() != 0)
@@ -835,7 +835,7 @@ EclGenericOutputBlackoilModule<FluidSystem,Scalar>::
 regionSum(const ScalarBuffer& property,
           const std::vector<int>& regionId,
           size_t maxNumberOfRegions,
-          const Comm& comm)
+          const Parallel::Communication& comm)
 {
     ScalarBuffer totals(maxNumberOfRegions, 0.0);

@@ -1502,7 +1502,7 @@ namespace {

 template<class FluidSystem,class Scalar>
 void EclGenericOutputBlackoilModule<FluidSystem,Scalar>::
-outputErrorLog(const Comm& comm) const
+outputErrorLog(const Parallel::Communication& comm) const
 {
     const auto root = 0;
     auto globalFailedCellsPbub = gatherv(this->failedCellsPb_, comm, root);
@@ -1626,7 +1626,7 @@ outputFipresvLogImpl(const Inplace& inplace) const
 template<class FluidSystem,class Scalar>
 int EclGenericOutputBlackoilModule<FluidSystem,Scalar>::
 regionMax(const std::vector<int>& region,
-          const Comm& comm)
+          const Parallel::Communication& comm)
 {
     const auto max_value = region.empty() ? 0 : *std::max_element(region.begin(), region.end());
     return comm.max(max_value);
@@ -1653,7 +1653,7 @@ template<class FluidSystem,class Scalar>
 void EclGenericOutputBlackoilModule<FluidSystem,Scalar>::
 makeRegionSum(Inplace& inplace,
               const std::string& region_name,
-              const Comm& comm) const
+              const Parallel::Communication& comm) const
 {
     const auto& region = this->regions_.at(region_name);
     const std::size_t ntFip = this->regionMax(region, comm);
@@ -1689,7 +1689,7 @@ makeRegionSum(Inplace& inplace,

 template<class FluidSystem,class Scalar>
 Inplace EclGenericOutputBlackoilModule<FluidSystem,Scalar>::
-accumulateRegionSums(const Comm& comm)
+accumulateRegionSums(const Parallel::Communication& comm)
 {
     Inplace inplace;

@@ -35,13 +35,7 @@
 #include <opm/input/eclipse/Schedule/SummaryState.hpp>
 #include <opm/input/eclipse/EclipseState/SummaryConfig/SummaryConfig.hpp>

-#include <dune/common/version.hh>
-#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
-#include <dune/common/parallel/communication.hh>
-#else
-#include <dune/common/parallel/collectivecommunication.hh>
-#endif
-#include <dune/common/parallel/mpihelper.hh>
+#include <opm/simulators/utils/ParallelCommunication.hpp>

 #include <ebos/eclinterregflows.hh>

@@ -53,8 +47,6 @@ class EclipseState;
 template<class FluidSystem, class Scalar>
 class EclGenericOutputBlackoilModule {
 public:
-    using Comm = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
-
     // write cumulative production and injection reports to output
     void outputCumLog(size_t reportStepNum,
                       const bool substep,
@@ -74,17 +66,17 @@ public:
     Inplace outputFipLog(std::map<std::string, double>& miscSummaryData,
                          std::map<std::string, std::vector<double>>& regionData,
                          const bool substep,
-                         const Comm& comm);
+                         const Parallel::Communication& comm);

     // write Reservoir Volumes to output log
     Inplace outputFipresvLog(std::map<std::string, double>& miscSummaryData,
                              std::map<std::string, std::vector<double>>& regionData,
                              const bool substep,
-                             const Comm& comm);
+                             const Parallel::Communication& comm);



-    void outputErrorLog(const Comm& comm) const;
+    void outputErrorLog(const Parallel::Communication& comm) const;

     void addRftDataToWells(data::Wells& wellDatas,
                            size_t reportStepNum);
@@ -333,9 +325,9 @@ protected:

     void makeRegionSum(Inplace& inplace,
                        const std::string& region_name,
-                       const Comm& comm) const;
+                       const Parallel::Communication& comm) const;

-    Inplace accumulateRegionSums(const Comm& comm);
+    Inplace accumulateRegionSums(const Parallel::Communication& comm);

     void updateSummaryRegionValues(const Inplace& inplace,
                                    std::map<std::string, double>& miscSummaryData,
@@ -358,10 +350,10 @@ protected:
     static ScalarBuffer regionSum(const ScalarBuffer& property,
                                   const std::vector<int>& regionId,
                                   const std::size_t maxNumberOfRegions,
-                                  const Comm& comm);
+                                  const Parallel::Communication& comm);

     static int regionMax(const std::vector<int>& region,
-                         const Comm& comm);
+                         const Parallel::Communication& comm);

     static void update(Inplace& inplace,
                        const std::string& region_name,
@@ -21,7 +21,7 @@
 #ifndef ECL_MPI_SERIALIZER_HH
 #define ECL_MPI_SERIALIZER_HH

-#include <dune/common/version.hh>
+#include <opm/simulators/utils/ParallelCommunication.hpp>
 #include <opm/simulators/utils/ParallelRestart.hpp>

 #include <optional>
@@ -550,7 +550,7 @@ protected:
         data->serializeOp(*this);
     }

-    Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator> m_comm; //!< Communicator to broadcast using
+    Parallel::Communication m_comm; //!< Communicator to broadcast using

     Operation m_op = Operation::PACKSIZE; //!< Current operation
     size_t m_packSize = 0; //!< Required buffer size after PACKSIZE has been done
@@ -142,7 +142,7 @@ private:

     std::shared_ptr<ParallelIndexSet> indexSet_;
     std::shared_ptr<RemoteIndices> remoteIndices_;
-    Dune::CollectiveCommunication<MPI_Comm> communicator_;
+    Parallel::Communication communicator_;
     mutable std::vector<double> ownerMask_;
 };

@@ -46,7 +46,8 @@ struct CommPolicy<double*>
 namespace Opm
 {

-GlobalPerfContainerFactory::GlobalPerfContainerFactory(const IndexSet& local_indices, const Communication comm,
+GlobalPerfContainerFactory::GlobalPerfContainerFactory(const IndexSet& local_indices,
+                                                       const Parallel::Communication comm,
                                                        const int num_local_perfs)
     : local_indices_(local_indices), comm_(comm)
 {
@@ -182,7 +183,7 @@ int GlobalPerfContainerFactory::numGlobalPerfs() const
 }


-CommunicateAboveBelow::CommunicateAboveBelow([[maybe_unused]] const Communication& comm)
+CommunicateAboveBelow::CommunicateAboveBelow([[maybe_unused]] const Parallel::Communication& comm)
 #if HAVE_MPI
     : comm_(comm), interface_(comm_)
 #endif
@@ -319,7 +320,7 @@ void CommunicateAboveBelow::pushBackEclIndex([[maybe_unused]] int above,
 }


-void ParallelWellInfo::DestroyComm::operator()(Communication* comm)
+void ParallelWellInfo::DestroyComm::operator()(Parallel::Communication* comm)
 {
 #if HAVE_MPI
     // Only delete custom communicators.
@@ -353,13 +354,13 @@ ParallelWellInfo::ParallelWellInfo(const std::string& name,
                                    bool hasLocalCells)
     : name_(name), hasLocalCells_ (hasLocalCells),
       isOwner_(true), rankWithFirstPerf_(-1),
-      comm_(new Communication(Dune::MPIHelper::getLocalCommunicator())),
+      comm_(new Parallel::Communication(Dune::MPIHelper::getLocalCommunicator())),
       commAboveBelow_(new CommunicateAboveBelow(*comm_))
 {}


 ParallelWellInfo::ParallelWellInfo(const std::pair<std::string, bool>& well_info,
-                                   [[maybe_unused]] Communication allComm)
+                                   [[maybe_unused]] Parallel::Communication allComm)
     : name_(well_info.first), hasLocalCells_(well_info.second),
       rankWithFirstPerf_(-1)
 {
@@ -367,9 +368,9 @@ ParallelWellInfo::ParallelWellInfo(const std::pair<std::string, bool>& well_info
     MPI_Comm newComm;
     int color = hasLocalCells_ ? 1 : MPI_UNDEFINED;
     MPI_Comm_split(allComm, color, allComm.rank(), &newComm);
-    comm_.reset(new Communication(newComm));
+    comm_.reset(new Parallel::Communication(newComm));
 #else
-    comm_.reset(new Communication(Dune::MPIHelper::getLocalCommunicator()));
+    comm_.reset(new Parallel::Communication(Dune::MPIHelper::getLocalCommunicator()));
 #endif
     commAboveBelow_.reset(new CommunicateAboveBelow(*comm_));
     isOwner_ = (comm_->rank() == 0);
@@ -51,19 +51,13 @@ public:
         ownerAbove = 3,
         overlapAbove = 4
     };
-    using MPIComm = typename Dune::MPIHelper::MPICommunicator;
-#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
-    using Communication = Dune::Communication<MPIComm>;
-#else
-    using Communication = Dune::CollectiveCommunication<MPIComm>;
-#endif
     using LocalIndex = Dune::ParallelLocalIndex<Attribute>;
     using IndexSet = Dune::ParallelIndexSet<int,LocalIndex,50>;
 #if HAVE_MPI
     using RI = Dune::RemoteIndices<IndexSet>;
 #endif

-    explicit CommunicateAboveBelow(const Communication& comm);
+    explicit CommunicateAboveBelow(const Parallel::Communication& comm);
     /// \brief Adds information about original index of the perforations in ECL Schedule.
     ///
     /// \warning Theses indices need to be push in the same order as they
@@ -175,8 +169,9 @@ public:
     const IndexSet& getIndexSet() const;

     int numLocalPerfs() const;

 private:
-    Communication comm_;
+    Parallel::Communication comm_;
     /// \brief Mapping of the local well index to ecl index
     IndexSet current_indices_;
 #if HAVE_MPI
@@ -198,19 +193,14 @@ private:
 class GlobalPerfContainerFactory
 {
 public:
-    using MPIComm = typename Dune::MPIHelper::MPICommunicator;
-#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
-    using Communication = Dune::Communication<MPIComm>;
-#else
-    using Communication = Dune::CollectiveCommunication<MPIComm>;
-#endif
     using IndexSet = CommunicateAboveBelow::IndexSet;
     using Attribute = CommunicateAboveBelow::Attribute;
     using GlobalIndex = typename IndexSet::IndexPair::GlobalIndex;

     /// \brief Constructor
     /// \param local_indices completely set up index set for map ecl index to local index
-    GlobalPerfContainerFactory(const IndexSet& local_indices, const Communication comm,
+    GlobalPerfContainerFactory(const IndexSet& local_indices,
+                               const Parallel::Communication comm,
                                int num_local_perfs);

     /// \brief Creates a container that holds values for all perforations
@@ -229,9 +219,10 @@ public:
                        std::size_t num_components) const;

     int numGlobalPerfs() const;

 private:
     const IndexSet& local_indices_;
-    Communication comm_;
+    Parallel::Communication comm_;
     int num_global_perfs_;
     /// \brief sizes for allgatherv
     std::vector<int> sizes_;
@@ -251,13 +242,6 @@ private:
 class ParallelWellInfo
 {
 public:
-    using MPIComm = typename Dune::MPIHelper::MPICommunicator;
-#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
-    using Communication = Dune::Communication<MPIComm>;
-#else
-    using Communication = Dune::CollectiveCommunication<MPIComm>;
-#endif
-
     static constexpr int INVALID_ECL_INDEX = -1;

     /// \brief Constructs object using MPI_COMM_SELF
@@ -271,9 +255,9 @@ public:
     /// \param allComm The communication object with all MPI ranks active in the simulation.
     ///        Default is the one with all ranks available.
     ParallelWellInfo(const std::pair<std::string,bool>& well_info,
-                     Communication allComm);
+                     Parallel::Communication allComm);

-    const Communication& communication() const
+    const Parallel::Communication& communication() const
     {
         return *comm_;
     }
@@ -399,12 +383,13 @@ public:
     /// it is stored. Container is ordered via ascendings index of the perforations
     /// in the ECL schedule.
     const GlobalPerfContainerFactory& getGlobalPerfContainerFactory() const;

 private:

     /// \brief Deleter that also frees custom MPI communicators
     struct DestroyComm
     {
-        void operator()(Communication* comm);
+        void operator()(Parallel::Communication* comm);
     };


@@ -419,7 +404,7 @@ private:
     /// \brief Communication object for the well
     ///
     /// Contains only ranks where this well will perforate local cells.
-    std::unique_ptr<Communication, DestroyComm> comm_;
+    std::unique_ptr<Parallel::Communication, DestroyComm> comm_;

     /// \brief used to communicate the values for the perforation above.
     std::unique_ptr<CommunicateAboveBelow> commAboveBelow_;
@@ -443,6 +428,7 @@ public:
     void connectionFound(std::size_t index);

     bool checkAllConnectionsFound();

 private:
     std::vector<std::size_t> foundConnections_;
     const Well& well_;

@@ -26,6 +26,8 @@
 #include <opm/input/eclipse/Schedule/Schedule.hpp>
 #include <opm/simulators/wells/ParallelWellInfo.hpp>

+#include <opm/simulators/utils/ParallelCommunication.hpp>
+
 #include <algorithm>
 #include <cassert>
 #include <numeric>
@@ -825,8 +827,8 @@ WellState::parallelWellInfo(std::size_t well_index) const
     return ws.parallel_info;
 }

-template void WellState::updateGlobalIsGrup<ParallelWellInfo::Communication>(const ParallelWellInfo::Communication& comm);
-template void WellState::communicateGroupRates<ParallelWellInfo::Communication>(const ParallelWellInfo::Communication& comm);
+template void WellState::updateGlobalIsGrup<Parallel::Communication>(const Parallel::Communication& comm);
+template void WellState::communicateGroupRates<Parallel::Communication>(const Parallel::Communication& comm);
 } // namespace Opm

@@ -21,6 +21,8 @@

 #include<opm/simulators/wells/ParallelWellInfo.hpp>

+#include <opm/simulators/utils/ParallelCommunication.hpp>
+
 #include <dune/common/version.hh>
 #include<vector>
 #include<string>
@@ -118,7 +120,7 @@ BOOST_AUTO_TEST_CASE(ParallelWellComparison)
     std::vector<Opm::ParallelWellInfo> well_info;

     for (const auto& wellinfo : pairs) {
-        well_info.emplace_back(wellinfo, Opm::ParallelWellInfo::Communication());
+        well_info.emplace_back(wellinfo, Opm::Parallel::Communication());
     }

     //well_info.assign(pairs.begin(), pairs.end());
@@ -154,7 +156,7 @@ BOOST_AUTO_TEST_CASE(ParallelWellComparison)
     BOOST_CHECK(well_info[0].communication().size()==1);

 #if HAVE_MPI
-    Opm::ParallelWellInfo::Communication comm{MPI_COMM_WORLD};
+    Opm::Parallel::Communication comm{MPI_COMM_WORLD};

     BOOST_CHECK(well_info[1].communication().size() == comm.size());

@@ -235,7 +237,7 @@ BOOST_AUTO_TEST_CASE(CommunicateAboveBelowSelf1)
     }
 }

-std::vector<int> createGlobalEclIndex(const Opm::ParallelWellInfo::Communication& comm)
+std::vector<int> createGlobalEclIndex(const Opm::Parallel::Communication& comm)
 {
     std::vector<int> globalEclIndex = {0, 1, 2, 3, 7 , 8, 10, 11};
     auto oldSize = globalEclIndex.size();
@@ -256,7 +258,7 @@ std::vector<int> createGlobalEclIndex(const Opm::ParallelWellInfo::Communication

 template<class C>
 std::vector<double> populateCommAbove(C& commAboveBelow,
-                                      const Opm::ParallelWellInfo::Communication& comm,
+                                      const Opm::Parallel::Communication& comm,
                                       const std::vector<int>& globalEclIndex,
                                       const std::vector<double> globalCurrent,
                                       int num_component = 1,
@@ -287,7 +289,7 @@ std::vector<double> populateCommAbove(C& commAboveBelow,

 BOOST_AUTO_TEST_CASE(CommunicateAboveBelowParallel)
 {
-    auto comm = Opm::ParallelWellInfo::Communication(Dune::MPIHelper::getCommunicator());
+    auto comm = Opm::Parallel::Communication(Dune::MPIHelper::getCommunicator());

     Opm::CommunicateAboveBelow commAboveBelow{ comm };
     for(std::size_t count=0; count < 2; ++count)
@@ -365,7 +367,7 @@ BOOST_AUTO_TEST_CASE(PartialSumself)
     commAboveBelow.endReset();

     initRandomNumbers(std::begin(current), std::end(current),
-                      Opm::ParallelWellInfo::Communication(comm));
+                      Opm::Parallel::Communication(comm));
     auto stdCopy = current;
     std::partial_sum(std::begin(stdCopy), std::end(stdCopy), std::begin(stdCopy));

@@ -379,13 +381,13 @@ BOOST_AUTO_TEST_CASE(PartialSumself)
 BOOST_AUTO_TEST_CASE(PartialSumParallel)
 {

-    auto comm = Opm::ParallelWellInfo::Communication(Dune::MPIHelper::getCommunicator());
+    auto comm = Opm::Parallel::Communication(Dune::MPIHelper::getCommunicator());

     Opm::CommunicateAboveBelow commAboveBelow{ comm };
     auto globalEclIndex = createGlobalEclIndex(comm);
     std::vector<double> globalCurrent(globalEclIndex.size());
     initRandomNumbers(std::begin(globalCurrent), std::end(globalCurrent),
-                      Opm::ParallelWellInfo::Communication(comm));
+                      Opm::Parallel::Communication(comm));

     auto localCurrent = populateCommAbove(commAboveBelow, comm,
                                           globalEclIndex, globalCurrent);
@@ -407,7 +409,7 @@ BOOST_AUTO_TEST_CASE(PartialSumParallel)

 void testGlobalPerfFactoryParallel(int num_component, bool local_consecutive = false)
 {
-    auto comm = Opm::ParallelWellInfo::Communication(Dune::MPIHelper::getCommunicator());
+    auto comm = Opm::Parallel::Communication(Dune::MPIHelper::getCommunicator());

     Opm::ParallelWellInfo wellInfo{ {"Test", true }, comm };
     auto globalEclIndex = createGlobalEclIndex(comm);
@@ -473,7 +475,7 @@ BOOST_AUTO_TEST_CASE(GlobalPerfFactoryParallel1)


 BOOST_AUTO_TEST_CASE(EmptyWell) {
-    auto comm = Opm::ParallelWellInfo::Communication(Dune::MPIHelper::getCommunicator());
+    auto comm = Opm::Parallel::Communication(Dune::MPIHelper::getCommunicator());
     Opm::ParallelWellInfo pw({"WELL1", true}, comm);
     pw.communicateFirstPerforation(false);
     double local_p = 1;
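Call sites keep working unchanged because only the spelling of the type moves: anything that previously took const Comm& or const ParallelWellInfo::Communication& now takes const Opm::Parallel::Communication&. A small usage sketch under that assumption (the helper below is hypothetical and not part of the commit):

// Hypothetical example, not from the diff: a helper taking the shared
// communicator alias and using collective operations that the changed code
// also relies on (rank(), sum(), max() from Dune's communication interface).
#include <opm/simulators/utils/ParallelCommunication.hpp>
#include <dune/common/parallel/mpihelper.hh>

namespace Opm {

double globalAverage(const Parallel::Communication& comm,
                     double localValue,
                     int localCount)
{
    // Sum the local contributions across all ranks, then average.
    const double sum = comm.sum(localValue);
    const int count = comm.sum(localCount);
    return count > 0 ? sum / count : 0.0;
}

} // namespace Opm

// Typical construction, mirroring the updated test cases above:
//   auto comm = Opm::Parallel::Communication(Dune::MPIHelper::getCommunicator());
//   const double avg = Opm::globalAverage(comm, localValue, localCount);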