mirror of https://github.com/OPM/opm-simulators.git
synced 2025-02-25 18:55:30 -06:00

commit 0202b00d59
parent f53c597f90

    clean up after review
@@ -272,8 +272,7 @@ void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::doCreateGrids_(Ecl
         // we need to pass the NNC from root process to other processes
         if (has_numerical_aquifer && mpiSize > 1) {
             auto nnc_input = eclState.getInputNNC();
-            const auto& comm_nnc = Opm::Parallel::Communication();
-            EclMpiSerializer ser(comm_nnc);
+            EclMpiSerializer ser(grid_->comm());
             ser.broadcast(nnc_input);
             if (mpiRank > 0) {
                 eclState.setInputNNC(nnc_input);
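For context: the deleted lines bound `comm_nnc` to a default-constructed communicator and serialized over that, while the replacement serializes over the communicator the grid was actually distributed with. A minimal sketch of the underlying broadcast-from-root pattern in plain MPI, where `MyData` is a hypothetical stand-in for the NNC payload:

    #include <mpi.h>
    #include <vector>

    // The size is broadcast first so that receiving ranks can allocate,
    // then the payload itself is broadcast; this mirrors what a
    // serializer-based broadcast does internally.
    struct MyData { std::vector<double> values; };

    void broadcastData(MyData& data, MPI_Comm comm)
    {
        int rank;
        MPI_Comm_rank(comm, &rank);
        unsigned long n = data.values.size();
        MPI_Bcast(&n, 1, MPI_UNSIGNED_LONG, 0, comm);
        if (rank != 0)
            data.values.resize(n);   // non-root ranks allocate before receiving
        MPI_Bcast(data.values.data(), static_cast<int>(n), MPI_DOUBLE, 0, comm);
    }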
@@ -68,8 +68,7 @@ class UDQState;

 class EclGenericVanguard {
 public:
-    using ParallelWellStruct = std::vector<std::pair<std::string,bool>>;
+    using ParallelWellStruct = std::vector<std::pair<std::string,bool>>;

     /*!
      * \brief Constructor.
@@ -512,11 +512,11 @@ evalSummary(int reportStepNum,
     if (collectToIORank_.isParallel()) {
 #ifdef HAVE_MPI
         unsigned long buffer_size = buffer.size();
-        MPI_Bcast(&buffer_size, 1, MPI_UNSIGNED_LONG, collectToIORank_.ioRank, grid_.comm());
+        grid_.comm().broadcast(&buffer_size, 1, collectToIORank_.ioRank);
         if (!collectToIORank_.isIORank())
             buffer.resize( buffer_size );

-        MPI_Bcast(buffer.data(), buffer_size, MPI_CHAR, collectToIORank_.ioRank, grid_.comm());
+        grid_.comm().broadcast(buffer.data(), buffer_size, collectToIORank_.ioRank);
         if (!collectToIORank_.isIORank()) {
             SummaryState& st = summaryState;
             st.deserialize(buffer);
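The cleanup replaces raw `MPI_Bcast` calls with Dune's typed `broadcast` wrapper, which deduces the MPI datatype from the pointer type and degenerates to a no-op in serial builds. A minimal sketch of the same size-then-payload idiom, assuming only the documented `Communication::broadcast(T* inout, int len, int root)` interface (names are illustrative):

    #include <dune/common/parallel/mpihelper.hh>
    #include <vector>

    template <class Comm>
    void shareBuffer(const Comm& comm, std::vector<char>& buffer, int ioRank)
    {
        unsigned long size = buffer.size();
        comm.broadcast(&size, 1, ioRank);       // every rank learns the length
        if (comm.rank() != ioRank)
            buffer.resize(size);                // receivers allocate space first
        comm.broadcast(buffer.data(), static_cast<int>(size), ioRank);
    }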
@@ -25,7 +25,6 @@

 #include <sys/utsname.h>

-#include <dune/common/version.hh>
 #include <opm/simulators/flow/SimulatorFullyImplicitBlackoilEbos.hpp>
 #include <opm/simulators/utils/ParallelFileMerger.hpp>
 #include <opm/simulators/utils/moduleVersion.hpp>
@@ -45,14 +44,6 @@
 #include <dune/common/parallel/mpihelper.hh>
 #endif

-namespace Opm::Parallel {
-#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
-using Communication = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
-#else
-using Communication = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
-#endif
-}
-
 namespace Opm::Properties {

 template<class TypeTag, class MyTypeTag>
@@ -44,16 +44,7 @@
 #include <dune/common/enumset.hh>
 #include <opm/common/utility/platform_dependent/reenable_warnings.h>

-#include <dune/common/version.hh>
-#include <dune/common/parallel/mpihelper.hh>
-
-namespace Opm::Parallel {
-#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
-using Communication = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
-#else
-using Communication = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
-#endif
-} // end namespace Communication
+#include <opm/simulators/utils/ParallelCommunication.hpp>

 namespace Opm
 {
@@ -21,17 +21,9 @@
 #ifndef OPM_GATHERCONVERGENCEREPORT_HEADER_INCLUDED
 #define OPM_GATHERCONVERGENCEREPORT_HEADER_INCLUDED

-#include <dune/common/version.hh>
 #include <opm/simulators/timestepping/ConvergenceReport.hpp>
-#include <dune/common/parallel/mpihelper.hh>
-
-namespace Opm::Parallel {
-#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
-using Communication = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
-#else
-using Communication = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
-#endif
-} // end namespace Communication
+#include <opm/simulators/utils/ParallelCommunication.hpp>

 namespace Opm
 {
@@ -21,20 +21,11 @@
 #ifndef OPM_DEFERREDLOGGER_HEADER_INCLUDED
 #define OPM_DEFERREDLOGGER_HEADER_INCLUDED

-#include <dune/common/version.hh>
-#include <dune/common/parallel/mpihelper.hh>
+#include <opm/simulators/utils/ParallelCommunication.hpp>

 #include <string>
 #include <vector>

-namespace Opm::Parallel {
-#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
-using Communication = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
-#else
-using Communication = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
-#endif
-}
-
 namespace Opm
 {
 /** This class implements a deferred logger:
@@ -26,8 +26,7 @@

 #include <opm/material/common/Exceptions.hpp>

-#include <dune/common/version.hh>
-#include <dune/common/parallel/mpihelper.hh>
+#include <opm/simulators/utils/ParallelCommunication.hpp>

 #include <string>
 #include <sstream>
opm/simulators/utils/ParallelCommunication.hpp (new file, 37 lines)
@@ -0,0 +1,37 @@
+/*
+  Copyright 2021 SINTEF Digital, Mathematics and Cybernetics.
+
+  This file is part of the Open Porous Media project (OPM).
+
+  OPM is free software: you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation, either version 3 of the License, or
+  (at your option) any later version.
+
+  OPM is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with OPM. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef OPM_PARALLELCOMMUNICATION_HEADER_INCLUDED
+#define OPM_PARALLELCOMMUNICATION_HEADER_INCLUDED
+
+#include <dune/common/version.hh>
+#include <dune/common/parallel/mpihelper.hh>
+
+namespace Opm
+{
+namespace Parallel {
+using MPIComm = typename Dune::MPIHelper::MPICommunicator;
+#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
+using Communication = Dune::Communication<MPIComm>;
+#else
+using Communication = Dune::CollectiveCommunication<MPIComm>;
+#endif
+}
+} // end namespace Opm
+#endif // OPM_PARALLELCOMMUNICATION_HEADER_INCLUDED
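The new header centralizes the DUNE-version-conditional alias that the hunks above delete from the individual files. A short usage sketch (the `main` scaffolding is illustrative, not part of the commit):

    #include <dune/common/parallel/mpihelper.hh>
    #include <opm/simulators/utils/ParallelCommunication.hpp>
    #include <iostream>

    int main(int argc, char** argv)
    {
        // MPIHelper initializes MPI when available and finalizes it at exit.
        const auto& helper = Dune::MPIHelper::instance(argc, argv);
        // One alias compiles against both pre-2.7 and newer DUNE.
        Opm::Parallel::Communication comm(helper.getCommunicator());
        std::cout << "rank " << comm.rank() << " of " << comm.size() << '\n';
        return 0;
    }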
@@ -24,15 +24,9 @@
 #include <opm/parser/eclipse/EclipseState/Grid/TranCalculator.hpp>
-#include <dune/common/parallel/mpihelper.hh>

+#include <functional>
+#include <opm/simulators/utils/ParallelCommunication.hpp>

-namespace Opm::Parallel {
-#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
-using Communication = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
-#else
-using Communication = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
-#endif
-} // end namespace Communication
-#include <functional>

 namespace Opm {
@@ -29,6 +29,8 @@
 #include <dune/common/version.hh>
 #include <dune/common/parallel/mpihelper.hh>

+#include <opm/simulators/utils/ParallelCommunication.hpp>
+
 #include <chrono>
 #include <optional>
 #include <map>
@@ -40,15 +42,6 @@
 #include <unordered_set>
 #include <vector>

-namespace Opm::Parallel {
-using MPIComm = typename Dune::MPIHelper::MPICommunicator;
-#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
-using Communication = Dune::Communication<MPIComm>;
-#else
-using Communication = Dune::CollectiveCommunication<MPIComm>;
-#endif
-} // end namespace Communication
-
 namespace Opm
 {
@@ -87,18 +80,18 @@ class State;
 namespace Mpi
 {
 template<class T>
-std::size_t packSize(const T*, std::size_t, Parallel::MPIComm,
+std::size_t packSize(const T*, std::size_t, Opm::Parallel::MPIComm,
                      std::integral_constant<bool, false>);

 template<class T>
-std::size_t packSize(const T*, std::size_t l, Parallel::MPIComm comm,
+std::size_t packSize(const T*, std::size_t l, Opm::Parallel::MPIComm comm,
                      std::integral_constant<bool, true>);

 template<class T>
-std::size_t packSize(const T* data, std::size_t l, Parallel::MPIComm comm);
+std::size_t packSize(const T* data, std::size_t l, Opm::Parallel::MPIComm comm);

 template<class T>
-std::size_t packSize(const T&, Parallel::MPIComm,
+std::size_t packSize(const T&, Opm::Parallel::MPIComm,
                      std::integral_constant<bool, false>)
 {
     std::string msg = std::string{"Packing not (yet) supported for non-pod type: "} + typeid(T).name();
@@ -106,7 +99,7 @@ std::size_t packSize(const T&, Parallel::MPIComm,
 }

 template<class T>
-std::size_t packSize(const T&, Parallel::MPIComm comm,
+std::size_t packSize(const T&, Opm::Parallel::MPIComm comm,
                      std::integral_constant<bool, true>)
 {
 #if HAVE_MPI
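These overloads select behavior by tag dispatch on `std::is_pod`: the public entry point forwards to the `integral_constant<bool, true>` overload for POD types (sized via MPI) and to the `bool, false` overload otherwise, which throws unless a type-specific overload exists. A self-contained sketch of the mechanism, with `byteCount` as a hypothetical analogue of `packSize`:

    #include <cstddef>
    #include <stdexcept>
    #include <type_traits>

    template <class T>
    std::size_t byteCount(const T&, std::integral_constant<bool, true>)
    {
        return sizeof(T);   // POD path; stand-in for MPI_Pack_size
    }

    template <class T>
    std::size_t byteCount(const T&, std::integral_constant<bool, false>)
    {
        // non-POD path: no generic answer, a dedicated overload must exist
        throw std::logic_error("byteCount not supported for non-pod type");
    }

    template <class T>
    std::size_t byteCount(const T& value)
    {
        // the overload is chosen at compile time from std::is_pod<T>
        return byteCount(value, typename std::is_pod<T>::type());
    }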
@@ -120,69 +113,69 @@ std::size_t packSize(const T&, Parallel::MPIComm comm,
 }

 template<class T>
-std::size_t packSize(const T& data, Parallel::MPIComm comm)
+std::size_t packSize(const T& data, Opm::Parallel::MPIComm comm)
 {
     return packSize(data, comm, typename std::is_pod<T>::type());
 }

 template<class T1, class T2>
-std::size_t packSize(const std::pair<T1,T2>& data, Parallel::MPIComm comm);
+std::size_t packSize(const std::pair<T1,T2>& data, Opm::Parallel::MPIComm comm);

 template<class T>
-std::size_t packSize(const std::optional<T>& data, Parallel::MPIComm comm);
+std::size_t packSize(const std::optional<T>& data, Opm::Parallel::MPIComm comm);

 template<class T, class A>
-std::size_t packSize(const std::vector<T,A>& data, Parallel::MPIComm comm);
+std::size_t packSize(const std::vector<T,A>& data, Opm::Parallel::MPIComm comm);

 template<class K, class C, class A>
 std::size_t packSize(const std::set<K,C,A>& data,
-                     Parallel::MPIComm comm);
+                     Opm::Parallel::MPIComm comm);

 template<class T, class H, class KE, class A>
 std::size_t packSize(const std::unordered_set<T,H,KE,A>& data,
-                     Parallel::MPIComm comm);
+                     Opm::Parallel::MPIComm comm);

 template<class A>
-std::size_t packSize(const std::vector<bool,A>& data, Parallel::MPIComm comm);
+std::size_t packSize(const std::vector<bool,A>& data, Opm::Parallel::MPIComm comm);

 template<class... Ts>
-std::size_t packSize(const std::tuple<Ts...>& data, Parallel::MPIComm comm);
+std::size_t packSize(const std::tuple<Ts...>& data, Opm::Parallel::MPIComm comm);

 template<class T, std::size_t N>
-std::size_t packSize(const std::array<T,N>& data, Parallel::MPIComm comm);
+std::size_t packSize(const std::array<T,N>& data, Opm::Parallel::MPIComm comm);

-std::size_t packSize(const char* str, Parallel::MPIComm comm);
+std::size_t packSize(const char* str, Opm::Parallel::MPIComm comm);

 template<class T1, class T2, class C, class A>
-std::size_t packSize(const std::map<T1,T2,C,A>& data, Parallel::MPIComm comm);
+std::size_t packSize(const std::map<T1,T2,C,A>& data, Opm::Parallel::MPIComm comm);

 template<class T1, class T2, class H, class P, class A>
-std::size_t packSize(const std::unordered_map<T1,T2,H,P,A>& data, Parallel::MPIComm comm);
+std::size_t packSize(const std::unordered_map<T1,T2,H,P,A>& data, Opm::Parallel::MPIComm comm);

 ////// pack routines

 template<class T>
 void pack(const T*, std::size_t, std::vector<char>&, int&,
-          Parallel::MPIComm, std::integral_constant<bool, false>);
+          Opm::Parallel::MPIComm, std::integral_constant<bool, false>);

 template<class T>
 void pack(const T* data, std::size_t l, std::vector<char>& buffer, int& position,
-          Parallel::MPIComm comm, std::integral_constant<bool, true>);
+          Opm::Parallel::MPIComm comm, std::integral_constant<bool, true>);

 template<class T>
 void pack(const T* data, std::size_t l, std::vector<char>& buffer, int& position,
-          Parallel::MPIComm comm);
+          Opm::Parallel::MPIComm comm);

 template<class T>
 void pack(const T&, std::vector<char>&, int&,
-          Parallel::MPIComm, std::integral_constant<bool, false>)
+          Opm::Parallel::MPIComm, std::integral_constant<bool, false>)
 {
     OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type.");
 }

 template<class T>
 void pack(const T& data, std::vector<char>& buffer, int& position,
-          Parallel::MPIComm comm, std::integral_constant<bool, true>)
+          Opm::Parallel::MPIComm comm, std::integral_constant<bool, true>)
 {
 #if HAVE_MPI
     MPI_Pack(&data, 1, Dune::MPITraits<T>::getType(), buffer.data(),
@@ -197,81 +190,81 @@ void pack(const T& data, std::vector<char>& buffer, int& position,

 template<class T>
 void pack(const T& data, std::vector<char>& buffer, int& position,
-          Parallel::MPIComm comm)
+          Opm::Parallel::MPIComm comm)
 {
     pack(data, buffer, position, comm, typename std::is_pod<T>::type());
 }

 template<class T1, class T2>
 void pack(const std::pair<T1,T2>& data, std::vector<char>& buffer, int& position,
-          Parallel::MPIComm comm);
+          Opm::Parallel::MPIComm comm);

 template<class T>
 void pack(const std::optional<T>& data, std::vector<char>& buffer, int& position,
-          Parallel::MPIComm comm);
+          Opm::Parallel::MPIComm comm);

 template<class T, class A>
 void pack(const std::vector<T,A>& data, std::vector<char>& buffer, int& position,
-          Parallel::MPIComm comm);
+          Opm::Parallel::MPIComm comm);

 template<class A>
 void pack(const std::vector<bool,A>& data, std::vector<char>& buffer, int& position,
-          Parallel::MPIComm comm);
+          Opm::Parallel::MPIComm comm);

 template<class... Ts>
 void pack(const std::tuple<Ts...>& data, std::vector<char>& buffer,
-          int& position, Parallel::MPIComm comm);
+          int& position, Opm::Parallel::MPIComm comm);

 template<class K, class C, class A>
 void pack(const std::set<K,C,A>& data,
           std::vector<char>& buffer, int& position,
-          Parallel::MPIComm comm);
+          Opm::Parallel::MPIComm comm);

 template<class T, class H, class KE, class A>
 void pack(const std::unordered_set<T,H,KE,A>& data,
           std::vector<char>& buffer, int& position,
-          Parallel::MPIComm comm);
+          Opm::Parallel::MPIComm comm);

 template<class T, size_t N>
 void pack(const std::array<T,N>& data, std::vector<char>& buffer, int& position,
-          Parallel::MPIComm comm);
+          Opm::Parallel::MPIComm comm);

 template<class T1, class T2, class C, class A>
 void pack(const std::map<T1,T2,C,A>& data, std::vector<char>& buffer, int& position,
-          Parallel::MPIComm comm);
+          Opm::Parallel::MPIComm comm);

 template<class T1, class T2, class H, class P, class A>
 void pack(const std::unordered_map<T1,T2,H,P,A>& data, std::vector<char>& buffer, int& position,
-          Parallel::MPIComm comm);
+          Opm::Parallel::MPIComm comm);

 void pack(const char* str, std::vector<char>& buffer, int& position,
-          Parallel::MPIComm comm);
+          Opm::Parallel::MPIComm comm);

 /// unpack routines

 template<class T>
 void unpack(T*, const std::size_t&, std::vector<char>&, int&,
-            Parallel::MPIComm, std::integral_constant<bool, false>);
+            Opm::Parallel::MPIComm, std::integral_constant<bool, false>);

 template<class T>
 void unpack(T* data, const std::size_t& l, std::vector<char>& buffer, int& position,
-            Parallel::MPIComm comm,
+            Opm::Parallel::MPIComm comm,
             std::integral_constant<bool, true>);

 template<class T>
 void unpack(T* data, const std::size_t& l, std::vector<char>& buffer, int& position,
-            Parallel::MPIComm comm);
+            Opm::Parallel::MPIComm comm);

 template<class T>
 void unpack(T&, std::vector<char>&, int&,
-            Parallel::MPIComm, std::integral_constant<bool, false>)
+            Opm::Parallel::MPIComm, std::integral_constant<bool, false>)
 {
     OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type.");
 }

 template<class T>
 void unpack(T& data, std::vector<char>& buffer, int& position,
-            Parallel::MPIComm comm, std::integral_constant<bool, true>)
+            Opm::Parallel::MPIComm comm, std::integral_constant<bool, true>)
 {
 #if HAVE_MPI
     MPI_Unpack(buffer.data(), buffer.size(), &position, &data, 1,
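The `pack`/`unpack` pairs declared here are thin wrappers over `MPI_Pack` and `MPI_Unpack`, threading a shared `position` cursor through one `std::vector<char>` buffer. A minimal round trip in plain MPI, independent of the OPM wrappers:

    #include <mpi.h>
    #include <cassert>
    #include <vector>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);

        int    a = 42;
        double b = 3.14;

        // Ask MPI how much space the packed representation needs.
        int sizeInt = 0, sizeDouble = 0;
        MPI_Pack_size(1, MPI_INT, MPI_COMM_WORLD, &sizeInt);
        MPI_Pack_size(1, MPI_DOUBLE, MPI_COMM_WORLD, &sizeDouble);
        std::vector<char> buffer(sizeInt + sizeDouble);
        const int size = static_cast<int>(buffer.size());

        // Pack: 'position' advances past each packed element.
        int position = 0;
        MPI_Pack(&a, 1, MPI_INT, buffer.data(), size, &position, MPI_COMM_WORLD);
        MPI_Pack(&b, 1, MPI_DOUBLE, buffer.data(), size, &position, MPI_COMM_WORLD);

        // Unpack in the same order with a fresh cursor.
        int a2 = 0; double b2 = 0.0;
        position = 0;
        MPI_Unpack(buffer.data(), size, &position, &a2, 1, MPI_INT, MPI_COMM_WORLD);
        MPI_Unpack(buffer.data(), size, &position, &b2, 1, MPI_DOUBLE, MPI_COMM_WORLD);
        assert(a2 == 42 && b2 == 3.14);

        MPI_Finalize();
        return 0;
    }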
@@ -286,64 +279,64 @@ void unpack(T& data, std::vector<char>& buffer, int& position,

 template<class T>
 void unpack(T& data, std::vector<char>& buffer, int& position,
-            Parallel::MPIComm comm)
+            Opm::Parallel::MPIComm comm)
 {
     unpack(data, buffer, position, comm, typename std::is_pod<T>::type());
 }

 template<class T1, class T2>
 void unpack(std::pair<T1,T2>& data, std::vector<char>& buffer, int& position,
-            Parallel::MPIComm comm);
+            Opm::Parallel::MPIComm comm);

 template<class T>
 void unpack(std::optional<T>& data, std::vector<char>& buffer, int& position,
-            Parallel::MPIComm comm);
+            Opm::Parallel::MPIComm comm);

 template<class T, class A>
 void unpack(std::vector<T,A>& data, std::vector<char>& buffer, int& position,
-            Parallel::MPIComm comm);
+            Opm::Parallel::MPIComm comm);

 template<class A>
 void unpack(std::vector<bool,A>& data, std::vector<char>& buffer, int& position,
-            Parallel::MPIComm comm);
+            Opm::Parallel::MPIComm comm);

 template<class... Ts>
 void unpack(std::tuple<Ts...>& data, std::vector<char>& buffer,
-            int& position, Parallel::MPIComm comm);
+            int& position, Opm::Parallel::MPIComm comm);

 template<class K, class C, class A>
 void unpack(std::set<K,C,A>& data,
             std::vector<char>& buffer, int& position,
-            Parallel::MPIComm comm);
+            Opm::Parallel::MPIComm comm);

 template<class T, class H, class KE, class A>
 void unpack(std::unordered_set<T,H,KE,A>& data,
             std::vector<char>& buffer, int& position,
-            Parallel::MPIComm comm);
+            Opm::Parallel::MPIComm comm);

 template<class T, size_t N>
 void unpack(std::array<T,N>& data, std::vector<char>& buffer, int& position,
-            Parallel::MPIComm comm);
+            Opm::Parallel::MPIComm comm);

 template<class T1, class T2, class C, class A>
 void unpack(std::map<T1,T2,C,A>& data, std::vector<char>& buffer, int& position,
-            Parallel::MPIComm comm);
+            Opm::Parallel::MPIComm comm);

 template<class T1, class T2, class H, class P, class A>
 void unpack(std::unordered_map<T1,T2,H,P,A>& data, std::vector<char>& buffer, int& position,
-            Parallel::MPIComm comm);
+            Opm::Parallel::MPIComm comm);

 void unpack(char* str, std::size_t length, std::vector<char>& buffer, int& position,
-            Parallel::MPIComm comm);
+            Opm::Parallel::MPIComm comm);

 /// prototypes for complex types

 #define ADD_PACK_PROTOTYPES(T) \
-  std::size_t packSize(const T& data, Parallel::MPIComm comm); \
+  std::size_t packSize(const T& data, Opm::Parallel::MPIComm comm); \
   void pack(const T& data, std::vector<char>& buffer, int& position, \
-            Parallel::MPIComm comm); \
+            Opm::Parallel::MPIComm comm); \
   void unpack(T& data, std::vector<char>& buffer, int& position, \
-              Parallel::MPIComm comm);
+              Opm::Parallel::MPIComm comm);

 ADD_PACK_PROTOTYPES(data::AquiferData)
 ADD_PACK_PROTOTYPES(data::CarterTracyData)
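`ADD_PACK_PROTOTYPES(T)` stamps out the `packSize`, `pack`, and `unpack` declarations for each concrete type, so after this change every expansion picks up the fully qualified communicator type. For example, `ADD_PACK_PROTOTYPES(data::AquiferData)` expands (modulo whitespace) to:

    std::size_t packSize(const data::AquiferData& data, Opm::Parallel::MPIComm comm);
    void pack(const data::AquiferData& data, std::vector<char>& buffer, int& position,
              Opm::Parallel::MPIComm comm);
    void unpack(data::AquiferData& data, std::vector<char>& buffer, int& position,
                Opm::Parallel::MPIComm comm);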
@@ -19,16 +19,7 @@
 #ifndef PARALLEL_SERIALIZATION_HPP
 #define PARALLEL_SERIALIZATION_HPP

-#include <dune/common/version.hh>
-#include <dune/common/parallel/mpihelper.hh>
-
-namespace Opm::Parallel {
-#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
-using Communication = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
-#else
-using Communication = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
-#endif
-}
+#include <opm/simulators/utils/ParallelCommunication.hpp>

 namespace Opm {
@@ -21,9 +21,7 @@
 #ifndef OPM_GATHERDEFERREDLOGGER_HEADER_INCLUDED
 #define OPM_GATHERDEFERREDLOGGER_HEADER_INCLUDED

-#include <dune/common/parallel/mpihelper.hh>
 #include <opm/simulators/utils/DeferredLogger.hpp>
-#include <dune/common/version.hh>

 namespace Opm
 {
@@ -22,8 +22,8 @@
 #ifndef OPM_READDECK_HEADER_INCLUDED
 #define OPM_READDECK_HEADER_INCLUDED

-#include <dune/common/version.hh>
-#include <dune/common/parallel/mpihelper.hh>
+#include <opm/simulators/utils/ParallelCommunication.hpp>
+
 #include <memory>
 #include <optional>
 #include <string>
@@ -44,14 +44,6 @@ namespace Action {
 class State;
 }

-namespace Parallel {
-#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
-using Communication = Dune::Communication<Dune::MPIHelper::MPICommunicator>;
-#else
-using Communication = Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>;
-#endif
-}
-
 enum class FileOutputMode {
     //! \brief No output to files.
     OUTPUT_NONE = 0,
@@ -24,6 +24,8 @@
 #include <dune/common/parallel/plocalindex.hh>
 #include <dune/istl/owneroverlapcopy.hh>

+#include <opm/simulators/utils/ParallelCommunication.hpp>
+
 #include <opm/common/ErrorMacros.hpp>

 #include <memory>
@@ -60,7 +62,7 @@ public:
 #if HAVE_MPI
     using RI = Dune::RemoteIndices<IndexSet>;
 #endif

     explicit CommunicateAboveBelow(const Communication& comm);
     /// \brief Adds information about original index of the perforations in ECL Schedule.
     ///
@@ -255,6 +257,7 @@ public:
 #else
     using Communication = Dune::CollectiveCommunication<MPIComm>;
 #endif
+
     static constexpr int INVALID_ECL_INDEX = -1;

     /// \brief Constructs object using MPI_COMM_SELF
@@ -81,7 +81,7 @@ void initLogger(std::ostringstream& log_stream) {

 BOOST_AUTO_TEST_CASE(NoMessages)
 {
-    const Opm::Parallel::Communication& cc = Dune::MPIHelper::getCollectiveCommunication();
+    auto cc = Dune::MPIHelper::getCollectiveCommunication();

     std::ostringstream log_stream;
     initLogger(log_stream);
@@ -108,7 +108,7 @@ BOOST_AUTO_TEST_CASE(ParallelWellComparison)
 {
     int argc = 0;
     char** argv = nullptr;
-    const auto& helper = Dune::MPIHelper::instance(argc, argv);
+    const auto& helper = Dune::MPIHelper::instance(argc, argv);
     std::vector<std::pair<std::string,bool>> pairs;
     if (helper.rank() == 0)
         pairs = {{"Test1", true},{"Test2", true}, {"Test1", false} };