diff --git a/CMakeLists_files.cmake b/CMakeLists_files.cmake
index c313dc8d0..7e41217a8 100644
--- a/CMakeLists_files.cmake
+++ b/CMakeLists_files.cmake
@@ -74,6 +74,7 @@ list (APPEND MAIN_SOURCE_FILES
   opm/simulators/timestepping/gatherConvergenceReport.cpp
   opm/simulators/utils/DeferredLogger.cpp
   opm/simulators/utils/gatherDeferredLogger.cpp
+  opm/simulators/utils/MPIPacker.cpp
   opm/simulators/utils/ParallelFileMerger.cpp
   opm/simulators/utils/ParallelRestart.cpp
   opm/simulators/wells/ALQState.cpp
@@ -180,7 +181,7 @@ list (APPEND TEST_SOURCE_FILES
 if(MPI_FOUND)
   list(APPEND TEST_SOURCE_FILES tests/test_parallelistlinformation.cpp
-                                tests/test_ParallelRestart.cpp)
+                                tests/test_ParallelSerialization.cpp)
 endif()
 if(CUDA_FOUND)
   list(APPEND TEST_SOURCE_FILES tests/test_cusparseSolver.cpp)
diff --git a/ebos/eclmpiserializer.hh b/ebos/eclmpiserializer.hh
index 37f67c39b..58bd8a8aa 100644
--- a/ebos/eclmpiserializer.hh
+++ b/ebos/eclmpiserializer.hh
@@ -21,8 +21,8 @@
 #ifndef ECL_MPI_SERIALIZER_HH
 #define ECL_MPI_SERIALIZER_HH

+#include <opm/simulators/utils/MPIPacker.hpp>
 #include
-#include <opm/simulators/utils/ParallelRestart.hpp>
 #include
 #include

diff --git a/opm/simulators/utils/MPIPacker.cpp b/opm/simulators/utils/MPIPacker.cpp
new file mode 100644
index 000000000..6874d6d5f
--- /dev/null
+++ b/opm/simulators/utils/MPIPacker.cpp
@@ -0,0 +1,739 @@
+/*
+  Copyright 2019 Equinor AS.
+
+  This file is part of the Open Porous Media project (OPM).
+
+  OPM is free software: you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation, either version 3 of the License, or
+  (at your option) any later version.
+
+  OPM is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with OPM. If not, see <http://www.gnu.org/licenses/>.
+*/ +#include +#include "MPIPacker.hpp" + +#include +#include +#include + +#include +#if HAVE_MPI +#include +#endif +#include + +namespace Opm +{ +namespace Mpi +{ +template +std::size_t packSize(const T*, std::size_t, Opm::Parallel::MPIComm, + std::integral_constant) +{ + OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type."); +} + +template +std::size_t packSize(const T*, std::size_t l, Opm::Parallel::MPIComm comm, + std::integral_constant) +{ +#if HAVE_MPI + int size; + MPI_Pack_size(1, Dune::MPITraits::getType(), comm, &size); + std::size_t totalSize = size; + MPI_Pack_size(l, Dune::MPITraits::getType(), comm, &size); + return totalSize + size; +#else + (void) comm; + return l-l; +#endif +} + +template +std::size_t packSize(const T* data, std::size_t l, Opm::Parallel::MPIComm comm) +{ + return packSize(data, l, comm, typename std::is_pod::type()); +} + +template +std::size_t packSize(const std::pair& data, Opm::Parallel::MPIComm comm) +{ + return packSize(data.first, comm) + packSize(data.second, comm); +} + +template +std::size_t packSize(const std::optional& data, Opm::Parallel::MPIComm comm) +{ + bool has_value = data.has_value(); + std::size_t pack_size = packSize(has_value, comm); + if (has_value) + pack_size += packSize(*data, comm); + return pack_size; +} + + +template +std::size_t packSize(const std::vector& data, Opm::Parallel::MPIComm comm) +{ + if (std::is_pod::value) + // size written automatically + return packSize(data.data(), data.size(), comm); + + std::size_t size = packSize(data.size(), comm); + + for (const auto& entry: data) + size += packSize(entry, comm); + + return size; +} + +template +std::size_t packSize(const std::vector& data, Opm::Parallel::MPIComm comm) +{ + bool entry = false; + return packSize(data.size(), comm) + data.size()*packSize(entry,comm); +} + +template +typename std::enable_if::value, std::size_t>::type +pack_size_tuple_entry(const Tuple&, Opm::Parallel::MPIComm) +{ + return 0; +} + +template +typename std::enable_if::value, std::size_t>::type +pack_size_tuple_entry(const Tuple& tuple, Opm::Parallel::MPIComm comm) +{ + return packSize(std::get(tuple), comm) + pack_size_tuple_entry(tuple, comm); +} + +template +std::size_t packSize(const std::tuple& data, Opm::Parallel::MPIComm comm) +{ + return pack_size_tuple_entry(data, comm); +} + +template +std::size_t packSize(const std::unordered_set& data, + Opm::Parallel::MPIComm comm) +{ + std::size_t totalSize = packSize(data.size(), comm); + for (const auto& entry : data) + { + totalSize += packSize(entry, comm); + } + return totalSize; +} + +template +std::size_t packSize(const std::set& data, + Opm::Parallel::MPIComm comm) +{ + std::size_t totalSize = packSize(data.size(), comm); + for (const auto& entry : data) + { + totalSize += packSize(entry, comm); + } + return totalSize; +} + +std::size_t packSize(const char* str, Opm::Parallel::MPIComm comm) +{ +#if HAVE_MPI + int size; + MPI_Pack_size(1, Dune::MPITraits::getType(), comm, &size); + int totalSize = size; + MPI_Pack_size(strlen(str)+1, MPI_CHAR, comm, &size); + return totalSize + size; +#else + (void) str; + (void) comm; + return 0; +#endif +} + +std::size_t packSize(const std::string& str, Opm::Parallel::MPIComm comm) +{ + return packSize(str.c_str(), comm); +} + +template +std::size_t packSize(const std::map& data, Opm::Parallel::MPIComm comm) +{ + std::size_t totalSize = packSize(data.size(), comm); + for (const auto& entry: data) + { + totalSize += packSize(entry, comm); + } + return totalSize; +} + +template 
+std::size_t packSize(const std::unordered_map& data, Opm::Parallel::MPIComm comm) +{ + std::size_t totalSize = packSize(data.size(), comm); + for (const auto& entry: data) + { + totalSize += packSize(entry, comm); + } + return totalSize; +} + +template +std::size_t packSize(const std::array& data, Opm::Parallel::MPIComm comm) +{ + return N*packSize(data[0], comm); +} + +template +struct Packing +{ +}; + +template +struct Packing> +{ + static std::size_t packSize(const std::bitset& data, Opm::Parallel::MPIComm comm) + { + return Mpi::packSize(data.to_ullong(), comm); + } + + static void pack(const std::bitset& data, std::vector& buffer, int& position, Opm::Parallel::MPIComm comm) + { + Mpi::pack(data.to_ullong(), buffer, position, comm); + } + + static void unpack(std::bitset& data, std::vector& buffer, int& position, Opm::Parallel::MPIComm comm) + { + unsigned long long d; + Mpi::unpack(d, buffer, position, comm); + data = std::bitset(d); + } +}; + +template +std::size_t packSize(const std::bitset& data, Opm::Parallel::MPIComm comm) +{ + return Packing>::packSize(data, comm); +} + +std::size_t packSize(const Opm::time_point&, Opm::Parallel::MPIComm comm) +{ + std::time_t tp = 0; + return packSize(tp, comm); +} + + +////// pack routines + +template +void pack(const T*, std::size_t, std::vector&, int&, + Opm::Parallel::MPIComm, std::integral_constant) +{ + OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type."); +} + +template +void pack(const T* data, std::size_t l, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm, + std::integral_constant) +{ +#if HAVE_MPI + MPI_Pack(&l, 1, Dune::MPITraits::getType(), buffer.data(), + buffer.size(), &position, comm); + MPI_Pack(data, l, Dune::MPITraits::getType(), buffer.data(), + buffer.size(), &position, comm); +#else + (void) data; + (void) comm; + (void) l; + (void) buffer; + (void) position; +#endif +} + +template +void pack(const T* data, std::size_t l, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + pack(data, l, buffer, position, comm, typename std::is_pod::type()); +} + +template +void pack(const std::pair& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + pack(data.first, buffer, position, comm); + pack(data.second, buffer, position, comm); +} + +template +void pack(const std::optional& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + bool has_value = data.has_value(); + pack(has_value, buffer, position, comm); + if (has_value) + pack(*data, buffer, position, comm); +} + + +template +void pack(const std::vector& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + if (std::is_pod::value) + { + // size written automatically + pack(data.data(), data.size(), buffer, position, comm); + return; + } + + pack(data.size(), buffer, position, comm); + + for (const auto& entry: data) + pack(entry, buffer, position, comm); +} + +template +void pack(const std::set& data, + std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + pack(data.size(), buffer, position, comm); + + for (const auto& entry : data) + { + pack(entry, buffer, position, comm); + } +} + +template +void pack(const std::unordered_set& data, + std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + pack(data.size(), buffer, position, comm); + + for (const auto& entry : data) + { + pack(entry, buffer, position, comm); + } +} + +template +void pack(const std::array& data, std::vector& buffer, int& position, + 
Opm::Parallel::MPIComm comm) +{ + for (const T& entry : data) + pack(entry, buffer, position, comm); +} + +template +void pack(const std::vector& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + pack(data.size(), buffer, position, comm); + for (const auto entry : data) { // Not a reference: vector range + bool b = entry; + pack(b, buffer, position, comm); + } +} + +template +typename std::enable_if::value, void>::type +pack_tuple_entry(const Tuple&, std::vector&, int&, + Opm::Parallel::MPIComm) +{ +} + +template +typename std::enable_if::value, void>::type +pack_tuple_entry(const Tuple& tuple, std::vector& buffer, + int& position, Opm::Parallel::MPIComm comm) +{ + pack(std::get(tuple), buffer, position, comm); + pack_tuple_entry(tuple, buffer, position, comm); +} + +template +void pack(const std::tuple& data, std::vector& buffer, + int& position, Opm::Parallel::MPIComm comm) +{ + pack_tuple_entry(data, buffer, position, comm); +} + +void pack(const char* str, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ +#if HAVE_MPI + std::size_t length = strlen(str)+1; + MPI_Pack(&length, 1, Dune::MPITraits::getType(), buffer.data(), + buffer.size(), &position, comm); + MPI_Pack(str, strlen(str)+1, MPI_CHAR, buffer.data(), buffer.size(), + &position, comm); +#else + (void) str; + (void) comm; + (void) buffer; + (void) position; +#endif +} + +void pack(const std::string& str, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + pack(str.c_str(), buffer, position, comm); +} + +template +void pack(const std::map& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + pack(data.size(), buffer, position, comm); + + for (const auto& entry: data) + { + pack(entry, buffer, position, comm); + } +} + +template +void pack(const std::unordered_map& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + pack(data.size(), buffer, position, comm); + + for (const auto& entry: data) + { + pack(entry, buffer, position, comm); + } +} + +template +void pack(const std::bitset& data, std::vector& buffer, + int& position, Opm::Parallel::MPIComm comm) +{ + Packing>::pack(data, buffer, position, comm); +} + +void pack(const Opm::time_point& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + pack(Opm::TimeService::to_time_t(data), buffer, position, comm); +} + + +/// Mpi::unpack routines + +template +void unpack(T*, const std::size_t&, std::vector&, int&, + Opm::Parallel::MPIComm, std::integral_constant) +{ + OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type."); +} + +template +void unpack(T* data, const std::size_t& l, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm, + std::integral_constant) +{ +#if HAVE_MPI + MPI_Unpack(buffer.data(), buffer.size(), &position, data, l, + Dune::MPITraits::getType(), comm); +#else + (void) data; + (void) comm; + (void) l; + (void) buffer; + (void) position; +#endif +} + +template +void unpack(T* data, const std::size_t& l, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + unpack(data, l, buffer, position, comm, typename std::is_pod::type()); +} + +template +void unpack(std::pair& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + unpack(data.first, buffer, position, comm); + unpack(data.second, buffer, position, comm); +} + +template +void unpack(std::optional&data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + bool has_value; + 
unpack(has_value, buffer, position, comm); + if (has_value) { + T val; + unpack(val, buffer, position, comm); + data = std::optional(val); + } else + data.reset(); +} + + +template +void unpack(std::vector& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + std::size_t length = 0; + unpack(length, buffer, position, comm); + data.resize(length); + + if (std::is_pod::value) + { + unpack(data.data(), data.size(), buffer, position, comm); + return; + } + + for (auto& entry: data) + unpack(entry, buffer, position, comm); +} + +template +void unpack(std::vector& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + size_t size; + unpack(size, buffer, position, comm); + data.clear(); + data.reserve(size); + for (size_t i = 0; i < size; ++i) { + bool entry; + unpack(entry, buffer, position, comm); + data.push_back(entry); + } +} + +template +typename std::enable_if::value, void>::type +unpack_tuple_entry(Tuple&, std::vector&, int&, + Opm::Parallel::MPIComm) +{ +} + +template +typename std::enable_if::value, void>::type +unpack_tuple_entry(Tuple& tuple, std::vector& buffer, + int& position, Opm::Parallel::MPIComm comm) +{ + unpack(std::get(tuple), buffer, position, comm); + unpack_tuple_entry(tuple, buffer, position, comm); +} + +template +void unpack(std::tuple& data, std::vector& buffer, + int& position, Opm::Parallel::MPIComm comm) +{ + unpack_tuple_entry(data, buffer, position, comm); +} + +template +void unpack(std::set& data, + std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + std::size_t size = 0; + unpack(size, buffer, position, comm); + + for (;size>0; size--) + { + K entry; + unpack(entry, buffer, position, comm); + data.insert(entry); + } +} + +template +void unpack(std::unordered_set& data, + std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + std::size_t size=0; + unpack(size, buffer, position, comm); + + for (;size>0; size--) + { + T entry; + unpack(entry, buffer, position, comm); + data.insert(entry); + } +} + +template +void unpack(std::array& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + for (T& entry : data) + unpack(entry, buffer, position, comm); +} + +void unpack(char* str, std::size_t length, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ +#if HAVE_MPI + MPI_Unpack(buffer.data(), buffer.size(), &position, const_cast(str), length, MPI_CHAR, comm); +#else + (void) str; + (void) comm; + (void) length; + (void) buffer; + (void) position; +#endif +} + +void unpack(std::string& str, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + std::size_t length=0; + unpack(length, buffer, position, comm); + std::vector cStr(length, '\0'); + unpack(cStr.data(), length, buffer, position, comm); + str.clear(); + str.append(cStr.data()); +} + +template +void unpack(std::map& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + std::size_t size=0; + unpack(size, buffer, position, comm); + + for (;size>0; size--) + { + std::pair entry; + unpack(entry, buffer, position, comm); + data.insert(entry); + } +} + +template +void unpack(std::unordered_map& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + std::size_t size=0; + unpack(size, buffer, position, comm); + + for (;size>0; size--) + { + std::pair entry; + unpack(entry, buffer, position, comm); + data.insert(entry); + } +} + +template +void unpack(std::bitset& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm 
comm) +{ + Packing>::unpack(data, buffer, position, comm); +} + +void unpack([[maybe_unused]] Opm::time_point& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + std::time_t tp; + unpack(tp, buffer, position, comm); +#if HAVE_MPI + data = Opm::TimeService::from_time_t(tp); +#endif +} + + +#define INSTANTIATE_PACK_VECTOR(...) \ +template std::size_t packSize(const std::vector<__VA_ARGS__>& data, \ + Opm::Parallel::MPIComm comm); \ +template void pack(const std::vector<__VA_ARGS__>& data, \ + std::vector& buffer, int& position, \ + Opm::Parallel::MPIComm comm); \ +template void unpack(std::vector<__VA_ARGS__>& data, \ + std::vector& buffer, int& position, \ + Opm::Parallel::MPIComm comm); + +INSTANTIATE_PACK_VECTOR(float) +INSTANTIATE_PACK_VECTOR(double) +INSTANTIATE_PACK_VECTOR(std::vector) +INSTANTIATE_PACK_VECTOR(bool) +INSTANTIATE_PACK_VECTOR(char) +INSTANTIATE_PACK_VECTOR(int) +INSTANTIATE_PACK_VECTOR(unsigned char) +INSTANTIATE_PACK_VECTOR(unsigned int) +INSTANTIATE_PACK_VECTOR(unsigned long int) +INSTANTIATE_PACK_VECTOR(unsigned long long int) +INSTANTIATE_PACK_VECTOR(std::time_t) +INSTANTIATE_PACK_VECTOR(std::array) +INSTANTIATE_PACK_VECTOR(std::pair) +INSTANTIATE_PACK_VECTOR(std::map) +INSTANTIATE_PACK_VECTOR(std::pair>) +INSTANTIATE_PACK_VECTOR(std::pair>) +INSTANTIATE_PACK_VECTOR(std::pair>) +INSTANTIATE_PACK_VECTOR(std::string) + +#undef INSTANTIATE_PACK_VECTOR + +#undef INSTANTIATE_PACK_SET + +#define INSTANTIATE_PACK(...) \ +template std::size_t packSize(const __VA_ARGS__& data, \ + Opm::Parallel::MPIComm comm); \ +template void pack(const __VA_ARGS__& data, \ + std::vector& buffer, int& position, \ + Opm::Parallel::MPIComm comm); \ +template void unpack(__VA_ARGS__& data, \ + std::vector& buffer, int& position, \ + Opm::Parallel::MPIComm comm); + +INSTANTIATE_PACK(float) +INSTANTIATE_PACK(double) +INSTANTIATE_PACK(bool) +INSTANTIATE_PACK(int) +INSTANTIATE_PACK(unsigned char) +INSTANTIATE_PACK(unsigned int) +INSTANTIATE_PACK(unsigned long int) +INSTANTIATE_PACK(unsigned long long int) +INSTANTIATE_PACK(std::array) +INSTANTIATE_PACK(std::array) +INSTANTIATE_PACK(std::array) +INSTANTIATE_PACK(std::array) +INSTANTIATE_PACK(std::array) +INSTANTIATE_PACK(std::map,std::pair>) +INSTANTIATE_PACK(std::optional) +INSTANTIATE_PACK(std::optional) +INSTANTIATE_PACK(std::pair) +INSTANTIATE_PACK(std::optional>) +INSTANTIATE_PACK(std::map>) +INSTANTIATE_PACK(std::map,int>>) +INSTANTIATE_PACK(std::map) +INSTANTIATE_PACK(std::map) +INSTANTIATE_PACK(std::map) +INSTANTIATE_PACK(std::unordered_map) +INSTANTIATE_PACK(std::unordered_map, Opm::OrderedMapDetail::TruncatedStringEquals>) +INSTANTIATE_PACK(std::unordered_map, Opm::OrderedMapDetail::TruncatedStringEquals<8>>) +INSTANTIATE_PACK(std::unordered_map) +INSTANTIATE_PACK(std::unordered_set) +INSTANTIATE_PACK(std::set) +INSTANTIATE_PACK(std::bitset<4>) + + +#undef INSTANTIATE_PACK + +} // end namespace Mpi + +} // end namespace Opm diff --git a/opm/simulators/utils/MPIPacker.hpp b/opm/simulators/utils/MPIPacker.hpp new file mode 100644 index 000000000..ac837487c --- /dev/null +++ b/opm/simulators/utils/MPIPacker.hpp @@ -0,0 +1,373 @@ +/* + Copyright 2019 Equinor AS. + + This file is part of the Open Porous Media project (OPM). + + OPM is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. 
+ + OPM is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with OPM. If not, see . +*/ +#ifndef MPI_SERIALIZER_HPP +#define MPI_SERIALIZER_HPP + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace Opm +{ + +namespace Mpi +{ +template +std::size_t packSize(const T*, std::size_t, Opm::Parallel::MPIComm, + std::integral_constant); + +template +std::size_t packSize(const T*, std::size_t l, Opm::Parallel::MPIComm comm, + std::integral_constant); + +template +std::size_t packSize(const T* data, std::size_t l, Opm::Parallel::MPIComm comm); + +template +std::size_t packSize(const T&, Opm::Parallel::MPIComm, + std::integral_constant) +{ + std::string msg = std::string{"Packing not (yet) supported for non-pod type: "} + typeid(T).name(); + OPM_THROW(std::logic_error, msg); +} + +template +std::size_t packSize(const T&, Opm::Parallel::MPIComm comm, + std::integral_constant) +{ +#if HAVE_MPI + int size{}; + MPI_Pack_size(1, Dune::MPITraits::getType(), comm, &size); + return size; +#else + (void) comm; + return 0; +#endif +} + +template +std::size_t packSize(const T& data, Opm::Parallel::MPIComm comm) +{ + return packSize(data, comm, typename std::is_pod::type()); +} + +template +std::size_t packSize(const std::pair& data, Opm::Parallel::MPIComm comm); + +template +std::size_t packSize(const std::optional& data, Opm::Parallel::MPIComm comm); + +template +std::size_t packSize(const std::vector& data, Opm::Parallel::MPIComm comm); + +template +std::size_t packSize(const std::set& data, + Opm::Parallel::MPIComm comm); + +template +std::size_t packSize(const std::unordered_set& data, + Opm::Parallel::MPIComm comm); + +template +std::size_t packSize(const std::vector& data, Opm::Parallel::MPIComm comm); + +template +std::size_t packSize(const std::tuple& data, Opm::Parallel::MPIComm comm); + +template +std::size_t packSize(const std::array& data, Opm::Parallel::MPIComm comm); + +std::size_t packSize(const char* str, Opm::Parallel::MPIComm comm); + +template +std::size_t packSize(const std::map& data, Opm::Parallel::MPIComm comm); + +template +std::size_t packSize(const std::unordered_map& data, Opm::Parallel::MPIComm comm); + +template +std::size_t packSize(const std::bitset& data, Opm::Parallel::MPIComm comm); + +////// pack routines + +template +void pack(const T*, std::size_t, std::vector&, int&, + Opm::Parallel::MPIComm, std::integral_constant); + +template +void pack(const T* data, std::size_t l, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm, std::integral_constant); + +template +void pack(const T* data, std::size_t l, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +template +void pack(const T&, std::vector&, int&, + Opm::Parallel::MPIComm, std::integral_constant) +{ + OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type."); +} + +template +void pack(const T& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm, std::integral_constant) +{ +#if HAVE_MPI + MPI_Pack(&data, 1, Dune::MPITraits::getType(), buffer.data(), + buffer.size(), &position, comm); +#else + (void) data; + (void) comm; + (void) buffer; + (void) position; +#endif +} + +template +void pack(const T& 
data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + pack(data, buffer, position, comm, typename std::is_pod::type()); +} + +template +void pack(const std::pair& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +template +void pack(const std::optional& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +template +void pack(const std::vector& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +template +void pack(const std::vector& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +template +void pack(const std::tuple& data, std::vector& buffer, + int& position, Opm::Parallel::MPIComm comm); + +template +void pack(const std::set& data, + std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +template +void pack(const std::unordered_set& data, + std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +template +void pack(const std::array& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +template +void pack(const std::map& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +template +void pack(const std::unordered_map& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +void pack(const char* str, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +template +void pack(const std::bitset& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +/// unpack routines + +template +void unpack(T*, const std::size_t&, std::vector&, int&, + Opm::Parallel::MPIComm, std::integral_constant); + +template +void unpack(T* data, const std::size_t& l, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm, + std::integral_constant); + +template +void unpack(T* data, const std::size_t& l, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +template +void unpack(T&, std::vector&, int&, + Opm::Parallel::MPIComm, std::integral_constant) +{ + OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type."); +} + +template +void unpack(T& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm, std::integral_constant) +{ +#if HAVE_MPI + MPI_Unpack(buffer.data(), buffer.size(), &position, &data, 1, + Dune::MPITraits::getType(), comm); +#else + (void) data; + (void) comm; + (void) buffer; + (void) position; +#endif +} + +template +void unpack(T& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm) +{ + unpack(data, buffer, position, comm, typename std::is_pod::type()); +} + +template +void unpack(std::pair& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +template +void unpack(std::optional& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +template +void unpack(std::vector& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +template +void unpack(std::vector& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +template +void unpack(std::tuple& data, std::vector& buffer, + int& position, Opm::Parallel::MPIComm comm); + +template +void unpack(std::set& data, + std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +template +void unpack(std::unordered_set& data, + std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +template +void unpack(std::array& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + 
+template +void unpack(std::map& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +template +void unpack(std::unordered_map& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +void unpack(char* str, std::size_t length, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +template +void unpack(std::bitset& data, std::vector& buffer, int& position, + Opm::Parallel::MPIComm comm); + +/// prototypes for complex types + +#define ADD_PACK_PROTOTYPES(T) \ + std::size_t packSize(const T& data, Opm::Parallel::MPIComm comm); \ + void pack(const T& data, std::vector& buffer, int& position, \ + Opm::Parallel::MPIComm comm); \ + void unpack(T& data, std::vector& buffer, int& position, \ + Opm::Parallel::MPIComm comm); + +ADD_PACK_PROTOTYPES(std::string) +ADD_PACK_PROTOTYPES(time_point) + +template +void variadic_packsize(size_t& size, Parallel::Communication comm, T& first, Args&&... args) +{ + size += packSize(first, comm); + if constexpr (sizeof...(args) > 0) + variadic_packsize(size, comm, std::forward(args)...); +} + +template +void variadic_pack(int& pos, std::vector& buffer, Parallel::Communication comm, T& first, Args&&... args) +{ + pack(first, buffer, pos, comm); + if constexpr (sizeof...(args) > 0) + variadic_pack(pos, buffer, comm, std::forward(args)...); +} + +template +void variadic_unpack(int& pos, std::vector& buffer, Parallel::Communication comm, T& first, Args&&... args) +{ + unpack(first, buffer, pos, comm); + if constexpr (sizeof...(args) > 0) + variadic_unpack(pos, buffer, comm, std::forward(args)...); +} + +#if HAVE_MPI +template +void broadcast(Parallel::Communication comm, int root, Args&&... args) +{ + if (comm.size() == 1) + return; + + size_t size = 0; + if (comm.rank() == root) + variadic_packsize(size, comm, args...); + + comm.broadcast(&size, 1, root); + std::vector buffer(size); + if (comm.rank() == root) { + int pos = 0; + variadic_pack(pos, buffer, comm, args...); + } + comm.broadcast(buffer.data(), size, root); + if (comm.rank() != root) { + int pos = 0; + variadic_unpack(pos, buffer, comm, std::forward(args)...); + } +} +#else +template +void broadcast(Parallel::Communication, int, Args&&...) +{} +#endif + +} // end namespace Mpi + +} // end namespace Opm +#endif // MPI_SERIALIZER_HPP diff --git a/opm/simulators/utils/ParallelRestart.cpp b/opm/simulators/utils/ParallelRestart.cpp index ad82b6c70..bc83c7015 100644 --- a/opm/simulators/utils/ParallelRestart.cpp +++ b/opm/simulators/utils/ParallelRestart.cpp @@ -17,21 +17,16 @@ along with OPM. If not, see . 
*/ #include +#include "ParallelRestart.hpp" + #if HAVE_MPI #include #endif -#include "ParallelRestart.hpp" -#include -#include -#include -#include -#include - -#include #if HAVE_MPI #include #endif + #include #include #include @@ -39,713 +34,10 @@ namespace Opm { -namespace Mpi -{ -template -std::size_t packSize(const T*, std::size_t, Opm::Parallel::MPIComm, - std::integral_constant) -{ - OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type."); -} -template -std::size_t packSize(const T*, std::size_t l, Opm::Parallel::MPIComm comm, - std::integral_constant) -{ -#if HAVE_MPI - int size; - MPI_Pack_size(1, Dune::MPITraits::getType(), comm, &size); - std::size_t totalSize = size; - MPI_Pack_size(l, Dune::MPITraits::getType(), comm, &size); - return totalSize + size; -#else - (void) comm; - return l-l; -#endif -} - -template -std::size_t packSize(const T* data, std::size_t l, Opm::Parallel::MPIComm comm) -{ - return packSize(data, l, comm, typename std::is_pod::type()); -} - -template -std::size_t packSize(const std::pair& data, Opm::Parallel::MPIComm comm) -{ - return packSize(data.first, comm) + packSize(data.second, comm); -} - -template -std::size_t packSize(const std::optional& data, Opm::Parallel::MPIComm comm) -{ - bool has_value = data.has_value(); - std::size_t pack_size = packSize(has_value, comm); - if (has_value) - pack_size += packSize(*data, comm); - return pack_size; -} - - -template -std::size_t packSize(const std::vector& data, Opm::Parallel::MPIComm comm) -{ - if (std::is_pod::value) - // size written automatically - return packSize(data.data(), data.size(), comm); - - std::size_t size = packSize(data.size(), comm); - - for (const auto& entry: data) - size += packSize(entry, comm); - - return size; -} - -template -std::size_t packSize(const std::vector& data, Opm::Parallel::MPIComm comm) -{ - bool entry = false; - return packSize(data.size(), comm) + data.size()*packSize(entry,comm); -} - -template -typename std::enable_if::value, std::size_t>::type -pack_size_tuple_entry(const Tuple&, Opm::Parallel::MPIComm) -{ - return 0; -} - -template -typename std::enable_if::value, std::size_t>::type -pack_size_tuple_entry(const Tuple& tuple, Opm::Parallel::MPIComm comm) -{ - return packSize(std::get(tuple), comm) + pack_size_tuple_entry(tuple, comm); -} - -template -std::size_t packSize(const std::tuple& data, Opm::Parallel::MPIComm comm) -{ - return pack_size_tuple_entry(data, comm); -} - -template -std::size_t packSize(const std::unordered_set& data, - Opm::Parallel::MPIComm comm) -{ - std::size_t totalSize = packSize(data.size(), comm); - for (const auto& entry : data) - { - totalSize += packSize(entry, comm); - } - return totalSize; -} - -template -std::size_t packSize(const std::set& data, - Opm::Parallel::MPIComm comm) -{ - std::size_t totalSize = packSize(data.size(), comm); - for (const auto& entry : data) - { - totalSize += packSize(entry, comm); - } - return totalSize; -} - -std::size_t packSize(const char* str, Opm::Parallel::MPIComm comm) -{ -#if HAVE_MPI - int size; - MPI_Pack_size(1, Dune::MPITraits::getType(), comm, &size); - int totalSize = size; - MPI_Pack_size(strlen(str)+1, MPI_CHAR, comm, &size); - return totalSize + size; -#else - (void) str; - (void) comm; - return 0; -#endif -} - -std::size_t packSize(const std::string& str, Opm::Parallel::MPIComm comm) -{ - return packSize(str.c_str(), comm); -} - -template -std::size_t packSize(const std::map& data, Opm::Parallel::MPIComm comm) -{ - std::size_t totalSize = packSize(data.size(), comm); 
- for (const auto& entry: data) - { - totalSize += packSize(entry, comm); - } - return totalSize; -} - -template -std::size_t packSize(const std::unordered_map& data, Opm::Parallel::MPIComm comm) -{ - std::size_t totalSize = packSize(data.size(), comm); - for (const auto& entry: data) - { - totalSize += packSize(entry, comm); - } - return totalSize; -} - -template -std::size_t packSize(const std::array& data, Opm::Parallel::MPIComm comm) -{ - return N*packSize(data[0], comm); -} - -template -struct Packing -{ -}; - -template -struct Packing> -{ - static std::size_t packSize(const std::bitset& data, Opm::Parallel::MPIComm comm) - { - return Mpi::packSize(data.to_ullong(), comm); - } - - static void pack(const std::bitset& data, std::vector& buffer, int& position, Opm::Parallel::MPIComm comm) - { - Mpi::pack(data.to_ullong(), buffer, position, comm); - } - - static void unpack(std::bitset& data, std::vector& buffer, int& position, Opm::Parallel::MPIComm comm) - { - unsigned long long d; - Mpi::unpack(d, buffer, position, comm); - data = std::bitset(d); - } -}; - -template -std::size_t packSize(const std::bitset& data, Opm::Parallel::MPIComm comm) -{ - return Packing>::packSize(data, comm); -} - -std::size_t packSize(const Opm::time_point&, Opm::Parallel::MPIComm comm) -{ - std::time_t tp = 0; - return packSize(tp, comm); -} - - -////// pack routines - -template -void pack(const T*, std::size_t, std::vector&, int&, - Opm::Parallel::MPIComm, std::integral_constant) -{ - OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type."); -} - -template -void pack(const T* data, std::size_t l, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm, - std::integral_constant) -{ -#if HAVE_MPI - MPI_Pack(&l, 1, Dune::MPITraits::getType(), buffer.data(), - buffer.size(), &position, comm); - MPI_Pack(data, l, Dune::MPITraits::getType(), buffer.data(), - buffer.size(), &position, comm); -#else - (void) data; - (void) comm; - (void) l; - (void) buffer; - (void) position; -#endif -} - -template -void pack(const T* data, std::size_t l, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - pack(data, l, buffer, position, comm, typename std::is_pod::type()); -} - -template -void pack(const std::pair& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - pack(data.first, buffer, position, comm); - pack(data.second, buffer, position, comm); -} - -template -void pack(const std::optional& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - bool has_value = data.has_value(); - pack(has_value, buffer, position, comm); - if (has_value) - pack(*data, buffer, position, comm); -} - - -template -void pack(const std::vector& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - if (std::is_pod::value) - { - // size written automatically - pack(data.data(), data.size(), buffer, position, comm); - return; - } - - pack(data.size(), buffer, position, comm); - - for (const auto& entry: data) - pack(entry, buffer, position, comm); -} - -template -void pack(const std::set& data, - std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - pack(data.size(), buffer, position, comm); - - for (const auto& entry : data) - { - pack(entry, buffer, position, comm); - } -} - -template -void pack(const std::unordered_set& data, - std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - pack(data.size(), buffer, position, comm); - - for (const auto& entry : data) - { - pack(entry, buffer, 
position, comm); - } -} - -template -void pack(const std::array& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - for (const T& entry : data) - pack(entry, buffer, position, comm); -} - -template -void pack(const std::vector& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - pack(data.size(), buffer, position, comm); - for (const auto entry : data) { // Not a reference: vector range - bool b = entry; - pack(b, buffer, position, comm); - } -} - -template -typename std::enable_if::value, void>::type -pack_tuple_entry(const Tuple&, std::vector&, int&, - Opm::Parallel::MPIComm) -{ -} - -template -typename std::enable_if::value, void>::type -pack_tuple_entry(const Tuple& tuple, std::vector& buffer, - int& position, Opm::Parallel::MPIComm comm) -{ - pack(std::get(tuple), buffer, position, comm); - pack_tuple_entry(tuple, buffer, position, comm); -} - -template -void pack(const std::tuple& data, std::vector& buffer, - int& position, Opm::Parallel::MPIComm comm) -{ - pack_tuple_entry(data, buffer, position, comm); -} - -void pack(const char* str, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ -#if HAVE_MPI - std::size_t length = strlen(str)+1; - MPI_Pack(&length, 1, Dune::MPITraits::getType(), buffer.data(), - buffer.size(), &position, comm); - MPI_Pack(str, strlen(str)+1, MPI_CHAR, buffer.data(), buffer.size(), - &position, comm); -#else - (void) str; - (void) comm; - (void) buffer; - (void) position; -#endif -} - -void pack(const std::string& str, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - pack(str.c_str(), buffer, position, comm); -} - -template -void pack(const std::map& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - pack(data.size(), buffer, position, comm); - - for (const auto& entry: data) - { - pack(entry, buffer, position, comm); - } -} - -template -void pack(const std::unordered_map& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - pack(data.size(), buffer, position, comm); - - for (const auto& entry: data) - { - pack(entry, buffer, position, comm); - } -} - -template -void pack(const std::bitset& data, std::vector& buffer, - int& position, Opm::Parallel::MPIComm comm) -{ - Packing>::pack(data, buffer, position, comm); -} - -void pack(const Opm::time_point& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - pack(Opm::TimeService::to_time_t(data), buffer, position, comm); -} - - -/// Mpi::unpack routines - -template -void unpack(T*, const std::size_t&, std::vector&, int&, - Opm::Parallel::MPIComm, std::integral_constant) -{ - OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type."); -} - -template -void unpack(T* data, const std::size_t& l, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm, - std::integral_constant) -{ -#if HAVE_MPI - MPI_Unpack(buffer.data(), buffer.size(), &position, data, l, - Dune::MPITraits::getType(), comm); -#else - (void) data; - (void) comm; - (void) l; - (void) buffer; - (void) position; -#endif -} - -template -void unpack(T* data, const std::size_t& l, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - unpack(data, l, buffer, position, comm, typename std::is_pod::type()); -} - -template -void unpack(std::pair& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - unpack(data.first, buffer, position, comm); - unpack(data.second, buffer, position, comm); -} - -template -void 
unpack(std::optional&data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - bool has_value; - unpack(has_value, buffer, position, comm); - if (has_value) { - T val; - unpack(val, buffer, position, comm); - data = std::optional(val); - } else - data.reset(); -} - - -template -void unpack(std::vector& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - std::size_t length = 0; - unpack(length, buffer, position, comm); - data.resize(length); - - if (std::is_pod::value) - { - unpack(data.data(), data.size(), buffer, position, comm); - return; - } - - for (auto& entry: data) - unpack(entry, buffer, position, comm); -} - -template -void unpack(std::vector& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - size_t size; - unpack(size, buffer, position, comm); - data.clear(); - data.reserve(size); - for (size_t i = 0; i < size; ++i) { - bool entry; - unpack(entry, buffer, position, comm); - data.push_back(entry); - } -} - -template -typename std::enable_if::value, void>::type -unpack_tuple_entry(Tuple&, std::vector&, int&, - Opm::Parallel::MPIComm) -{ -} - -template -typename std::enable_if::value, void>::type -unpack_tuple_entry(Tuple& tuple, std::vector& buffer, - int& position, Opm::Parallel::MPIComm comm) -{ - unpack(std::get(tuple), buffer, position, comm); - unpack_tuple_entry(tuple, buffer, position, comm); -} - -template -void unpack(std::tuple& data, std::vector& buffer, - int& position, Opm::Parallel::MPIComm comm) -{ - unpack_tuple_entry(data, buffer, position, comm); -} - -template -void unpack(std::set& data, - std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - std::size_t size = 0; - unpack(size, buffer, position, comm); - - for (;size>0; size--) - { - K entry; - unpack(entry, buffer, position, comm); - data.insert(entry); - } -} - -template -void unpack(std::unordered_set& data, - std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - std::size_t size=0; - unpack(size, buffer, position, comm); - - for (;size>0; size--) - { - T entry; - unpack(entry, buffer, position, comm); - data.insert(entry); - } -} - -template -void unpack(std::array& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - for (T& entry : data) - unpack(entry, buffer, position, comm); -} - -void unpack(char* str, std::size_t length, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ -#if HAVE_MPI - MPI_Unpack(buffer.data(), buffer.size(), &position, const_cast(str), length, MPI_CHAR, comm); -#else - (void) str; - (void) comm; - (void) length; - (void) buffer; - (void) position; -#endif -} - -void unpack(std::string& str, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - std::size_t length=0; - unpack(length, buffer, position, comm); - std::vector cStr(length, '\0'); - unpack(cStr.data(), length, buffer, position, comm); - str.clear(); - str.append(cStr.data()); -} - -template -void unpack(std::map& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - std::size_t size=0; - unpack(size, buffer, position, comm); - - for (;size>0; size--) - { - std::pair entry; - unpack(entry, buffer, position, comm); - data.insert(entry); - } -} - -template -void unpack(std::unordered_map& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - std::size_t size=0; - unpack(size, buffer, position, comm); - - for (;size>0; size--) - { - std::pair entry; - unpack(entry, buffer, position, comm); - data.insert(entry); 
- } -} - -template -void unpack(std::bitset& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - Packing>::unpack(data, buffer, position, comm); -} - -void unpack([[maybe_unused]] Opm::time_point& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - std::time_t tp; - unpack(tp, buffer, position, comm); -#if HAVE_MPI - data = Opm::TimeService::from_time_t(tp); -#endif -} - - -#define INSTANTIATE_PACK_VECTOR(...) \ -template std::size_t packSize(const std::vector<__VA_ARGS__>& data, \ - Opm::Parallel::MPIComm comm); \ -template void pack(const std::vector<__VA_ARGS__>& data, \ - std::vector& buffer, int& position, \ - Opm::Parallel::MPIComm comm); \ -template void unpack(std::vector<__VA_ARGS__>& data, \ - std::vector& buffer, int& position, \ - Opm::Parallel::MPIComm comm); - -INSTANTIATE_PACK_VECTOR(float) -INSTANTIATE_PACK_VECTOR(double) -INSTANTIATE_PACK_VECTOR(std::vector) -INSTANTIATE_PACK_VECTOR(bool) -INSTANTIATE_PACK_VECTOR(char) -INSTANTIATE_PACK_VECTOR(int) -INSTANTIATE_PACK_VECTOR(unsigned char) -INSTANTIATE_PACK_VECTOR(unsigned int) -INSTANTIATE_PACK_VECTOR(unsigned long int) -INSTANTIATE_PACK_VECTOR(unsigned long long int) -INSTANTIATE_PACK_VECTOR(std::time_t) -INSTANTIATE_PACK_VECTOR(std::array) -INSTANTIATE_PACK_VECTOR(std::pair) -INSTANTIATE_PACK_VECTOR(std::map) -INSTANTIATE_PACK_VECTOR(std::pair>) -INSTANTIATE_PACK_VECTOR(std::pair>) -INSTANTIATE_PACK_VECTOR(std::pair>) -INSTANTIATE_PACK_VECTOR(std::string) - -#undef INSTANTIATE_PACK_VECTOR - -#undef INSTANTIATE_PACK_SET - -#define INSTANTIATE_PACK(...) \ -template std::size_t packSize(const __VA_ARGS__& data, \ - Opm::Parallel::MPIComm comm); \ -template void pack(const __VA_ARGS__& data, \ - std::vector& buffer, int& position, \ - Opm::Parallel::MPIComm comm); \ -template void unpack(__VA_ARGS__& data, \ - std::vector& buffer, int& position, \ - Opm::Parallel::MPIComm comm); - -INSTANTIATE_PACK(float) -INSTANTIATE_PACK(double) -INSTANTIATE_PACK(bool) -INSTANTIATE_PACK(int) -INSTANTIATE_PACK(unsigned char) -INSTANTIATE_PACK(unsigned int) -INSTANTIATE_PACK(unsigned long int) -INSTANTIATE_PACK(unsigned long long int) -INSTANTIATE_PACK(std::array) -INSTANTIATE_PACK(std::array) -INSTANTIATE_PACK(std::array) -INSTANTIATE_PACK(std::array) -INSTANTIATE_PACK(std::array) -INSTANTIATE_PACK(std::map,std::pair>) -INSTANTIATE_PACK(std::optional) -INSTANTIATE_PACK(std::optional) -INSTANTIATE_PACK(std::pair) -INSTANTIATE_PACK(std::optional>) -INSTANTIATE_PACK(std::map>) -INSTANTIATE_PACK(std::map,int>>) -INSTANTIATE_PACK(std::map) -INSTANTIATE_PACK(std::map) -INSTANTIATE_PACK(std::map) -INSTANTIATE_PACK(std::map) -INSTANTIATE_PACK(std::unordered_map) -INSTANTIATE_PACK(std::unordered_map, Opm::OrderedMapDetail::TruncatedStringEquals>) -INSTANTIATE_PACK(std::unordered_map, Opm::OrderedMapDetail::TruncatedStringEquals<8>>) -INSTANTIATE_PACK(std::unordered_map) -INSTANTIATE_PACK(std::unordered_set) -INSTANTIATE_PACK(std::set) -INSTANTIATE_PACK(std::bitset<4>) - - -#undef INSTANTIATE_PACK - -} // end namespace Mpi - -RestartValue loadParallelRestart(const EclipseIO* eclIO, Action::State& actionState, SummaryState& summaryState, +RestartValue loadParallelRestart(const EclipseIO* eclIO, + Action::State& actionState, + SummaryState& summaryState, const std::vector& solutionKeys, const std::vector& extraKeys, Parallel::Communication comm) diff --git a/opm/simulators/utils/ParallelRestart.hpp b/opm/simulators/utils/ParallelRestart.hpp index ba8c3de79..f90cdd34d 100644 --- 
a/opm/simulators/utils/ParallelRestart.hpp +++ b/opm/simulators/utils/ParallelRestart.hpp @@ -19,29 +19,8 @@ #ifndef PARALLEL_RESTART_HPP #define PARALLEL_RESTART_HPP -#if HAVE_MPI -#include -#endif - -#include -#include - -#include -#include - #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include namespace Opm @@ -57,340 +36,13 @@ namespace Action class State; } -namespace Mpi -{ -template -std::size_t packSize(const T*, std::size_t, Opm::Parallel::MPIComm, - std::integral_constant); - -template -std::size_t packSize(const T*, std::size_t l, Opm::Parallel::MPIComm comm, - std::integral_constant); - -template -std::size_t packSize(const T* data, std::size_t l, Opm::Parallel::MPIComm comm); - -template -std::size_t packSize(const T&, Opm::Parallel::MPIComm, - std::integral_constant) -{ - std::string msg = std::string{"Packing not (yet) supported for non-pod type: "} + typeid(T).name(); - OPM_THROW(std::logic_error, msg); -} - -template -std::size_t packSize(const T&, Opm::Parallel::MPIComm comm, - std::integral_constant) -{ -#if HAVE_MPI - int size{}; - MPI_Pack_size(1, Dune::MPITraits::getType(), comm, &size); - return size; -#else - (void) comm; - return 0; -#endif -} - -template -std::size_t packSize(const T& data, Opm::Parallel::MPIComm comm) -{ - return packSize(data, comm, typename std::is_pod::type()); -} - -template -std::size_t packSize(const std::pair& data, Opm::Parallel::MPIComm comm); - -template -std::size_t packSize(const std::optional& data, Opm::Parallel::MPIComm comm); - -template -std::size_t packSize(const std::vector& data, Opm::Parallel::MPIComm comm); - -template -std::size_t packSize(const std::set& data, - Opm::Parallel::MPIComm comm); - -template -std::size_t packSize(const std::unordered_set& data, - Opm::Parallel::MPIComm comm); - -template -std::size_t packSize(const std::vector& data, Opm::Parallel::MPIComm comm); - -template -std::size_t packSize(const std::tuple& data, Opm::Parallel::MPIComm comm); - -template -std::size_t packSize(const std::array& data, Opm::Parallel::MPIComm comm); - -std::size_t packSize(const char* str, Opm::Parallel::MPIComm comm); - -template -std::size_t packSize(const std::map& data, Opm::Parallel::MPIComm comm); - -template -std::size_t packSize(const std::unordered_map& data, Opm::Parallel::MPIComm comm); - -template -std::size_t packSize(const std::bitset& data, Opm::Parallel::MPIComm comm); - -////// pack routines - -template -void pack(const T*, std::size_t, std::vector&, int&, - Opm::Parallel::MPIComm, std::integral_constant); - -template -void pack(const T* data, std::size_t l, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm, std::integral_constant); - -template -void pack(const T* data, std::size_t l, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -template -void pack(const T&, std::vector&, int&, - Opm::Parallel::MPIComm, std::integral_constant) -{ - OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type."); -} - -template -void pack(const T& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm, std::integral_constant) -{ -#if HAVE_MPI - MPI_Pack(&data, 1, Dune::MPITraits::getType(), buffer.data(), - buffer.size(), &position, comm); -#else - (void) data; - (void) comm; - (void) buffer; - (void) position; -#endif -} - -template -void pack(const T& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - pack(data, buffer, position, comm, typename 
std::is_pod::type()); -} - -template -void pack(const std::pair& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -template -void pack(const std::optional& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -template -void pack(const std::vector& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -template -void pack(const std::vector& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -template -void pack(const std::tuple& data, std::vector& buffer, - int& position, Opm::Parallel::MPIComm comm); - -template -void pack(const std::set& data, - std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -template -void pack(const std::unordered_set& data, - std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -template -void pack(const std::array& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -template -void pack(const std::map& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -template -void pack(const std::unordered_map& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -void pack(const char* str, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -template -void pack(const std::bitset& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -/// unpack routines - -template -void unpack(T*, const std::size_t&, std::vector&, int&, - Opm::Parallel::MPIComm, std::integral_constant); - -template -void unpack(T* data, const std::size_t& l, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm, - std::integral_constant); - -template -void unpack(T* data, const std::size_t& l, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -template -void unpack(T&, std::vector&, int&, - Opm::Parallel::MPIComm, std::integral_constant) -{ - OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type."); -} - -template -void unpack(T& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm, std::integral_constant) -{ -#if HAVE_MPI - MPI_Unpack(buffer.data(), buffer.size(), &position, &data, 1, - Dune::MPITraits::getType(), comm); -#else - (void) data; - (void) comm; - (void) buffer; - (void) position; -#endif -} - -template -void unpack(T& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm) -{ - unpack(data, buffer, position, comm, typename std::is_pod::type()); -} - -template -void unpack(std::pair& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -template -void unpack(std::optional& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -template -void unpack(std::vector& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -template -void unpack(std::vector& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -template -void unpack(std::tuple& data, std::vector& buffer, - int& position, Opm::Parallel::MPIComm comm); - -template -void unpack(std::set& data, - std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -template -void unpack(std::unordered_set& data, - std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -template -void unpack(std::array& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -template -void unpack(std::map& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -template -void 
unpack(std::unordered_map& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -void unpack(char* str, std::size_t length, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -template -void unpack(std::bitset& data, std::vector& buffer, int& position, - Opm::Parallel::MPIComm comm); - -/// prototypes for complex types - -#define ADD_PACK_PROTOTYPES(T) \ - std::size_t packSize(const T& data, Opm::Parallel::MPIComm comm); \ - void pack(const T& data, std::vector& buffer, int& position, \ - Opm::Parallel::MPIComm comm); \ - void unpack(T& data, std::vector& buffer, int& position, \ - Opm::Parallel::MPIComm comm); - -ADD_PACK_PROTOTYPES(std::string) -ADD_PACK_PROTOTYPES(time_point) - -template -void variadic_packsize(size_t& size, Parallel::Communication comm, T& first, Args&&... args) -{ - size += packSize(first, comm); - if constexpr (sizeof...(args) > 0) - variadic_packsize(size, comm, std::forward(args)...); -} - -template -void variadic_pack(int& pos, std::vector& buffer, Parallel::Communication comm, T& first, Args&&... args) -{ - pack(first, buffer, pos, comm); - if constexpr (sizeof...(args) > 0) - variadic_pack(pos, buffer, comm, std::forward(args)...); -} - -template -void variadic_unpack(int& pos, std::vector& buffer, Parallel::Communication comm, T& first, Args&&... args) -{ - unpack(first, buffer, pos, comm); - if constexpr (sizeof...(args) > 0) - variadic_unpack(pos, buffer, comm, std::forward(args)...); -} - -#if HAVE_MPI -template -void broadcast(Parallel::Communication comm, int root, Args&&... args) -{ - if (comm.size() == 1) - return; - - size_t size = 0; - if (comm.rank() == root) - variadic_packsize(size, comm, std::forward(args)...); - - comm.broadcast(&size, 1, root); - std::vector buffer(size); - if (comm.rank() == root) { - int pos = 0; - variadic_pack(pos, buffer, comm, std::forward(args)...); - } - comm.broadcast(buffer.data(), size, root); - if (comm.rank() != root) { - int pos = 0; - variadic_unpack(pos, buffer, comm, std::forward(args)...); - } -} -#else -template -void broadcast(Parallel::Communication, int, Args&&...) 
-{}
-#endif
-
-} // end namespace Mpi
-
-RestartValue loadParallelRestart(const EclipseIO* eclIO, Action::State& actionState, SummaryState& summaryState,
+RestartValue loadParallelRestart(const EclipseIO* eclIO,
+                                 Action::State& actionState,
+                                 SummaryState& summaryState,
                                  const std::vector& solutionKeys,
                                  const std::vector& extraKeys,
                                  Parallel::Communication comm);
 } // end namespace Opm
+
 #endif // PARALLEL_RESTART_HPP
diff --git a/opm/simulators/wells/BlackoilWellModel_impl.hpp b/opm/simulators/wells/BlackoilWellModel_impl.hpp
index e6a54f44e..5f8b41d35 100644
--- a/opm/simulators/wells/BlackoilWellModel_impl.hpp
+++ b/opm/simulators/wells/BlackoilWellModel_impl.hpp
@@ -26,6 +26,7 @@
 #include
 #include
+#include <opm/simulators/utils/MPIPacker.hpp>
 #include
 #include
diff --git a/tests/test_ParallelRestart.cpp b/tests/test_ParallelSerialization.cpp
similarity index 99%
rename from tests/test_ParallelRestart.cpp
rename to tests/test_ParallelSerialization.cpp
index ea9316083..5bebbc5d6 100644
--- a/tests/test_ParallelRestart.cpp
+++ b/tests/test_ParallelSerialization.cpp
@@ -19,7 +19,7 @@
 #include
-#define BOOST_TEST_MODULE TestParallelRestart
+#define BOOST_TEST_MODULE TestParallelSerialization
 #define BOOST_TEST_NO_MAIN
 #include
diff --git a/tests/test_broadcast.cpp b/tests/test_broadcast.cpp
index 0fce1d833..65bf45387 100644
--- a/tests/test_broadcast.cpp
+++ b/tests/test_broadcast.cpp
@@ -25,7 +25,7 @@
 #include
-#include <opm/simulators/utils/ParallelRestart.hpp>
+#include <opm/simulators/utils/MPIPacker.hpp>
 #include
 #include
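
Usage sketch, not part of the patch: the pack/unpack routines moved into MPIPacker.hpp size a byte buffer with packSize(), serialize with pack(), and restore with unpack(). This minimal program assumes an MPI-enabled build where config.h defines HAVE_MPI; the standalone main() and variable names are illustrative, and std::vector<double> is chosen because it appears in the explicit instantiation list of MPIPacker.cpp.

// Round-trip a value through Opm::Mpi::packSize/pack/unpack.
#include <config.h>

#include <opm/simulators/utils/MPIPacker.hpp>

#include <mpi.h>

#include <cassert>
#include <vector>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    Opm::Parallel::MPIComm comm = MPI_COMM_WORLD;

    const std::vector<double> original{1.0, 2.0, 3.0};

    // 1) query how many bytes MPI_Pack will need for this value
    std::vector<char> buffer(Opm::Mpi::packSize(original, comm));

    // 2) serialize; 'position' is advanced past the written bytes
    int position = 0;
    Opm::Mpi::pack(original, buffer, position, comm);

    // 3) deserialize from the start of the same buffer
    std::vector<double> restored;
    position = 0;
    Opm::Mpi::unpack(restored, buffer, position, comm);

    assert(restored == original); // holds only in an MPI build; the serial
                                  // fallback branches are no-ops
    MPI_Finalize();
    return 0;
}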
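
A second sketch, also not part of the patch, shows the variadic Mpi::broadcast() helper that test_broadcast.cpp exercises: the root rank packs all arguments into one buffer, broadcasts a size and then the buffer, and the other ranks unpack into their local variables. Assumes an MPI-enabled build; names and the standalone main() are illustrative only.

// Distribute values from rank 0 with Opm::Mpi::broadcast().
#include <config.h>

#include <opm/simulators/utils/MPIPacker.hpp>

#include <dune/common/parallel/mpihelper.hh>

#include <iostream>
#include <vector>

int main(int argc, char** argv)
{
    const auto& helper = Dune::MPIHelper::instance(argc, argv);
    Opm::Parallel::Communication comm(helper.getCommunicator());

    double scalar = 0.0;
    std::vector<double> field;
    if (comm.rank() == 0) {        // only the root owns the real data
        scalar = 4.2;
        field = {1.0, 2.0, 3.0};
    }

    // One size broadcast plus one buffer broadcast, regardless of how many
    // arguments are passed.
    Opm::Mpi::broadcast(comm, /*root=*/0, scalar, field);

    std::cout << "rank " << comm.rank() << ": scalar = " << scalar
              << ", field.size() = " << field.size() << '\n';
    return 0;
}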