/*
  Copyright 2019 Equinor AS.

  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM.  If not, see <http://www.gnu.org/licenses/>.
*/

#include <config.h>

#if HAVE_MPI
#include <mpi.h>
#endif

#include "ParallelRestart.hpp"

// NOTE: exact include paths vary between OPM releases; the list below is
// reconstructed from the symbols used in this translation unit.
#include <opm/common/ErrorMacros.hpp>
#include <opm/common/utility/TimeService.hpp>
#include <opm/output/eclipse/EclipseIO.hpp>
#include <opm/output/eclipse/RestartValue.hpp>
#include <opm/input/eclipse/Schedule/Action/State.hpp>
#include <opm/input/eclipse/Schedule/SummaryState.hpp>

#if HAVE_MPI
#include <ebos/eclmpiserializer.hh>
#endif

#include <dune/common/parallel/mpitraits.hh>

#include <cassert>
#include <cstring>
#include <ctime>
#include <stdexcept>
#include <vector>

namespace Opm
{
namespace Mpi
{

template<class T>
std::size_t packSize(const T*, std::size_t, Opm::Parallel::MPIComm,
                     std::integral_constant<bool, false>)
{
    OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type.");
}

template<class T>
std::size_t packSize(const T*, std::size_t l, Opm::Parallel::MPIComm comm,
                     std::integral_constant<bool, true>)
{
#if HAVE_MPI
    // room for a std::size_t length field followed by the raw elements
    int size;
    MPI_Pack_size(1, Dune::MPITraits<std::size_t>::getType(), comm, &size);
    std::size_t totalSize = size;
    MPI_Pack_size(l, Dune::MPITraits<T>::getType(), comm, &size);
    return totalSize + size;
#else
    (void) comm;
    return l-l;
#endif
}

template<class T>
std::size_t packSize(const T* data, std::size_t l, Opm::Parallel::MPIComm comm)
{
    return packSize(data, l, comm, typename std::is_pod<T>::type());
}

template<class T1, class T2>
std::size_t packSize(const std::pair<T1,T2>& data, Opm::Parallel::MPIComm comm)
{
    return packSize(data.first, comm) + packSize(data.second, comm);
}

template<class T>
std::size_t packSize(const std::optional<T>& data, Opm::Parallel::MPIComm comm)
{
    bool has_value = data.has_value();
    std::size_t pack_size = packSize(has_value, comm);
    if (has_value)
        pack_size += packSize(*data, comm);
    return pack_size;
}

template<class T, class A>
std::size_t packSize(const std::vector<T,A>& data, Opm::Parallel::MPIComm comm)
{
    if (std::is_pod<T>::value)
        // size written automatically
        return packSize(data.data(), data.size(), comm);

    std::size_t size = packSize(data.size(), comm);

    for (const auto& entry: data)
        size += packSize(entry, comm);

    return size;
}

template<class A>
std::size_t packSize(const std::vector<bool,A>& data, Opm::Parallel::MPIComm comm)
{
    bool entry = false;
    return packSize(data.size(), comm) + data.size()*packSize(entry,comm);
}

template<std::size_t I = 0, typename Tuple>
typename std::enable_if<I == std::tuple_size<Tuple>::value, std::size_t>::type
pack_size_tuple_entry(const Tuple&, Opm::Parallel::MPIComm)
{
    return 0;
}

template<std::size_t I = 0, typename Tuple>
typename std::enable_if<I != std::tuple_size<Tuple>::value, std::size_t>::type
pack_size_tuple_entry(const Tuple& tuple, Opm::Parallel::MPIComm comm)
{
    return packSize(std::get<I>(tuple), comm) + pack_size_tuple_entry<I+1>(tuple, comm);
}

template<class... Ts>
std::size_t packSize(const std::tuple<Ts...>& data, Opm::Parallel::MPIComm comm)
{
    return pack_size_tuple_entry(data, comm);
}

template<class T, class H, class KE, class A>
std::size_t packSize(const std::unordered_set<T,H,KE,A>& data,
                     Opm::Parallel::MPIComm comm)
{
    std::size_t totalSize = packSize(data.size(), comm);
    for (const auto& entry : data)
    {
        totalSize += packSize(entry, comm);
    }
    return totalSize;
}

template<class K, class C, class A>
std::size_t packSize(const std::set<K,C,A>& data,
                     Opm::Parallel::MPIComm comm)
{
    std::size_t totalSize = packSize(data.size(), comm);
    for (const auto& entry : data)
    {
        totalSize += packSize(entry, comm);
    }
    return totalSize;
}

std::size_t packSize(const char* str, Opm::Parallel::MPIComm comm)
{
#if HAVE_MPI
    int size;
    MPI_Pack_size(1, Dune::MPITraits<std::size_t>::getType(), comm, &size);
    int totalSize = size;
    MPI_Pack_size(strlen(str)+1, MPI_CHAR, comm, &size);
    return totalSize + size;
#else
    (void) str;
    (void) comm;
    return 0;
#endif
}
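// A minimal usage sketch (not from the original file, assuming an MPI build):
// for POD data the overloads above reserve room for a std::size_t length field
// followed by the raw payload, so a caller can size a send buffer directly from
// packSize():
//
//     std::vector<double> values{1.0, 2.0, 3.0};
//     std::size_t bytes = Opm::Mpi::packSize(values, MPI_COMM_WORLD);
//     std::vector<char> buffer(bytes);   // large enough for length + payload
//
// MPI_COMM_WORLD here stands in for whatever Opm::Parallel::MPIComm the caller
// actually has available.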
std::size_t packSize(const std::string& str, Opm::Parallel::MPIComm comm)
{
    return packSize(str.c_str(), comm);
}

template<class T1, class T2, class C, class A>
std::size_t packSize(const std::map<T1,T2,C,A>& data, Opm::Parallel::MPIComm comm)
{
    std::size_t totalSize = packSize(data.size(), comm);
    for (const auto& entry: data)
    {
        totalSize += packSize(entry, comm);
    }
    return totalSize;
}

template<class T1, class T2, class H, class P, class A>
std::size_t packSize(const std::unordered_map<T1,T2,H,P,A>& data, Opm::Parallel::MPIComm comm)
{
    std::size_t totalSize = packSize(data.size(), comm);
    for (const auto& entry: data)
    {
        totalSize += packSize(entry, comm);
    }
    return totalSize;
}

template<class T, std::size_t N>
std::size_t packSize(const std::array<T,N>& data, Opm::Parallel::MPIComm comm)
{
    return N*packSize(data[0], comm);
}

template<class T>
struct Packing
{
};

template<std::size_t Size>
struct Packing<std::bitset<Size>>
{
    static std::size_t packSize(const std::bitset<Size>& data, Opm::Parallel::MPIComm comm)
    {
        return Mpi::packSize(data.to_ullong(), comm);
    }

    static void pack(const std::bitset<Size>& data, std::vector<char>& buffer,
                     int& position, Opm::Parallel::MPIComm comm)
    {
        Mpi::pack(data.to_ullong(), buffer, position, comm);
    }

    static void unpack(std::bitset<Size>& data, std::vector<char>& buffer,
                       int& position, Opm::Parallel::MPIComm comm)
    {
        unsigned long long d;
        Mpi::unpack(d, buffer, position, comm);
        data = std::bitset<Size>(d);
    }
};

template<std::size_t Size>
std::size_t packSize(const std::bitset<Size>& data, Opm::Parallel::MPIComm comm)
{
    return Packing<std::bitset<Size>>::packSize(data, comm);
}

std::size_t packSize(const Opm::time_point&, Opm::Parallel::MPIComm comm)
{
    std::time_t tp = 0;
    return packSize(tp, comm);
}

////// pack routines

template<class T>
void pack(const T*, std::size_t, std::vector<char>&, int&,
          Opm::Parallel::MPIComm, std::integral_constant<bool, false>)
{
    OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type.");
}

template<class T>
void pack(const T* data, std::size_t l, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm, std::integral_constant<bool, true>)
{
#if HAVE_MPI
    MPI_Pack(&l, 1, Dune::MPITraits<std::size_t>::getType(), buffer.data(),
             buffer.size(), &position, comm);
    MPI_Pack(data, l, Dune::MPITraits<T>::getType(), buffer.data(),
             buffer.size(), &position, comm);
#else
    (void) data;
    (void) comm;
    (void) l;
    (void) buffer;
    (void) position;
#endif
}

template<class T>
void pack(const T* data, std::size_t l, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(data, l, buffer, position, comm, typename std::is_pod<T>::type());
}

template<class T1, class T2>
void pack(const std::pair<T1,T2>& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(data.first, buffer, position, comm);
    pack(data.second, buffer, position, comm);
}

template<class T>
void pack(const std::optional<T>& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    bool has_value = data.has_value();
    pack(has_value, buffer, position, comm);
    if (has_value)
        pack(*data, buffer, position, comm);
}

template<class T, class A>
void pack(const std::vector<T,A>& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    if (std::is_pod<T>::value)
    {
        // size written automatically
        pack(data.data(), data.size(), buffer, position, comm);
        return;
    }

    pack(data.size(), buffer, position, comm);

    for (const auto& entry: data)
        pack(entry, buffer, position, comm);
}

template<class T, class C, class A>
void pack(const std::set<T,C,A>& data,
          std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(data.size(), buffer, position, comm);

    for (const auto& entry : data)
    {
        pack(entry, buffer, position, comm);
    }
}
template<class T, class H, class KE, class A>
void pack(const std::unordered_set<T,H,KE,A>& data,
          std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(data.size(), buffer, position, comm);

    for (const auto& entry : data)
    {
        pack(entry, buffer, position, comm);
    }
}

template<class T, std::size_t N>
void pack(const std::array<T,N>& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    for (const T& entry : data)
        pack(entry, buffer, position, comm);
}

template<class A>
void pack(const std::vector<bool,A>& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(data.size(), buffer, position, comm);
    for (const auto entry : data) { // Not a reference: vector<bool> range
        bool b = entry;
        pack(b, buffer, position, comm);
    }
}

template<std::size_t I = 0, typename Tuple>
typename std::enable_if<I == std::tuple_size<Tuple>::value, void>::type
pack_tuple_entry(const Tuple&, std::vector<char>&, int&,
                 Opm::Parallel::MPIComm)
{
}

template<std::size_t I = 0, typename Tuple>
typename std::enable_if<I != std::tuple_size<Tuple>::value, void>::type
pack_tuple_entry(const Tuple& tuple, std::vector<char>& buffer,
                 int& position, Opm::Parallel::MPIComm comm)
{
    pack(std::get<I>(tuple), buffer, position, comm);
    pack_tuple_entry<I+1>(tuple, buffer, position, comm);
}

template<class... Ts>
void pack(const std::tuple<Ts...>& data, std::vector<char>& buffer,
          int& position, Opm::Parallel::MPIComm comm)
{
    pack_tuple_entry(data, buffer, position, comm);
}

void pack(const char* str, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
#if HAVE_MPI
    std::size_t length = strlen(str)+1;
    MPI_Pack(&length, 1, Dune::MPITraits<std::size_t>::getType(), buffer.data(),
             buffer.size(), &position, comm);
    MPI_Pack(str, strlen(str)+1, MPI_CHAR, buffer.data(), buffer.size(),
             &position, comm);
#else
    (void) str;
    (void) comm;
    (void) buffer;
    (void) position;
#endif
}

void pack(const std::string& str, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(str.c_str(), buffer, position, comm);
}

template<class T1, class T2, class C, class A>
void pack(const std::map<T1,T2,C,A>& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(data.size(), buffer, position, comm);

    for (const auto& entry: data)
    {
        pack(entry, buffer, position, comm);
    }
}

template<class T1, class T2, class H, class P, class A>
void pack(const std::unordered_map<T1,T2,H,P,A>& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(data.size(), buffer, position, comm);

    for (const auto& entry: data)
    {
        pack(entry, buffer, position, comm);
    }
}

template<std::size_t Size>
void pack(const std::bitset<Size>& data, std::vector<char>& buffer,
          int& position, Opm::Parallel::MPIComm comm)
{
    Packing<std::bitset<Size>>::pack(data, buffer, position, comm);
}

void pack(const Opm::time_point& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(Opm::TimeService::to_time_t(data), buffer, position, comm);
}

/// Mpi::unpack routines

template<class T>
void unpack(T*, const std::size_t&, std::vector<char>&, int&,
            Opm::Parallel::MPIComm, std::integral_constant<bool, false>)
{
    OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type.");
}

template<class T>
void unpack(T* data, const std::size_t& l, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm, std::integral_constant<bool, true>)
{
#if HAVE_MPI
    MPI_Unpack(buffer.data(), buffer.size(), &position, data, l,
               Dune::MPITraits<T>::getType(), comm);
#else
    (void) data;
    (void) comm;
    (void) l;
    (void) buffer;
    (void) position;
#endif
}

template<class T>
void unpack(T* data, const std::size_t& l, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    unpack(data, l, buffer, position, comm, typename std::is_pod<T>::type());
}

template<class T1, class T2>
void unpack(std::pair<T1,T2>& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    unpack(data.first, buffer, position, comm);
    unpack(data.second, buffer, position, comm);
}

template<class T>
void unpack(std::optional<T>& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    bool has_value;
    unpack(has_value, buffer, position, comm);
    if (has_value)
    {
        T val;
        unpack(val, buffer, position, comm);
        data = std::optional<T>(val);
    }
    else
        data.reset();
}
template<class T, class A>
void unpack(std::vector<T,A>& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    std::size_t length = 0;
    unpack(length, buffer, position, comm);
    data.resize(length);

    if (std::is_pod<T>::value)
    {
        unpack(data.data(), data.size(), buffer, position, comm);
        return;
    }

    for (auto& entry: data)
        unpack(entry, buffer, position, comm);
}

template<class A>
void unpack(std::vector<bool,A>& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    std::size_t size;
    unpack(size, buffer, position, comm);
    data.clear();
    data.reserve(size);
    for (std::size_t i = 0; i < size; ++i) {
        bool entry;
        unpack(entry, buffer, position, comm);
        data.push_back(entry);
    }
}

template<std::size_t I = 0, typename Tuple>
typename std::enable_if<I == std::tuple_size<Tuple>::value, void>::type
unpack_tuple_entry(Tuple&, std::vector<char>&, int&,
                   Opm::Parallel::MPIComm)
{
}

template<std::size_t I = 0, typename Tuple>
typename std::enable_if<I != std::tuple_size<Tuple>::value, void>::type
unpack_tuple_entry(Tuple& tuple, std::vector<char>& buffer,
                   int& position, Opm::Parallel::MPIComm comm)
{
    unpack(std::get<I>(tuple), buffer, position, comm);
    unpack_tuple_entry<I+1>(tuple, buffer, position, comm);
}

template<class... Ts>
void unpack(std::tuple<Ts...>& data, std::vector<char>& buffer,
            int& position, Opm::Parallel::MPIComm comm)
{
    unpack_tuple_entry(data, buffer, position, comm);
}

template<class K, class C, class A>
void unpack(std::set<K,C,A>& data,
            std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    std::size_t size = 0;
    unpack(size, buffer, position, comm);

    for (;size>0; size--)
    {
        K entry;
        unpack(entry, buffer, position, comm);
        data.insert(entry);
    }
}

template<class T, class H, class KE, class A>
void unpack(std::unordered_set<T,H,KE,A>& data,
            std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    std::size_t size=0;
    unpack(size, buffer, position, comm);

    for (;size>0; size--)
    {
        T entry;
        unpack(entry, buffer, position, comm);
        data.insert(entry);
    }
}

template<class T, std::size_t N>
void unpack(std::array<T,N>& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    for (T& entry : data)
        unpack(entry, buffer, position, comm);
}

void unpack(char* str, std::size_t length, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
#if HAVE_MPI
    MPI_Unpack(buffer.data(), buffer.size(), &position, const_cast<char*>(str),
               length, MPI_CHAR, comm);
#else
    (void) str;
    (void) comm;
    (void) length;
    (void) buffer;
    (void) position;
#endif
}

void unpack(std::string& str, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    std::size_t length=0;
    unpack(length, buffer, position, comm);
    std::vector<char> cStr(length, '\0');
    unpack(cStr.data(), length, buffer, position, comm);
    str.clear();
    str.append(cStr.data());
}

template<class T1, class T2, class C, class A>
void unpack(std::map<T1,T2,C,A>& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    std::size_t size=0;
    unpack(size, buffer, position, comm);

    for (;size>0; size--)
    {
        std::pair<T1,T2> entry;
        unpack(entry, buffer, position, comm);
        data.insert(entry);
    }
}

template<class T1, class T2, class H, class P, class A>
void unpack(std::unordered_map<T1,T2,H,P,A>& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    std::size_t size=0;
    unpack(size, buffer, position, comm);

    for (;size>0; size--)
    {
        std::pair<T1,T2> entry;
        unpack(entry, buffer, position, comm);
        data.insert(entry);
    }
}

template<std::size_t Size>
void unpack(std::bitset<Size>& data, std::vector<char>& buffer,
            int& position, Opm::Parallel::MPIComm comm)
{
    Packing<std::bitset<Size>>::unpack(data, buffer, position, comm);
}

void unpack([[maybe_unused]] Opm::time_point& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    std::time_t tp;
    unpack(tp, buffer, position, comm);
#if HAVE_MPI
    data = Opm::TimeService::from_time_t(tp);
#endif
}
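// A round-trip sketch (illustration only; packObject/unpackObject are
// hypothetical helpers, not part of this file). The three families above are
// meant to be used together: packSize() computes an upper bound for the buffer
// size, pack() serializes into the buffer while advancing a position cursor,
// and unpack() replays the same sequence on the receiving side.
//
//     template<class T>
//     std::vector<char> packObject(const T& obj, Opm::Parallel::MPIComm comm)
//     {
//         std::vector<char> buffer(Mpi::packSize(obj, comm));
//         int position = 0;
//         Mpi::pack(obj, buffer, position, comm); // position <= buffer.size() afterwards
//         return buffer;
//     }
//
//     template<class T>
//     void unpackObject(T& obj, std::vector<char>& buffer, Opm::Parallel::MPIComm comm)
//     {
//         int position = 0;
//         Mpi::unpack(obj, buffer, position, comm);
//     }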
#define INSTANTIATE_PACK_VECTOR(...) \
    template std::size_t packSize(const std::vector<__VA_ARGS__>& data, \
                                  Opm::Parallel::MPIComm comm); \
    template void pack(const std::vector<__VA_ARGS__>& data, \
                       std::vector<char>& buffer, int& position, \
                       Opm::Parallel::MPIComm comm); \
    template void unpack(std::vector<__VA_ARGS__>& data, \
                         std::vector<char>& buffer, int& position, \
                         Opm::Parallel::MPIComm comm);

// NOTE: the template arguments of the compound instantiations below were lost
// in transcription; the types used here are plausible reconstructions and may
// differ from the original file.
INSTANTIATE_PACK_VECTOR(float)
INSTANTIATE_PACK_VECTOR(double)
INSTANTIATE_PACK_VECTOR(std::vector<double>)
INSTANTIATE_PACK_VECTOR(bool)
INSTANTIATE_PACK_VECTOR(char)
INSTANTIATE_PACK_VECTOR(int)
INSTANTIATE_PACK_VECTOR(unsigned char)
INSTANTIATE_PACK_VECTOR(unsigned int)
INSTANTIATE_PACK_VECTOR(unsigned long int)
INSTANTIATE_PACK_VECTOR(unsigned long long int)
INSTANTIATE_PACK_VECTOR(std::time_t)
INSTANTIATE_PACK_VECTOR(std::array<double, 3>)
INSTANTIATE_PACK_VECTOR(std::pair<bool, double>)
INSTANTIATE_PACK_VECTOR(std::map<std::string, int>)
INSTANTIATE_PACK_VECTOR(std::pair<std::string, std::vector<std::size_t>>)
INSTANTIATE_PACK_VECTOR(std::pair<std::string, std::vector<int>>)
INSTANTIATE_PACK_VECTOR(std::pair<int, std::vector<int>>)
INSTANTIATE_PACK_VECTOR(std::string)

#undef INSTANTIATE_PACK_VECTOR
#undef INSTANTIATE_PACK_SET

#define INSTANTIATE_PACK(...) \
    template std::size_t packSize(const __VA_ARGS__& data, \
                                  Opm::Parallel::MPIComm comm); \
    template void pack(const __VA_ARGS__& data, \
                       std::vector<char>& buffer, int& position, \
                       Opm::Parallel::MPIComm comm); \
    template void unpack(__VA_ARGS__& data, \
                         std::vector<char>& buffer, int& position, \
                         Opm::Parallel::MPIComm comm);

INSTANTIATE_PACK(float)
INSTANTIATE_PACK(double)
INSTANTIATE_PACK(bool)
INSTANTIATE_PACK(int)
INSTANTIATE_PACK(unsigned char)
INSTANTIATE_PACK(unsigned int)
INSTANTIATE_PACK(unsigned long int)
INSTANTIATE_PACK(unsigned long long int)
INSTANTIATE_PACK(std::array<short, 3>)
INSTANTIATE_PACK(std::array<bool, 3>)
INSTANTIATE_PACK(std::array<int, 3>)
INSTANTIATE_PACK(std::array<double, 4>)
INSTANTIATE_PACK(std::array<double, 5>)
INSTANTIATE_PACK(std::map<std::pair<int, int>, std::pair<bool, double>>)
INSTANTIATE_PACK(std::optional<double>)
INSTANTIATE_PACK(std::optional<std::string>)
INSTANTIATE_PACK(std::pair<double, double>)
INSTANTIATE_PACK(std::optional<std::pair<double, double>>)
INSTANTIATE_PACK(std::map<std::string, std::vector<int>>)
INSTANTIATE_PACK(std::map<std::string, std::map<std::pair<int, int>, int>>)
INSTANTIATE_PACK(std::map<std::string, int>)
INSTANTIATE_PACK(std::map<std::string, double>)
INSTANTIATE_PACK(std::map<int, int>)
INSTANTIATE_PACK(std::map<std::string, std::string>)
INSTANTIATE_PACK(std::unordered_map<std::string, std::size_t>)
INSTANTIATE_PACK(std::unordered_map<std::string, std::string,
                                    Opm::OrderedMapDetail::TruncatedStringHash<std::string::npos>,
                                    Opm::OrderedMapDetail::TruncatedStringEquals<std::string::npos>>)
INSTANTIATE_PACK(std::unordered_map<std::string, std::string,
                                    Opm::OrderedMapDetail::TruncatedStringHash<8>,
                                    Opm::OrderedMapDetail::TruncatedStringEquals<8>>)
INSTANTIATE_PACK(std::unordered_map<std::string, double>)
INSTANTIATE_PACK(std::unordered_set<std::string>)
INSTANTIATE_PACK(std::set<std::string>)
INSTANTIATE_PACK(std::bitset<4>)

#undef INSTANTIATE_PACK

} // end namespace Mpi

RestartValue loadParallelRestart(const EclipseIO* eclIO,
                                 Action::State& actionState,
                                 SummaryState& summaryState,
                                 const std::vector<RestartKey>& solutionKeys,
                                 const std::vector<RestartKey>& extraKeys,
                                 Parallel::Communication comm)
{
#if HAVE_MPI
    RestartValue restartValues{};

    if (eclIO)
    {
        assert(comm.rank() == 0);
        restartValues = eclIO->loadRestart(actionState, summaryState, solutionKeys, extraKeys);
    }

    EclMpiSerializer ser(comm);
    ser.broadcast(restartValues);
    ser.broadcast(summaryState);
    return restartValues;
#else
    (void) comm;
    return eclIO->loadRestart(actionState, summaryState, solutionKeys, extraKeys);
#endif
}

} // end namespace Opm
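// Call-site sketch (illustration only; the surrounding objects are assumptions,
// not defined in this file): only rank 0 holds an EclipseIO instance and reads
// the restart file, while the other ranks pass nullptr and receive the result
// through the EclMpiSerializer broadcast inside loadParallelRestart().
//
//     Opm::Parallel::Communication comm(MPI_COMM_WORLD);
//     const Opm::EclipseIO* eclIO = (comm.rank() == 0) ? &io : nullptr;
//     Opm::RestartValue restart =
//         Opm::loadParallelRestart(eclIO, actionState, summaryState,
//                                  solutionKeys, extraKeys, comm);
//
// io, actionState, summaryState, solutionKeys and extraKeys are caller-provided
// objects of the types named in the function signature above.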