/*
Copyright 2019 Equinor AS.
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>

#if HAVE_MPI
#include <mpi.h>
#endif

#include "ParallelRestart.hpp"

#include <opm/common/ErrorMacros.hpp>
#include <opm/common/utility/TimeService.hpp>
#include <opm/output/data/Aquifer.hpp>
#include <opm/output/data/Cells.hpp>
#include <opm/output/data/Groups.hpp>
#include <opm/output/data/GuideRateValue.hpp>
#include <opm/output/data/Solution.hpp>
#include <opm/output/data/Wells.hpp>
#include <opm/output/eclipse/EclipseIO.hpp>
#include <opm/output/eclipse/RestartValue.hpp>

#include <dune/common/parallel/mpitraits.hh>

#include <array>
#include <cstring>
#include <ctime>
#include <map>
#include <optional>
#include <set>
#include <stdexcept>
#include <string>
#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
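
// HANDLE_AS_POD(T) emits the packSize/pack/unpack triple for a type that is
// transferred bit-for-bit by forwarding to the POD
// (std::integral_constant<bool, true>) overloads defined below.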
#define HANDLE_AS_POD(T) \
  std::size_t packSize(const T& data, Opm::Parallel::MPIComm comm) \
  { \
      return packSize(data, comm, std::integral_constant<bool,true>()); \
  } \
  void pack(const T& data, std::vector<char>& buffer, int& position, \
            Opm::Parallel::MPIComm comm) \
  { \
      pack(data, buffer, position, comm, std::integral_constant<bool,true>()); \
  } \
  void unpack(T& data, std::vector<char>& buffer, int& position, \
              Opm::Parallel::MPIComm comm) \
  { \
      unpack(data, buffer, position, comm, std::integral_constant<bool,true>()); \
  }
namespace Opm
{
namespace Mpi
{
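
// Each serializable type provides three functions:
//   packSize() - upper bound, in bytes, of its MPI_Pack representation,
//   pack()     - append the object to a char buffer via MPI_Pack,
//   unpack()   - restore the object from that buffer via MPI_Unpack.
// Containers are written as an element count followed by the elements;
// unpack() reads them back in exactly the same order.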
template<class T>
std::size_t packSize(const T*, std::size_t, Opm::Parallel::MPIComm,
                     std::integral_constant<bool, false>)
{
    OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type.");
}

template<class T>
std::size_t packSize(const T*, std::size_t l, Opm::Parallel::MPIComm comm,
                     std::integral_constant<bool, true>)
{
#if HAVE_MPI
    int size;
    MPI_Pack_size(1, Dune::MPITraits<std::size_t>::getType(), comm, &size);
    std::size_t totalSize = size;
    MPI_Pack_size(l, Dune::MPITraits<T>::getType(), comm, &size);
    return totalSize + size;
#else
    (void) comm;
    return l-l;
#endif
}

template<class T>
std::size_t packSize(const T* data, std::size_t l, Opm::Parallel::MPIComm comm)
{
    return packSize(data, l, comm, typename std::is_pod<T>::type());
}

template<class T1, class T2>
std::size_t packSize(const std::pair<T1,T2>& data, Opm::Parallel::MPIComm comm)
{
    return packSize(data.first, comm) + packSize(data.second, comm);
}

template<class T>
std::size_t packSize(const std::optional<T>& data, Opm::Parallel::MPIComm comm)
{
    bool has_value = data.has_value();
    std::size_t pack_size = packSize(has_value, comm);
    if (has_value)
        pack_size += packSize(*data, comm);
    return pack_size;
}

template<class T, class A>
std::size_t packSize(const std::vector<T,A>& data, Opm::Parallel::MPIComm comm)
{
    if (std::is_pod<T>::value)
        // size written automatically
        return packSize(data.data(), data.size(), comm);

    std::size_t size = packSize(data.size(), comm);

    for (const auto& entry: data)
        size += packSize(entry, comm);

    return size;
}
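
// std::vector<bool> is bit-packed and has no data() member, so it cannot use
// the contiguous POD fast path; each element is priced as an individual bool.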
template<class A>
std::size_t packSize(const std::vector<bool,A>& data, Opm::Parallel::MPIComm comm)
{
    bool entry = false;
    return packSize(data.size(), comm) + data.size()*packSize(entry, comm);
}
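
// Tuples are serialized by compile-time recursion over the element index I;
// the enable_if pair below terminates the recursion at std::tuple_size.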
template<std::size_t I = 0, typename Tuple>
typename std::enable_if<I == std::tuple_size<Tuple>::value, std::size_t>::type
pack_size_tuple_entry(const Tuple&, Opm::Parallel::MPIComm)
{
    return 0;
}

template<std::size_t I = 0, typename Tuple>
typename std::enable_if<I != std::tuple_size<Tuple>::value, std::size_t>::type
pack_size_tuple_entry(const Tuple& tuple, Opm::Parallel::MPIComm comm)
{
    return packSize(std::get<I>(tuple), comm) + pack_size_tuple_entry<I+1>(tuple, comm);
}

template<class... Ts>
std::size_t packSize(const std::tuple<Ts...>& data, Opm::Parallel::MPIComm comm)
{
    return pack_size_tuple_entry(data, comm);
}

template<class T, class H, class KE, class A>
std::size_t packSize(const std::unordered_set<T,H,KE,A>& data,
                     Opm::Parallel::MPIComm comm)
{
    std::size_t totalSize = packSize(data.size(), comm);
    for (const auto& entry : data)
    {
        totalSize += packSize(entry, comm);
    }
    return totalSize;
}

template<class K, class C, class A>
std::size_t packSize(const std::set<K,C,A>& data,
                     Opm::Parallel::MPIComm comm)
{
    std::size_t totalSize = packSize(data.size(), comm);
    for (const auto& entry : data)
    {
        totalSize += packSize(entry, comm);
    }
    return totalSize;
}
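
// Strings travel as a length prefix (including the terminating '\0') followed
// by the raw characters.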
std::size_t packSize(const char* str, Opm::Parallel::MPIComm comm)
{
#if HAVE_MPI
    int size;
    MPI_Pack_size(1, Dune::MPITraits<std::size_t>::getType(), comm, &size);
    int totalSize = size;
    MPI_Pack_size(strlen(str)+1, MPI_CHAR, comm, &size);
    return totalSize + size;
#else
    (void) str;
    (void) comm;
    return 0;
#endif
}

std::size_t packSize(const std::string& str, Opm::Parallel::MPIComm comm)
{
    return packSize(str.c_str(), comm);
}

template<class T1, class T2, class C, class A>
std::size_t packSize(const std::map<T1,T2,C,A>& data, Opm::Parallel::MPIComm comm)
{
    std::size_t totalSize = packSize(data.size(), comm);
    for (const auto& entry: data)
    {
        totalSize += packSize(entry, comm);
    }
    return totalSize;
}

template<class T1, class T2, class H, class P, class A>
std::size_t packSize(const std::unordered_map<T1,T2,H,P,A>& data, Opm::Parallel::MPIComm comm)
{
    std::size_t totalSize = packSize(data.size(), comm);
    for (const auto& entry: data)
    {
        totalSize += packSize(entry, comm);
    }
    return totalSize;
}

template<class T, std::size_t N>
std::size_t packSize(const std::array<T,N>& data, Opm::Parallel::MPIComm comm)
{
    return N*packSize(data[0], comm);
}
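
// These result/data types are handled as PODs and reuse the byte-wise
// pack/unpack code path.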
HANDLE_AS_POD(data::CarterTracyData)
HANDLE_AS_POD(data::Connection)
HANDLE_AS_POD(data::CurrentControl)
HANDLE_AS_POD(data::FetkovichData)
HANDLE_AS_POD(data::GroupConstraints)
HANDLE_AS_POD(data::NodeData)
HANDLE_AS_POD(data::Rates)
HANDLE_AS_POD(data::Segment)
std::size_t packSize(const data::NumericAquiferData& data, Opm::Parallel::MPIComm comm)
{
return packSize(data.initPressure, comm);
}
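
// data::AquiferData carries at most one type-specific payload. A small integer
// tag (written by the matching pack() below) records which payload is present
// so the receiver can recreate the right one on unpack().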
std::size_t packSize(const data::AquiferData& data, Opm::Parallel::MPIComm comm)
{
    const auto type = 0ull;

    const auto base = packSize(data.aquiferID, comm)
        + packSize(data.pressure, comm)
        + packSize(data.fluxRate, comm)
        + packSize(data.volume, comm)
        + packSize(data.initPressure, comm)
        + packSize(data.datumDepth, comm)
        + packSize(type, comm);

    if (auto const* aquFet = data.typeData.get<data::AquiferType::Fetkovich>();
        aquFet != nullptr)
    {
        return base + packSize(*aquFet, comm);
    }
    else if (auto const* aquCT = data.typeData.get<data::AquiferType::CarterTracy>();
             aquCT != nullptr)
    {
        return base + packSize(*aquCT, comm);
    }
    else if (auto const* aquNum = data.typeData.get<data::AquiferType::Numerical>();
             aquNum != nullptr)
    {
        return base + packSize(*aquNum, comm);
    }

    return base;
}
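
// data::GuideRateValue is serialized as two fixed-size arrays: per-item
// "has value" flags and the corresponding values, so the packed size does not
// depend on which items are actually set.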
std::size_t packSize(const data::GuideRateValue&, Opm::Parallel::MPIComm comm)
{
    const auto nItem = static_cast<std::size_t>(data::GuideRateValue::Item::NumItems);

    return packSize(std::array<int   , nItem>{}, comm)
        +  packSize(std::array<double, nItem>{}, comm);
}
std::size_t packSize(const data::GroupGuideRates& data, Opm::Parallel::MPIComm comm)
{
return packSize(data.production, comm)
+ packSize(data.injection, comm);
}
std::size_t packSize(const data::GroupData& data, Opm::Parallel::MPIComm comm)
{
return packSize(data.currentControl, comm)
+ packSize(data.guideRates, comm);
}
std::size_t packSize(const data::Well& data, Opm::Parallel::MPIComm comm)
{
std::size_t size = packSize(data.rates, comm);
size += packSize(data.bhp, comm) + packSize(data.thp, comm);
size += packSize(data.temperature, comm);
size += packSize(data.control, comm);
size += packSize(data.connections, comm);
size += packSize(data.segments, comm);
size += packSize(data.current_control, comm);
size += packSize(data.guide_rates, comm);
return size;
}
std::size_t packSize(const data::CellData& data, Opm::Parallel::MPIComm comm)
{
return packSize(data.dim, comm) + packSize(data.data, comm) + packSize(data.target, comm);
}
std::size_t packSize(const RestartKey& data, Opm::Parallel::MPIComm comm)
{
return packSize(data.key, comm) + packSize(data.dim, comm) + packSize(data.required, comm);
}
std::size_t packSize(const data::Solution& data, Opm::Parallel::MPIComm comm)
{
// Needs explicit conversion to a supported base type holding the data
// to prevent throwing.
    return packSize(static_cast<const std::map<std::string, data::CellData>&>(data), comm);
}
std::size_t packSize(const data::GroupAndNetworkValues& data, Opm::Parallel::MPIComm comm)
{
return packSize(data.groupData, comm)
+ packSize(data.nodeData, comm);
}
std::size_t packSize(const data::Wells& data, Opm::Parallel::MPIComm comm)
{
// Needs explicit conversion to a supported base type holding the data
// to prevent throwing.
    return packSize(static_cast<const std::map<std::string, data::Well>&>(data), comm);
}
std::size_t packSize(const RestartValue& data, Opm::Parallel::MPIComm comm)
{
return packSize(data.solution, comm)
+ packSize(data.wells, comm)
+ packSize(data.grp_nwrk, comm)
+ packSize(data.aquifer, comm)
+ packSize(data.extra, comm);
}
std::size_t packSize(const Opm::time_point&, Opm::Parallel::MPIComm comm)
{
std::time_t tp;
return packSize(tp, comm);
}
////// pack routines
template<class T>
void pack(const T*, std::size_t, std::vector<char>&, int&,
          Opm::Parallel::MPIComm, std::integral_constant<bool, false>)
{
    OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type.");
}

template<class T>
void pack(const T* data, std::size_t l, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm,
          std::integral_constant<bool, true>)
{
#if HAVE_MPI
    MPI_Pack(&l, 1, Dune::MPITraits<std::size_t>::getType(), buffer.data(),
             buffer.size(), &position, comm);
    MPI_Pack(data, l, Dune::MPITraits<T>::getType(), buffer.data(),
             buffer.size(), &position, comm);
#else
    (void) data;
    (void) comm;
    (void) l;
    (void) buffer;
    (void) position;
#endif
}

template<class T>
void pack(const T* data, std::size_t l, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(data, l, buffer, position, comm, typename std::is_pod<T>::type());
}
template<class T1, class T2>
void pack(const std::pair<T1,T2>& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(data.first, buffer, position, comm);
    pack(data.second, buffer, position, comm);
}

template<class T>
void pack(const std::optional<T>& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    bool has_value = data.has_value();
    pack(has_value, buffer, position, comm);
    if (has_value)
        pack(*data, buffer, position, comm);
}

template<class T, class A>
void pack(const std::vector<T,A>& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    if (std::is_pod<T>::value)
    {
        // size written automatically
        pack(data.data(), data.size(), buffer, position, comm);
        return;
    }

    pack(data.size(), buffer, position, comm);

    for (const auto& entry: data)
        pack(entry, buffer, position, comm);
}

template<class K, class C, class A>
void pack(const std::set<K,C,A>& data,
          std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(data.size(), buffer, position, comm);

    for (const auto& entry : data)
    {
        pack(entry, buffer, position, comm);
    }
}

template<class T, class H, class KE, class A>
void pack(const std::unordered_set<T,H,KE,A>& data,
          std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(data.size(), buffer, position, comm);

    for (const auto& entry : data)
    {
        pack(entry, buffer, position, comm);
    }
}

template<class T, std::size_t N>
void pack(const std::array<T,N>& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    for (const T& entry : data)
        pack(entry, buffer, position, comm);
}

template<class A>
void pack(const std::vector<bool,A>& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(data.size(), buffer, position, comm);
    for (const auto entry : data) { // Not a reference: vector<bool> range
        bool b = entry;
        pack(b, buffer, position, comm);
    }
}
template<std::size_t I = 0, typename Tuple>
typename std::enable_if<I == std::tuple_size<Tuple>::value, void>::type
pack_tuple_entry(const Tuple&, std::vector<char>&, int&,
                 Opm::Parallel::MPIComm)
{
}

template<std::size_t I = 0, typename Tuple>
typename std::enable_if<I != std::tuple_size<Tuple>::value, void>::type
pack_tuple_entry(const Tuple& tuple, std::vector<char>& buffer,
                 int& position, Opm::Parallel::MPIComm comm)
{
    pack(std::get<I>(tuple), buffer, position, comm);
    pack_tuple_entry<I+1>(tuple, buffer, position, comm);
}

template<class... Ts>
void pack(const std::tuple<Ts...>& data, std::vector<char>& buffer,
          int& position, Opm::Parallel::MPIComm comm)
{
    pack_tuple_entry(data, buffer, position, comm);
}
void pack(const char* str, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
#if HAVE_MPI
    std::size_t length = strlen(str)+1;
    MPI_Pack(&length, 1, Dune::MPITraits<std::size_t>::getType(), buffer.data(),
             buffer.size(), &position, comm);
    MPI_Pack(str, strlen(str)+1, MPI_CHAR, buffer.data(), buffer.size(),
             &position, comm);
#else
    (void) str;
    (void) comm;
    (void) buffer;
    (void) position;
#endif
}

void pack(const std::string& str, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(str.c_str(), buffer, position, comm);
}

template<class T1, class T2, class C, class A>
void pack(const std::map<T1,T2,C,A>& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(data.size(), buffer, position, comm);

    for (const auto& entry: data)
    {
        pack(entry, buffer, position, comm);
    }
}

template<class T1, class T2, class H, class P, class A>
void pack(const std::unordered_map<T1,T2,H,P,A>& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(data.size(), buffer, position, comm);

    for (const auto& entry: data)
    {
        pack(entry, buffer, position, comm);
    }
}

void pack(const data::NumericAquiferData& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(data.initPressure, buffer, position, comm);
}
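
// Encode the active aquifer type as a bit flag (1<<0 Fetkovich, 1<<1
// Carter-Tracy, 1<<2 numerical) and append the matching payload, if any.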
void pack(const data::AquiferData& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    const auto type =
          (data.typeData.is<data::AquiferType::Fetkovich>()   * (1ull << 0))
        + (data.typeData.is<data::AquiferType::CarterTracy>() * (1ull << 1))
        + (data.typeData.is<data::AquiferType::Numerical>()   * (1ull << 2));

    pack(data.aquiferID, buffer, position, comm);
    pack(data.pressure, buffer, position, comm);
    pack(data.fluxRate, buffer, position, comm);
    pack(data.volume, buffer, position, comm);
    pack(data.initPressure, buffer, position, comm);
    pack(data.datumDepth, buffer, position, comm);
    pack(type, buffer, position, comm);

    if (auto const* aquFet = data.typeData.get<data::AquiferType::Fetkovich>();
        aquFet != nullptr)
    {
        pack(*aquFet, buffer, position, comm);
    }
    else if (auto const* aquCT = data.typeData.get<data::AquiferType::CarterTracy>();
             aquCT != nullptr)
    {
        pack(*aquCT, buffer, position, comm);
    }
    else if (auto const* aquNum = data.typeData.get<data::AquiferType::Numerical>();
             aquNum != nullptr)
    {
        pack(*aquNum, buffer, position, comm);
    }
}
void pack(const data::GuideRateValue& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    using Item = data::GuideRateValue::Item;
    const auto nItem = static_cast<std::size_t>(Item::NumItems);

    auto has = std::array<int   , nItem>{};  has.fill(0);
    auto val = std::array<double, nItem>{};  val.fill(0.0);

    for (auto itemID = 0*nItem; itemID < nItem; ++itemID) {
        const auto item = static_cast<Item>(itemID);

        if (data.has(item)) {
            has[itemID] = 1;
            val[itemID] = data.get(item);
        }
    }

    pack(has, buffer, position, comm);
    pack(val, buffer, position, comm);
}
void pack(const data::GroupGuideRates& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(data.production, buffer, position, comm);
    pack(data.injection, buffer, position, comm);
}

void pack(const data::GroupData& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(data.currentControl, buffer, position, comm);
    pack(data.guideRates, buffer, position, comm);
}

void pack(const data::Well& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(data.rates, buffer, position, comm);
    pack(data.bhp, buffer, position, comm);
    pack(data.thp, buffer, position, comm);
    pack(data.temperature, buffer, position, comm);
    pack(data.control, buffer, position, comm);
    pack(data.connections, buffer, position, comm);
    pack(data.segments, buffer, position, comm);
    pack(data.current_control, buffer, position, comm);
    pack(data.guide_rates, buffer, position, comm);
}

void pack(const RestartKey& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(data.key, buffer, position, comm);
    pack(data.dim, buffer, position, comm);
    pack(data.required, buffer, position, comm);
}

void pack(const data::CellData& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(data.dim, buffer, position, comm);
    pack(data.data, buffer, position, comm);
    pack(data.target, buffer, position, comm);
}

void pack(const data::Solution& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    // Needs explicit conversion to a supported base type holding the data
    // to prevent throwing.
    pack(static_cast<const std::map<std::string, data::CellData>&>(data),
         buffer, position, comm);
}

void pack(const data::Wells& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    // Needs explicit conversion to a supported base type holding the data
    // to prevent throwing.
    pack(static_cast<const std::map<std::string, data::Well>&>(data),
         buffer, position, comm);
}

void pack(const data::GroupAndNetworkValues& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(data.groupData, buffer, position, comm);
    pack(data.nodeData, buffer, position, comm);
}

void pack(const RestartValue& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(data.solution, buffer, position, comm);
    pack(data.wells, buffer, position, comm);
    pack(data.grp_nwrk, buffer, position, comm);
    pack(data.aquifer, buffer, position, comm);
    pack(data.extra, buffer, position, comm);
}

void pack(const Opm::time_point& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm)
{
    pack(Opm::TimeService::to_time_t(data), buffer, position, comm);
}
/// unpack routines
template<class T>
void unpack(T*, const std::size_t&, std::vector<char>&, int&,
            Opm::Parallel::MPIComm, std::integral_constant<bool, false>)
{
    OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type.");
}

template<class T>
void unpack(T* data, const std::size_t& l, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm,
            std::integral_constant<bool, true>)
{
#if HAVE_MPI
    MPI_Unpack(buffer.data(), buffer.size(), &position, data, l,
               Dune::MPITraits<T>::getType(), comm);
#else
    (void) data;
    (void) comm;
    (void) l;
    (void) buffer;
    (void) position;
#endif
}

template<class T>
void unpack(T* data, const std::size_t& l, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    unpack(data, l, buffer, position, comm, typename std::is_pod<T>::type());
}
template<class T1, class T2>
void unpack(std::pair<T1,T2>& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    unpack(data.first, buffer, position, comm);
    unpack(data.second, buffer, position, comm);
}

template<class T>
void unpack(std::optional<T>& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    bool has_value;
    unpack(has_value, buffer, position, comm);
    if (has_value) {
        T val;
        unpack(val, buffer, position, comm);
        data = std::optional<T>(val);
    } else
        data.reset();
}

template<class T, class A>
void unpack(std::vector<T,A>& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    std::size_t length = 0;
    unpack(length, buffer, position, comm);
    data.resize(length);

    if (std::is_pod<T>::value)
    {
        unpack(data.data(), data.size(), buffer, position, comm);
        return;
    }

    for (auto& entry: data)
        unpack(entry, buffer, position, comm);
}

template<class A>
void unpack(std::vector<bool,A>& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    size_t size;
    unpack(size, buffer, position, comm);
    data.clear();
    data.reserve(size);
    for (size_t i = 0; i < size; ++i) {
        bool entry;
        unpack(entry, buffer, position, comm);
        data.push_back(entry);
    }
}
template<std::size_t I = 0, typename Tuple>
typename std::enable_if<I == std::tuple_size<Tuple>::value, void>::type
unpack_tuple_entry(Tuple&, std::vector<char>&, int&,
                   Opm::Parallel::MPIComm)
{
}

template<std::size_t I = 0, typename Tuple>
typename std::enable_if<I != std::tuple_size<Tuple>::value, void>::type
unpack_tuple_entry(Tuple& tuple, std::vector<char>& buffer,
                   int& position, Opm::Parallel::MPIComm comm)
{
    unpack(std::get<I>(tuple), buffer, position, comm);
    unpack_tuple_entry<I+1>(tuple, buffer, position, comm);
}

template<class... Ts>
void unpack(std::tuple<Ts...>& data, std::vector<char>& buffer,
            int& position, Opm::Parallel::MPIComm comm)
{
    unpack_tuple_entry(data, buffer, position, comm);
}
template<class K, class C, class A>
void unpack(std::set<K,C,A>& data,
            std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    std::size_t size = 0;
    unpack(size, buffer, position, comm);

    for (;size>0; size--)
    {
        K entry;
        unpack(entry, buffer, position, comm);
        data.insert(entry);
    }
}

template<class T, class H, class KE, class A>
void unpack(std::unordered_set<T,H,KE,A>& data,
            std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    std::size_t size = 0;
    unpack(size, buffer, position, comm);

    for (;size>0; size--)
    {
        T entry;
        unpack(entry, buffer, position, comm);
        data.insert(entry);
    }
}

template<class T, std::size_t N>
void unpack(std::array<T,N>& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    for (T& entry : data)
        unpack(entry, buffer, position, comm);
}
void unpack(char* str, std::size_t length, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
#if HAVE_MPI
    MPI_Unpack(buffer.data(), buffer.size(), &position, const_cast<char*>(str), length, MPI_CHAR, comm);
#else
    (void) str;
    (void) comm;
    (void) length;
    (void) buffer;
    (void) position;
#endif
}
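
// Mirrors pack(const char*): read the length (including the '\0'), then the
// characters into a temporary buffer before assigning to the string.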
void unpack(std::string& str, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    std::size_t length = 0;
    unpack(length, buffer, position, comm);
    std::vector<char> cStr(length, '\0');
    unpack(cStr.data(), length, buffer, position, comm);
    str.clear();
    str.append(cStr.data());
}
template<class T1, class T2, class C, class A>
void unpack(std::map<T1,T2,C,A>& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    std::size_t size = 0;
    unpack(size, buffer, position, comm);

    for (;size>0; size--)
    {
        std::pair<T1,T2> entry;
        unpack(entry, buffer, position, comm);
        data.insert(entry);
    }
}

template<class T1, class T2, class H, class P, class A>
void unpack(std::unordered_map<T1,T2,H,P,A>& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    std::size_t size = 0;
    unpack(size, buffer, position, comm);

    for (;size>0; size--)
    {
        std::pair<T1,T2> entry;
        unpack(entry, buffer, position, comm);
        data.insert(entry);
    }
}
void unpack(data::Well& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    unpack(data.rates, buffer, position, comm);
    unpack(data.bhp, buffer, position, comm);
    unpack(data.thp, buffer, position, comm);
    unpack(data.temperature, buffer, position, comm);
    unpack(data.control, buffer, position, comm);
    unpack(data.connections, buffer, position, comm);
    unpack(data.segments, buffer, position, comm);
    unpack(data.current_control, buffer, position, comm);
    unpack(data.guide_rates, buffer, position, comm);
}

void unpack(data::NumericAquiferData& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    unpack(data.initPressure, buffer, position, comm);
}

void unpack(data::AquiferData& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    auto type = 0ull;

    unpack(data.aquiferID, buffer, position, comm);
    unpack(data.pressure, buffer, position, comm);
    unpack(data.fluxRate, buffer, position, comm);
    unpack(data.volume, buffer, position, comm);
    unpack(data.initPressure, buffer, position, comm);
    unpack(data.datumDepth, buffer, position, comm);
    unpack(type, buffer, position, comm);

    if (type == (1ull << 0)) {
        auto* aquFet = data.typeData.create<data::AquiferType::Fetkovich>();
        unpack(*aquFet, buffer, position, comm);
    }
    else if (type == (1ull << 1)) {
        auto* aquCT = data.typeData.create<data::AquiferType::CarterTracy>();
        unpack(*aquCT, buffer, position, comm);
    }
    else if (type == (1ull << 2)) {
        auto* aquNum = data.typeData.create<data::AquiferType::Numerical>();
        unpack(*aquNum, buffer, position, comm);
    }
}
void unpack(data::GuideRateValue& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    using Item = data::GuideRateValue::Item;
    const auto nItem = static_cast<std::size_t>(Item::NumItems);

    auto has = std::array<int   , nItem>{};
    auto val = std::array<double, nItem>{};

    unpack(has, buffer, position, comm);
    unpack(val, buffer, position, comm);

    for (auto itemID = 0*nItem; itemID < nItem; ++itemID) {
        if (has[itemID] != 0) {
            data.set(static_cast<Item>(itemID), val[itemID]);
        }
    }
}
void unpack(data::GroupGuideRates& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    unpack(data.production, buffer, position, comm);
    unpack(data.injection, buffer, position, comm);
}

void unpack(data::GroupData& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    unpack(data.currentControl, buffer, position, comm);
    unpack(data.guideRates, buffer, position, comm);
}

void unpack(RestartKey& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    unpack(data.key, buffer, position, comm);
    unpack(data.dim, buffer, position, comm);
    unpack(data.required, buffer, position, comm);
}

void unpack(data::CellData& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    unpack(data.dim, buffer, position, comm);
    unpack(data.data, buffer, position, comm);
    unpack(data.target, buffer, position, comm);
}

void unpack(data::Solution& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    // Needs explicit conversion to a supported base type holding the data
    // to prevent throwing.
    unpack(static_cast<std::map<std::string, data::CellData>&>(data),
           buffer, position, comm);
}

void unpack(data::Wells& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    // Needs explicit conversion to a supported base type holding the data
    // to prevent throwing.
    unpack(static_cast<std::map<std::string, data::Well>&>(data),
           buffer, position, comm);
}

void unpack(data::GroupAndNetworkValues& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    unpack(data.groupData, buffer, position, comm);
    unpack(data.nodeData, buffer, position, comm);
}

void unpack(RestartValue& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    unpack(data.solution, buffer, position, comm);
    unpack(data.wells, buffer, position, comm);
    unpack(data.grp_nwrk, buffer, position, comm);
    unpack(data.aquifer, buffer, position, comm);
    unpack(data.extra, buffer, position, comm);
}

void unpack([[maybe_unused]] Opm::time_point& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm)
{
    std::time_t tp;
    unpack(tp, buffer, position, comm);
#if HAVE_MPI
    data = Opm::TimeService::from_time_t(tp);
#endif
}
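
// Explicit instantiations. The container templates above are defined only in
// this translation unit, so every combination used from other translation
// units must be instantiated here to be available at link time.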
#define INSTANTIATE_PACK_VECTOR(...) \
    template std::size_t packSize(const std::vector<__VA_ARGS__>& data, \
                                  Opm::Parallel::MPIComm comm); \
    template void pack(const std::vector<__VA_ARGS__>& data, \
                       std::vector<char>& buffer, int& position, \
                       Opm::Parallel::MPIComm comm); \
    template void unpack(std::vector<__VA_ARGS__>& data, \
                         std::vector<char>& buffer, int& position, \
                         Opm::Parallel::MPIComm comm);
INSTANTIATE_PACK_VECTOR(float)
INSTANTIATE_PACK_VECTOR(double)
INSTANTIATE_PACK_VECTOR(std::vector<double>)
INSTANTIATE_PACK_VECTOR(bool)
INSTANTIATE_PACK_VECTOR(char)
INSTANTIATE_PACK_VECTOR(int)
INSTANTIATE_PACK_VECTOR(unsigned char)
INSTANTIATE_PACK_VECTOR(unsigned int)
INSTANTIATE_PACK_VECTOR(unsigned long int)
INSTANTIATE_PACK_VECTOR(unsigned long long int)
INSTANTIATE_PACK_VECTOR(std::time_t)
INSTANTIATE_PACK_VECTOR(std::array<double,3>)
INSTANTIATE_PACK_VECTOR(std::pair<bool,double>)
INSTANTIATE_PACK_VECTOR(std::map<std::string,int>)
INSTANTIATE_PACK_VECTOR(std::pair<std::string,std::vector<size_t>>)
INSTANTIATE_PACK_VECTOR(std::pair<int,std::vector<int>>)
INSTANTIATE_PACK_VECTOR(std::pair<int,std::vector<size_t>>)
INSTANTIATE_PACK_VECTOR(std::string)
#undef INSTANTIATE_PACK_VECTOR
#undef INSTANTIATE_PACK_SET
#define INSTANTIATE_PACK(...) \
    template std::size_t packSize(const __VA_ARGS__& data, \
                                  Opm::Parallel::MPIComm comm); \
    template void pack(const __VA_ARGS__& data, \
                       std::vector<char>& buffer, int& position, \
                       Opm::Parallel::MPIComm comm); \
    template void unpack(__VA_ARGS__& data, \
                         std::vector<char>& buffer, int& position, \
                         Opm::Parallel::MPIComm comm);
INSTANTIATE_PACK(float)
INSTANTIATE_PACK(double)
INSTANTIATE_PACK(bool)
INSTANTIATE_PACK(int)
INSTANTIATE_PACK(unsigned char)
INSTANTIATE_PACK(unsigned int)
INSTANTIATE_PACK(unsigned long int)
INSTANTIATE_PACK(unsigned long long int)
INSTANTIATE_PACK(std::array<short,3>)
INSTANTIATE_PACK(std::array<bool,3>)
INSTANTIATE_PACK(std::array<int,3>)
INSTANTIATE_PACK(std::map<std::pair<int,int>,std::pair<bool,double>>)
INSTANTIATE_PACK(std::optional<double>)
INSTANTIATE_PACK(std::optional<std::string>)
INSTANTIATE_PACK(std::pair<double,double>)
INSTANTIATE_PACK(std::optional<std::pair<double,double>>)
INSTANTIATE_PACK(std::map<std::string,std::vector<int>>)
INSTANTIATE_PACK(std::map<std::string,std::map<std::pair<int,int>,int>>)
INSTANTIATE_PACK(std::map<std::string,int>)
INSTANTIATE_PACK(std::map<std::string,double>)
INSTANTIATE_PACK(std::map<int,int>)
INSTANTIATE_PACK(std::map<int,data::AquiferData>)
INSTANTIATE_PACK(std::unordered_map<std::string,size_t>)
INSTANTIATE_PACK(std::unordered_map<std::string,std::string>)
INSTANTIATE_PACK(std::unordered_set<std::string>)
INSTANTIATE_PACK(std::set<std::string>)
#undef INSTANTIATE_PACK
} // end namespace Mpi
RestartValue loadParallelRestart(const EclipseIO* eclIO, Action::State& actionState, SummaryState& summaryState,
                                 const std::vector<RestartKey>& solutionKeys,
                                 const std::vector<RestartKey>