ParallelWellInfo: template Scalar type

Arne Morten Kvarving 2024-02-20 15:35:13 +01:00
parent 81189b89c7
commit 16f5290038
33 changed files with 316 additions and 218 deletions

View File

@ -274,11 +274,11 @@ getLocalWells(const int timeStepIdx) const
}
template<class Scalar>
std::vector<std::reference_wrapper<ParallelWellInfo>>
std::vector<std::reference_wrapper<ParallelWellInfo<Scalar>>>
BlackoilWellModelGeneric<Scalar>::
createLocalParallelWellInfo(const std::vector<Well>& wells)
{
std::vector<std::reference_wrapper<ParallelWellInfo>> local_parallel_well_info;
std::vector<std::reference_wrapper<ParallelWellInfo<Scalar>>> local_parallel_well_info;
local_parallel_well_info.reserve(wells.size());
for (const auto& well : wells)
{
@ -318,7 +318,7 @@ initializeWellPerfData()
int connection_index = 0;
// INVALID_ECL_INDEX marks no above perf available
int connection_index_above = ParallelWellInfo::INVALID_ECL_INDEX;
int connection_index_above = ParallelWellInfo<Scalar>::INVALID_ECL_INDEX;
well_perf_data_[well_index].clear();
well_perf_data_[well_index].reserve(well.getConnections().size());
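For orientation, a hypothetical sketch (not part of this diff) of how the above/current perforation chain is typically registered, assuming a ParallelWellInfo<double> named pw_info and Opm::Connection::global_index():
int above = Opm::ParallelWellInfo<double>::INVALID_ECL_INDEX; // no perforation above the first one
for (const auto& connection : well.getConnections()) {
    const int current = static_cast<int>(connection.global_index());
    pw_info.pushBackEclIndex(above, current); // register the (above, current) pair for this perforation
    above = current;
}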

View File

@ -58,7 +58,7 @@ namespace Opm {
template<class Scalar> class GasLiftWellState;
class Group;
class GuideRateConfig;
class ParallelWellInfo;
template<class Scalar> class ParallelWellInfo;
class RestartValue;
class Schedule;
struct SimulatorUpdate;
@ -323,7 +323,7 @@ protected:
/// \brief Create the parallel well information
/// \param wells The local wells from the ECL schedule
std::vector<std::reference_wrapper<ParallelWellInfo>>
std::vector<std::reference_wrapper<ParallelWellInfo<Scalar>>>
createLocalParallelWellInfo(const std::vector<Well>& wells);
void initializeWellProdIndCalculators();
@ -546,8 +546,8 @@ protected:
std::vector<int> local_shut_wells_{};
std::vector<ParallelWellInfo> parallel_well_info_;
std::vector<std::reference_wrapper<ParallelWellInfo>> local_parallel_well_info_;
std::vector<ParallelWellInfo<Scalar>> parallel_well_info_;
std::vector<std::reference_wrapper<ParallelWellInfo<Scalar>>> local_parallel_well_info_;
std::vector<WellProdIndexCalculator> prod_index_calc_;
mutable ParallelWBPCalculation wbpCalculationService_;

View File

@ -68,7 +68,7 @@ namespace Opm
using typename Base::PressureMatrix;
MultisegmentWell(const Well& well,
const ParallelWellInfo& pw_info,
const ParallelWellInfo<Scalar>& pw_info,
const int time_step,
const ModelParameters& param,
const RateConverterType& rate_converter,

View File

@ -57,7 +57,7 @@ namespace Opm
template <typename TypeTag>
MultisegmentWell<TypeTag>::
MultisegmentWell(const Well& well,
const ParallelWellInfo& pw_info,
const ParallelWellInfo<Scalar>& pw_info,
const int time_step,
const ModelParameters& param,
const RateConverterType& rate_converter,

View File

@ -173,7 +173,7 @@ Opm::ParallelWBPCalculation::evalCellSource(Evaluator evalCellSrc)
std::size_t
Opm::ParallelWBPCalculation::
createCalculator(const Well& well,
const ParallelWellInfo& parallelWellInfo,
const ParallelWellInfo<double>& parallelWellInfo,
const std::vector<int>& localConnIdx,
EvaluatorFactory makeWellSourceEvaluator)
{

View File

@ -36,7 +36,7 @@
namespace Opm {
class GridDims;
class ParallelWellInfo;
template<class Scalar> class ParallelWellInfo;
class PAvg;
class Well;
}
@ -112,7 +112,7 @@ public:
/// well.
std::size_t
createCalculator(const Well& well,
const ParallelWellInfo& parallelWellInfo,
const ParallelWellInfo<double>& parallelWellInfo,
const std::vector<int>& localConnIdx,
EvaluatorFactory makeWellSourceEvaluator);

View File

@ -45,15 +45,33 @@ struct CommPolicy<double*>
return 1;
}
};
template<>
struct CommPolicy<float*>
{
using Type = float*;
using IndexedType = float;
using IndexedTypeFlag = Dune::SizeOne;
static const void* getAddress(const float*& v, int index)
{
return v + index;
}
static int getSize(const float*&, int)
{
return 1;
}
};
#endif
}
namespace Opm
{
GlobalPerfContainerFactory::GlobalPerfContainerFactory(const IndexSet& local_indices,
const Parallel::Communication comm,
const int num_local_perfs)
template<class Scalar>
GlobalPerfContainerFactory<Scalar>::
GlobalPerfContainerFactory(const IndexSet& local_indices,
const Parallel::Communication comm,
const int num_local_perfs)
: local_indices_(local_indices), comm_(comm)
{
if ( comm_.size() > 1 )
@ -101,13 +119,13 @@ GlobalPerfContainerFactory::GlobalPerfContainerFactory(const IndexSet& local_ind
}
}
std::vector<double> GlobalPerfContainerFactory::createGlobal(const std::vector<double>& local_perf_container,
std::size_t num_components) const
template<class Scalar>
std::vector<Scalar> GlobalPerfContainerFactory<Scalar>::
createGlobal(const std::vector<Scalar>& local_perf_container,
std::size_t num_components) const
{
// Could become templated later.
using Value = double;
using Value = Scalar;
if (comm_.size() > 1)
{
@ -152,8 +170,11 @@ std::vector<double> GlobalPerfContainerFactory::createGlobal(const std::vector<d
}
}
void GlobalPerfContainerFactory::copyGlobalToLocal(const std::vector<double>& global, std::vector<double>& local,
std::size_t num_components) const
template<class Scalar>
void GlobalPerfContainerFactory<Scalar>::
copyGlobalToLocal(const std::vector<Scalar>& global,
std::vector<Scalar>& local,
std::size_t num_components) const
{
if (global.empty())
{
@ -182,19 +203,22 @@ void GlobalPerfContainerFactory::copyGlobalToLocal(const std::vector<double>& gl
}
}
int GlobalPerfContainerFactory::numGlobalPerfs() const
template<class Scalar>
int GlobalPerfContainerFactory<Scalar>::numGlobalPerfs() const
{
return num_global_perfs_;
}
CommunicateAboveBelow::CommunicateAboveBelow([[maybe_unused]] const Parallel::Communication& comm)
template<class Scalar>
CommunicateAboveBelow<Scalar>::
CommunicateAboveBelow([[maybe_unused]] const Parallel::Communication& comm)
#if HAVE_MPI
: comm_(comm), interface_(comm_)
#endif
{}
void CommunicateAboveBelow::clear()
template<class Scalar>
void CommunicateAboveBelow<Scalar>::clear()
{
#if HAVE_MPI
above_indices_ = {};
@ -205,7 +229,8 @@ void CommunicateAboveBelow::clear()
num_local_perfs_ = 0;
}
void CommunicateAboveBelow::beginReset()
template<class Scalar>
void CommunicateAboveBelow<Scalar>::beginReset()
{
clear();
#if HAVE_MPI
@ -217,7 +242,8 @@ void CommunicateAboveBelow::beginReset()
#endif
}
int CommunicateAboveBelow::endReset()
template<class Scalar>
int CommunicateAboveBelow<Scalar>::endReset()
{
#if HAVE_MPI
if (comm_.size() > 1)
@ -226,7 +252,7 @@ int CommunicateAboveBelow::endReset()
current_indices_.endResize();
remote_indices_.setIndexSets(current_indices_, above_indices_, comm_);
// It is mandatory not to set includeSelf to true, as that would skip some entries.
remote_indices_.rebuild<true>();
remote_indices_.template rebuild<true>();
using FromSet = Dune::EnumItem<Attribute,owner>;
using ToSet = Dune::AllSet<Attribute>;
interface_.build(remote_indices_, FromSet(), ToSet());
@ -236,8 +262,9 @@ int CommunicateAboveBelow::endReset()
return num_local_perfs_;
}
template<class Scalar>
template<class RAIterator>
void CommunicateAboveBelow::partialSumPerfValues(RAIterator begin, RAIterator end) const
void CommunicateAboveBelow<Scalar>::partialSumPerfValues(RAIterator begin, RAIterator end) const
{
if (this->comm_.size() < 2)
{
@ -295,28 +322,27 @@ void CommunicateAboveBelow::partialSumPerfValues(RAIterator begin, RAIterator en
}
}
using dIter = typename std::vector<double>::iterator;
template void CommunicateAboveBelow::partialSumPerfValues<dIter>(dIter begin, dIter end) const;
template<class Scalar>
struct CopyGatherScatter
{
static const double& gather(const double* a, std::size_t i)
static const Scalar& gather(const Scalar* a, std::size_t i)
{
return a[i];
}
static void scatter(double* a, const double& v, std::size_t i)
static void scatter(Scalar* a, const Scalar& v, std::size_t i)
{
a[i] = v;
}
};
std::vector<double> CommunicateAboveBelow::communicateAbove(double first_above,
const double* current,
std::size_t size)
template<class Scalar>
std::vector<Scalar> CommunicateAboveBelow<Scalar>::
communicateAbove(Scalar first_above,
const Scalar* current,
std::size_t size)
{
std::vector<double> above(size, first_above);
std::vector<Scalar> above(size, first_above);
#if HAVE_MPI
if (comm_.size() > 1)
@ -326,7 +352,7 @@ std::vector<double> CommunicateAboveBelow::communicateAbove(double first_above,
// passing const double*& and double* as parameter is
// incompatible with function decl template<Data> forward(const Data&, Data&))
// That would need the first argument to be double* const&
communicator_.forward<CopyGatherScatter>(const_cast<double*>(current), aboveData);
communicator_.forward<CopyGatherScatter<Scalar>>(const_cast<Scalar*>(current), aboveData);
}
else
#endif
@ -339,11 +365,14 @@ std::vector<double> CommunicateAboveBelow::communicateAbove(double first_above,
}
return above;
}
std::vector<double> CommunicateAboveBelow::communicateBelow(double last_below,
const double* current,
std::size_t size)
template<class Scalar>
std::vector<Scalar> CommunicateAboveBelow<Scalar>::
communicateBelow(Scalar last_below,
const Scalar* current,
std::size_t size)
{
std::vector<double> below(size, last_below);
std::vector<Scalar> below(size, last_below);
#if HAVE_MPI
if (comm_.size() > 1)
@ -353,7 +382,7 @@ std::vector<double> CommunicateAboveBelow::communicateBelow(double last_below,
// passing const double*& and double* as parameter is
// incompatible with function decl template<Data> backward(Data&, const Data&)
// That would need the first argument to be double* const&
communicator_.backward<CopyGatherScatter>(belowData, const_cast<double*>(current));
communicator_.backward<CopyGatherScatter<Scalar>>(belowData, const_cast<Scalar*>(current));
}
else
#endif
@ -367,9 +396,11 @@ std::vector<double> CommunicateAboveBelow::communicateBelow(double last_below,
return below;
}
void CommunicateAboveBelow::pushBackEclIndex([[maybe_unused]] int above,
[[maybe_unused]] int current,
[[maybe_unused]] bool isOwner)
template<class Scalar>
void CommunicateAboveBelow<Scalar>::
pushBackEclIndex([[maybe_unused]] int above,
[[maybe_unused]] int current,
[[maybe_unused]] bool isOwner)
{
#if HAVE_MPI
if (comm_.size() > 1)
@ -386,8 +417,8 @@ void CommunicateAboveBelow::pushBackEclIndex([[maybe_unused]] int above,
++num_local_perfs_;
}
void ParallelWellInfo::DestroyComm::operator()(Parallel::Communication* comm)
template<class Scalar>
void ParallelWellInfo<Scalar>::DestroyComm::operator()(Parallel::Communication* comm)
{
#if HAVE_MPI
// Only delete custom communicators.
@ -406,28 +437,31 @@ void ParallelWellInfo::DestroyComm::operator()(Parallel::Communication* comm)
delete comm;
}
const CommunicateAboveBelow::IndexSet& CommunicateAboveBelow::getIndexSet() const
template<class Scalar>
const typename CommunicateAboveBelow<Scalar>::IndexSet&
CommunicateAboveBelow<Scalar>::getIndexSet() const
{
return current_indices_;
}
int CommunicateAboveBelow::numLocalPerfs() const
template<class Scalar>
int CommunicateAboveBelow<Scalar>::numLocalPerfs() const
{
return num_local_perfs_;
}
ParallelWellInfo::ParallelWellInfo(const std::string& name,
bool hasLocalCells)
template<class Scalar>
ParallelWellInfo<Scalar>::ParallelWellInfo(const std::string& name,
bool hasLocalCells)
: name_(name), hasLocalCells_ (hasLocalCells),
isOwner_(true), rankWithFirstPerf_(-1),
comm_(new Parallel::Communication(Dune::MPIHelper::getLocalCommunicator())),
commAboveBelow_(new CommunicateAboveBelow(*comm_))
commAboveBelow_(new CommunicateAboveBelow<Scalar>(*comm_))
{}
ParallelWellInfo::ParallelWellInfo(const std::pair<std::string, bool>& well_info,
[[maybe_unused]] Parallel::Communication allComm)
template<class Scalar>
ParallelWellInfo<Scalar>::ParallelWellInfo(const std::pair<std::string, bool>& well_info,
[[maybe_unused]] Parallel::Communication allComm)
: name_(well_info.first), hasLocalCells_(well_info.second),
rankWithFirstPerf_(-1)
{
@ -439,12 +473,12 @@ ParallelWellInfo::ParallelWellInfo(const std::pair<std::string, bool>& well_info
#else
comm_.reset(new Parallel::Communication(Dune::MPIHelper::getLocalCommunicator()));
#endif
commAboveBelow_.reset(new CommunicateAboveBelow(*comm_));
commAboveBelow_.reset(new CommunicateAboveBelow<Scalar>(*comm_));
isOwner_ = (comm_->rank() == 0);
}
void ParallelWellInfo::communicateFirstPerforation(bool hasFirst)
template<class Scalar>
void ParallelWellInfo<Scalar>::communicateFirstPerforation(bool hasFirst)
{
int first = hasFirst;
std::vector<int> firstVec(comm_->size());
@ -455,29 +489,32 @@ void ParallelWellInfo::communicateFirstPerforation(bool hasFirst)
rankWithFirstPerf_ = found - firstVec.begin();
}
void ParallelWellInfo::pushBackEclIndex(int above, int current)
template<class Scalar>
void ParallelWellInfo<Scalar>::pushBackEclIndex(int above, int current)
{
commAboveBelow_->pushBackEclIndex(above, current);
}
void ParallelWellInfo::beginReset()
template<class Scalar>
void ParallelWellInfo<Scalar>::beginReset()
{
commAboveBelow_->beginReset();
}
void ParallelWellInfo::endReset()
template<class Scalar>
void ParallelWellInfo<Scalar>::endReset()
{
int local_num_perfs = commAboveBelow_->endReset();
globalPerfCont_
.reset(new GlobalPerfContainerFactory(commAboveBelow_->getIndexSet(),
*comm_,
local_num_perfs));
.reset(new GlobalPerfContainerFactory<Scalar>(commAboveBelow_->getIndexSet(),
*comm_,
local_num_perfs));
}
template<class Scalar>
template<typename It>
typename It::value_type
ParallelWellInfo::sumPerfValues(It begin, It end) const
ParallelWellInfo<Scalar>::sumPerfValues(It begin, It end) const
{
using V = typename It::value_type;
/// \todo cater for overlap later. Currently only owner
@ -485,19 +522,16 @@ ParallelWellInfo::sumPerfValues(It begin, It end) const
return communication().sum(local);
}
using cdIter = typename std::vector<double>::const_iterator;
template typename cdIter::value_type ParallelWellInfo::sumPerfValues<cdIter>(cdIter,cdIter) const;
template typename dIter::value_type ParallelWellInfo::sumPerfValues<dIter>(dIter,dIter) const;
void ParallelWellInfo::clear()
template<class Scalar>
void ParallelWellInfo<Scalar>::clear()
{
commAboveBelow_->clear();
globalPerfCont_.reset();
}
template<class Scalar>
template<class T>
T ParallelWellInfo::broadcastFirstPerforationValue(const T& t) const
T ParallelWellInfo<Scalar>::broadcastFirstPerforationValue(const T& t) const
{
T res = t;
if (rankWithFirstPerf_ >= 0) {
@ -515,41 +549,50 @@ T ParallelWellInfo::broadcastFirstPerforationValue(const T& t) const
return res;
}
template int ParallelWellInfo::broadcastFirstPerforationValue<int>(const int&) const;
template double ParallelWellInfo::broadcastFirstPerforationValue<double>(const double&) const;
template int ParallelWellInfo<double>::broadcastFirstPerforationValue<int>(const int&) const;
template double ParallelWellInfo<double>::broadcastFirstPerforationValue<double>(const double&) const;
std::vector<double> ParallelWellInfo::communicateAboveValues(double zero_value,
const double* current_values,
std::size_t size) const
template<class Scalar>
std::vector<Scalar> ParallelWellInfo<Scalar>::
communicateAboveValues(Scalar zero_value,
const Scalar* current_values,
std::size_t size) const
{
return commAboveBelow_->communicateAbove(zero_value, current_values,
size);
}
std::vector<double> ParallelWellInfo::communicateAboveValues(double zero_value,
const std::vector<double>& current_values) const
template<class Scalar>
std::vector<Scalar> ParallelWellInfo<Scalar>::
communicateAboveValues(Scalar zero_value,
const std::vector<Scalar>& current_values) const
{
return commAboveBelow_->communicateAbove(zero_value, current_values.data(),
current_values.size());
}
std::vector<double> ParallelWellInfo::communicateBelowValues(double last_value,
const double* current_values,
std::size_t size) const
template<class Scalar>
std::vector<Scalar> ParallelWellInfo<Scalar>::
communicateBelowValues(Scalar last_value,
const Scalar* current_values,
std::size_t size) const
{
return commAboveBelow_->communicateBelow(last_value, current_values,
size);
}
std::vector<double> ParallelWellInfo::communicateBelowValues(double last_value,
const std::vector<double>& current_values) const
template<class Scalar>
std::vector<Scalar> ParallelWellInfo<Scalar>::
communicateBelowValues(Scalar last_value,
const std::vector<Scalar>& current_values) const
{
return commAboveBelow_->communicateBelow(last_value, current_values.data(),
current_values.size());
}
const GlobalPerfContainerFactory&
ParallelWellInfo::getGlobalPerfContainerFactory() const
template<class Scalar>
const GlobalPerfContainerFactory<Scalar>&
ParallelWellInfo<Scalar>::getGlobalPerfContainerFactory() const
{
if(globalPerfCont_)
return *globalPerfCont_;
@ -558,12 +601,15 @@ ParallelWellInfo::getGlobalPerfContainerFactory() const
"No ecl indices have been added via beginReset, pushBackEclIndex, endReset");
}
bool operator<(const ParallelWellInfo& well1, const ParallelWellInfo& well2)
template<class Scalar>
bool operator<(const ParallelWellInfo<Scalar>& well1, const ParallelWellInfo<Scalar>& well2)
{
return well1.name() < well2.name() || (! (well2.name() < well1.name()) && well1.hasLocalCells() < well2.hasLocalCells());
return well1.name() < well2.name() || (! (well2.name() < well1.name()) &&
well1.hasLocalCells() < well2.hasLocalCells());
}
bool operator==(const ParallelWellInfo& well1, const ParallelWellInfo& well2)
template<class Scalar>
bool operator==(const ParallelWellInfo<Scalar>& well1, const ParallelWellInfo<Scalar>& well2)
{
bool ret = well1.name() == well2.name() && well1.hasLocalCells() == well2.hasLocalCells()
&& well1.isOwner() == well2.isOwner();
@ -575,56 +621,67 @@ bool operator==(const ParallelWellInfo& well1, const ParallelWellInfo& well2)
return ret;
}
bool operator!=(const ParallelWellInfo& well1, const ParallelWellInfo& well2)
template<class Scalar>
bool operator!=(const ParallelWellInfo<Scalar>& well1, const ParallelWellInfo<Scalar>& well2)
{
return ! (well1 == well2);
}
bool operator<(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well)
template<class Scalar>
bool operator<(const std::pair<std::string, bool>& pair, const ParallelWellInfo<Scalar>& well)
{
return pair.first < well.name() || ( !( well.name() < pair.first ) && pair.second < well.hasLocalCells() );
}
bool operator<( const ParallelWellInfo& well, const std::pair<std::string, bool>& pair)
template<class Scalar>
bool operator<( const ParallelWellInfo<Scalar>& well, const std::pair<std::string, bool>& pair)
{
return well.name() < pair.first || ( !( pair.first < well.name() ) && well.hasLocalCells() < pair.second );
}
bool operator==(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well)
template<class Scalar>
bool operator==(const std::pair<std::string, bool>& pair, const ParallelWellInfo<Scalar>& well)
{
return pair.first == well.name() && pair.second == well.hasLocalCells();
}
bool operator==(const ParallelWellInfo& well, const std::pair<std::string, bool>& pair)
template<class Scalar>
bool operator==(const ParallelWellInfo<Scalar>& well, const std::pair<std::string, bool>& pair)
{
return pair == well;
}
bool operator!=(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well)
template<class Scalar>
bool operator!=(const std::pair<std::string, bool>& pair, const ParallelWellInfo<Scalar>& well)
{
return pair.first != well.name() || pair.second != well.hasLocalCells();
}
bool operator!=(const ParallelWellInfo& well, const std::pair<std::string, bool>& pair)
template<class Scalar>
bool operator!=(const ParallelWellInfo<Scalar>& well, const std::pair<std::string, bool>& pair)
{
return pair != well;
}
CheckDistributedWellConnections::CheckDistributedWellConnections(const Well& well,
const ParallelWellInfo& info)
template<class Scalar>
CheckDistributedWellConnections<Scalar>::
CheckDistributedWellConnections(const Well& well,
const ParallelWellInfo<Scalar>& info)
: well_(well), pwinfo_(info)
{
foundConnections_.resize(well.getConnections().size(), 0);
}
void
CheckDistributedWellConnections::connectionFound(std::size_t i)
template<class Scalar>
void CheckDistributedWellConnections<Scalar>::
connectionFound(std::size_t i)
{
foundConnections_[i] = 1;
}
bool
CheckDistributedWellConnections::checkAllConnectionsFound()
template<class Scalar>
bool CheckDistributedWellConnections<Scalar>::
checkAllConnectionsFound()
{
// Ecl does not hold any information about remote connections.
assert(pwinfo_.communication().max(foundConnections_.size()) == foundConnections_.size());
@ -652,4 +709,30 @@ CheckDistributedWellConnections::checkAllConnectionsFound()
}
return !missingCells;
}
template<class Scalar> using dIter = typename std::vector<Scalar>::iterator;
template<class Scalar> using cdIter = typename std::vector<Scalar>::const_iterator;
#define INSTANCE(T) \
template class CheckDistributedWellConnections<T>; \
template class CommunicateAboveBelow<T>; \
template class GlobalPerfContainerFactory<T>; \
template class ParallelWellInfo<T>; \
template typename cdIter<T>::value_type \
ParallelWellInfo<T>::sumPerfValues<cdIter<T>>(cdIter<T>,cdIter<T>) const; \
template typename dIter<T>::value_type \
ParallelWellInfo<T>::sumPerfValues<dIter<T>>(dIter<T>,dIter<T>) const; \
template void CommunicateAboveBelow<T>::partialSumPerfValues<dIter<T>>(dIter<T>, dIter<T>) const; \
template bool operator<(const ParallelWellInfo<T>&, const ParallelWellInfo<T>&); \
template bool operator<(const ParallelWellInfo<T>&, const std::pair<std::string, bool>&); \
template bool operator<(const std::pair<std::string, bool>&, const ParallelWellInfo<T>&); \
template bool operator==(const ParallelWellInfo<T>&, const ParallelWellInfo<T>&); \
template bool operator==(const ParallelWellInfo<T>& well, const std::pair<std::string, bool>&); \
template bool operator==(const std::pair<std::string, bool>&, const ParallelWellInfo<T>&); \
template bool operator!=(const ParallelWellInfo<T>&, const ParallelWellInfo<T>&); \
template bool operator!=(const std::pair<std::string, bool>&, const ParallelWellInfo<T>&); \
template bool operator!=(const ParallelWellInfo<T>&, const std::pair<std::string, bool>&);
INSTANCE(double)
} // end namespace Opm
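As an aside (an assumption, not something this diff does): because the explicit instantiations are collected in the INSTANCE macro, a single-precision build would presumably only need the macro emitted for float as well, which the CommPolicy<float*> specialization added above already caters for:
// Hypothetical, e.g. guarded by a build option for single-precision support:
INSTANCE(float)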

View File

@ -29,13 +29,13 @@
#include <memory>
namespace Opm
{
namespace Opm {
class Well;
/// \brief Class to facilitate getting values associated with the above/below perforation
///
template<class Scalar>
class CommunicateAboveBelow
{
public:
@ -83,8 +83,8 @@ public:
/// \param current C-array of the values at the perforations
/// \param size The size of the C-array and the returned vector
/// \return a vector containing the values for the perforation above.
std::vector<double> communicateAbove(double first_value,
const double* current,
std::vector<Scalar> communicateAbove(Scalar first_value,
const Scalar* current,
std::size_t size);
/// \brief Creates an array of values for the perforation below.
@ -92,8 +92,8 @@ public:
/// \param current C-array of the values at the perforations
/// \param size The size of the C-array and the returned vector
/// \return a vector containing the values for the perforation below.
std::vector<double> communicateBelow(double first_value,
const double* current,
std::vector<Scalar> communicateBelow(Scalar first_value,
const Scalar* current,
std::size_t size);
/// \brief Do a (in place) partial sum on values attached to all perforations.
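Before the remaining hunks of this header, a minimal usage sketch of the now-templated CommunicateAboveBelow (assumed call sequence, not taken from this diff; the index registration via beginReset()/pushBackEclIndex()/endReset() is elided):
Opm::CommunicateAboveBelow<float> comm_ab(communicator);          // 'communicator' assumed in scope
// ... beginReset(), pushBackEclIndex(above, current) per perforation, endReset() ...
std::vector<float> perf_pressure(comm_ab.numLocalPerfs(), 0.0f);  // one value per local perforation
std::vector<float> above = comm_ab.communicateAbove(0.0f,
                                                    perf_pressure.data(),
                                                    perf_pressure.size());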
@ -131,11 +131,12 @@ private:
/// even if the data is distributed. This class is supposed to help with that by
/// computing the global data arrays for the well and copy computed values back to
/// the distributed representation.
template<class Scalar>
class GlobalPerfContainerFactory
{
public:
using IndexSet = CommunicateAboveBelow::IndexSet;
using Attribute = CommunicateAboveBelow::Attribute;
using IndexSet = typename CommunicateAboveBelow<Scalar>::IndexSet;
using Attribute = typename CommunicateAboveBelow<Scalar>::Attribute;
using GlobalIndex = typename IndexSet::IndexPair::GlobalIndex;
/// \brief Constructor
@ -149,14 +150,14 @@ public:
/// \param num_components the number of components per perforation.
/// \return A container with values attached to all perforations of a well.
/// Values are ordered by the index of the perforation in the ECL schedule.
std::vector<double> createGlobal(const std::vector<double>& local_perf_container,
std::vector<Scalar> createGlobal(const std::vector<Scalar>& local_perf_container,
std::size_t num_components) const;
/// \brief Copies the values of the global perforation to the local representation
/// \param global values attached to all perforations of a well (as if the well would live on one process)
/// \param num_components the number of components per perforation.
/// \param[out] local The values attached to the local perforations only.
void copyGlobalToLocal(const std::vector<double>& global, std::vector<double>& local,
void copyGlobalToLocal(const std::vector<Scalar>& global, std::vector<Scalar>& local,
std::size_t num_components) const;
int numGlobalPerfs() const;
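A hedged sketch of the gather/scatter round trip this factory exists for (assumed caller-side code; local_values, num_components and pw_info are placeholders):
const auto& factory = pw_info.getGlobalPerfContainerFactory();
// Gather the distributed per-perforation values into one container ordered by ECL index.
std::vector<double> global = factory.createGlobal(local_values, num_components);
// ... work on 'global' as if the whole well lived on this process ...
factory.copyGlobalToLocal(global, local_values, num_components);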
@ -180,6 +181,7 @@ private:
/// \brief Class encapsulating some information about parallel wells
///
/// e.g. It provides a communicator for well information
template<class Scalar>
class ParallelWellInfo
{
public:
@ -218,30 +220,30 @@ public:
/// \param current C-array of the values at the perforations
/// \param size The size of the C-array and the returned vector
/// \return a vector containing the values for the perforation above.
std::vector<double> communicateAboveValues(double first_value,
const double* current,
std::vector<Scalar> communicateAboveValues(Scalar first_value,
const Scalar* current,
std::size_t size) const;
/// \brief Creates an array of values for the perforation above.
/// \param first_value Value to use for above of the first perforation
/// \param current vector of current values
std::vector<double> communicateAboveValues(double first_value,
const std::vector<double>& current) const;
std::vector<Scalar> communicateAboveValues(Scalar first_value,
const std::vector<Scalar>& current) const;
/// \brief Creates an array of values for the perforation below.
/// \param last_value Value to use for below of the last perforation
/// \param current C-array of the values at the perforations
/// \param size The size of the C-array and the returned vector
/// \return a vector containing the values for the perforation below.
std::vector<double> communicateBelowValues(double last_value,
const double* current,
std::vector<Scalar> communicateBelowValues(Scalar last_value,
const Scalar* current,
std::size_t size) const;
/// \brief Creates an array of values for the perforation below.
/// \param last_value Value to use for below of the last perforation
/// \param current vector of current values
std::vector<double> communicateBelowValues(double last_value,
const std::vector<double>& current) const;
std::vector<Scalar> communicateBelowValues(Scalar last_value,
const std::vector<Scalar>& current) const;
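A brief usage sketch mirroring the tests further down (assumptions flagged in comments):
Opm::ParallelWellInfo<double> pw_info({"PROD1", true}, comm); // {name, hasLocalCells}; 'comm' assumed in scope
pw_info.communicateFirstPerforation(true);                    // announce that this rank has the first perforation
// After beginReset()/pushBackEclIndex()/endReset():
std::vector<double> perf_pressure(3, 100.0e5);                // assumed: one value per local perforation
std::vector<double> above = pw_info.communicateAboveValues(0.0, perf_pressure);
double first = pw_info.broadcastFirstPerforationValue(perf_pressure.front());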
/// \brief Adds information about the ecl indices of the perforations.
///
@ -301,7 +303,7 @@ public:
/// That is a container that holds data for every perforation no matter where
/// it is stored. The container is ordered by ascending index of the perforations
/// in the ECL schedule.
const GlobalPerfContainerFactory& getGlobalPerfContainerFactory() const;
const GlobalPerfContainerFactory<Scalar>& getGlobalPerfContainerFactory() const;
private:
@ -326,19 +328,20 @@ private:
std::unique_ptr<Parallel::Communication, DestroyComm> comm_;
/// \brief used to communicate the values for the perforation above.
std::unique_ptr<CommunicateAboveBelow> commAboveBelow_;
std::unique_ptr<CommunicateAboveBelow<Scalar>> commAboveBelow_;
std::unique_ptr<GlobalPerfContainerFactory> globalPerfCont_;
std::unique_ptr<GlobalPerfContainerFactory<Scalar>> globalPerfCont_;
};
/// \brief Class checking that all connections are on active cells
///
/// Works for distributed wells, too
template<class Scalar>
class CheckDistributedWellConnections
{
public:
CheckDistributedWellConnections(const Well& well,
const ParallelWellInfo& info);
const ParallelWellInfo<Scalar>& info);
/// \brief Indicate that the i-th completion was found
///
@ -351,26 +354,36 @@ public:
private:
std::vector<std::size_t> foundConnections_;
const Well& well_;
const ParallelWellInfo& pwinfo_;
const ParallelWellInfo<Scalar>& pwinfo_;
};
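A sketch of the intended use (assumed call sites; isOnLocalGrid() is a hypothetical helper meaning "this connection's cell is active on this rank"):
Opm::CheckDistributedWellConnections<double> checker(well, pw_info);
const auto& connections = well.getConnections();
for (std::size_t i = 0; i < connections.size(); ++i) {
    if (isOnLocalGrid(connections[i]))   // hypothetical locality check
        checker.connectionFound(i);
}
if (!checker.checkAllConnectionsFound())
    throw std::runtime_error("Well " + well.name() + " has connections on inactive cells");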
bool operator<(const ParallelWellInfo& well1, const ParallelWellInfo& well2);
template<class Scalar>
bool operator<(const ParallelWellInfo<Scalar>& well1, const ParallelWellInfo<Scalar>& well2);
bool operator==(const ParallelWellInfo& well1, const ParallelWellInfo& well2);
template<class Scalar>
bool operator==(const ParallelWellInfo<Scalar>& well1, const ParallelWellInfo<Scalar>& well2);
bool operator!=(const ParallelWellInfo& well1, const ParallelWellInfo& well2);
template<class Scalar>
bool operator!=(const ParallelWellInfo<Scalar>& well1, const ParallelWellInfo<Scalar>& well2);
bool operator<(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well);
template<class Scalar>
bool operator<(const std::pair<std::string, bool>& pair, const ParallelWellInfo<Scalar>& well);
bool operator<( const ParallelWellInfo& well, const std::pair<std::string, bool>& pair);
template<class Scalar>
bool operator<( const ParallelWellInfo<Scalar>& well, const std::pair<std::string, bool>& pair);
bool operator==(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well);
template<class Scalar>
bool operator==(const std::pair<std::string, bool>& pair, const ParallelWellInfo<Scalar>& well);
bool operator==(const ParallelWellInfo& well, const std::pair<std::string, bool>& pair);
template<class Scalar>
bool operator==(const ParallelWellInfo<Scalar>& well, const std::pair<std::string, bool>& pair);
bool operator!=(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well);
template<class Scalar>
bool operator!=(const std::pair<std::string, bool>& pair, const ParallelWellInfo<Scalar>& well);
bool operator!=(const ParallelWellInfo& well, const std::pair<std::string, bool>& pair);
template<class Scalar>
bool operator!=(const ParallelWellInfo<Scalar>& well, const std::pair<std::string, bool>& pair);
} // end namespace Opm
#endif // OPM_PARALLELWELLINFO_HEADER_INCLUDED

View File

@ -31,7 +31,7 @@ namespace Opm {
template<class Scalar>
SingleWellState<Scalar>::
SingleWellState(const std::string& name_,
const ParallelWellInfo& pinfo,
const ParallelWellInfo<Scalar>& pinfo,
bool is_producer,
Scalar pressure_first_connection,
const std::vector<PerforationData<Scalar>>& perf_input,
@ -63,7 +63,7 @@ SingleWellState(const std::string& name_,
template<class Scalar>
SingleWellState<Scalar> SingleWellState<Scalar>::
serializationTestObject(const ParallelWellInfo& pinfo)
serializationTestObject(const ParallelWellInfo<Scalar>& pinfo)
{
SingleWellState result("testing", pinfo, true, 1.0, {}, PhaseUsage{}, 2.0);
result.perf_data = PerfData<Scalar>::serializationTestObject();

View File

@ -41,14 +41,14 @@ template<class Scalar>
class SingleWellState {
public:
SingleWellState(const std::string& name,
const ParallelWellInfo& pinfo,
const ParallelWellInfo<Scalar>& pinfo,
bool is_producer,
Scalar presssure_first_connection,
const std::vector<PerforationData<Scalar>>& perf_input,
const PhaseUsage& pu,
Scalar temp);
static SingleWellState serializationTestObject(const ParallelWellInfo& pinfo);
static SingleWellState serializationTestObject(const ParallelWellInfo<Scalar>& pinfo);
template<class Serializer>
void serializeOp(Serializer& serializer)
@ -79,7 +79,7 @@ public:
bool operator==(const SingleWellState&) const;
std::string name;
std::reference_wrapper<const ParallelWellInfo> parallel_info;
std::reference_wrapper<const ParallelWellInfo<Scalar>> parallel_info;
WellStatus status{WellStatus::OPEN};
bool producer;

View File

@ -120,7 +120,7 @@ namespace Opm
using BVectorWell = typename StdWellEval::BVectorWell;
StandardWell(const Well& well,
const ParallelWellInfo& pw_info,
const ParallelWellInfo<Scalar>& pw_info,
const int time_step,
const ModelParameters& param,
const RateConverterType& rate_converter,

View File

@ -42,7 +42,7 @@ namespace Opm
template<class Scalar, int numEq>
StandardWellEquations<Scalar,numEq>::
StandardWellEquations(const ParallelWellInfo& parallel_well_info)
StandardWellEquations(const ParallelWellInfo<Scalar>& parallel_well_info)
: parallelB_(duneB_, parallel_well_info)
{
duneB_.setBuildMode(OffDiagMatWell::row_wise);

View File

@ -34,7 +34,7 @@
namespace Opm
{
class ParallelWellInfo;
template<class Scalar> class ParallelWellInfo;
template<class Scalar, int numEq> class StandardWellEquationAccess;
#if COMPILE_BDA_BRIDGE
class WellContributions;
@ -65,7 +65,7 @@ public:
// block vector type
using BVector = Dune::BlockVector<Dune::FieldVector<Scalar,numEq>>;
StandardWellEquations(const ParallelWellInfo& parallel_well_info);
StandardWellEquations(const ParallelWellInfo<Scalar>& parallel_well_info);
//! \brief Setup sparsity pattern for the matrices.
//! \param num_cells Total number of cells

View File

@ -70,7 +70,7 @@ namespace Opm
template<typename TypeTag>
StandardWell<TypeTag>::
StandardWell(const Well& well,
const ParallelWellInfo& pw_info,
const ParallelWellInfo<Scalar>& pw_info,
const int time_step,
const ModelParameters& param,
const RateConverterType& rate_converter,

View File

@ -35,7 +35,7 @@ WGState<Scalar>::WGState(const PhaseUsage& pu) :
template<class Scalar>
WGState<Scalar> WGState<Scalar>::
serializationTestObject(const ParallelWellInfo& pinfo)
serializationTestObject(const ParallelWellInfo<Scalar>& pinfo)
{
WGState result(PhaseUsage{});
result.well_state = WellState<Scalar>::serializationTestObject(pinfo);

View File

@ -26,7 +26,7 @@
namespace Opm {
class ParallelWellInfo;
template<class Scalar> class ParallelWellInfo;
/*
Microscopic class to handle well, group and well test state.
@ -39,7 +39,7 @@ struct WGState
{
WGState(const PhaseUsage& pu);
static WGState serializationTestObject(const ParallelWellInfo& pinfo);
static WGState serializationTestObject(const ParallelWellInfo<Scalar>& pinfo);
void wtest_state(WellTestState wtest_state);

View File

@ -41,7 +41,8 @@ namespace wellhelpers {
template<typename Scalar>
ParallelStandardWellB<Scalar>::
ParallelStandardWellB(const Matrix& B, const ParallelWellInfo& parallel_well_info)
ParallelStandardWellB(const Matrix& B,
const ParallelWellInfo<Scalar>& parallel_well_info)
: B_(B), parallel_well_info_(parallel_well_info)
{}

View File

@ -28,7 +28,7 @@
namespace Opm {
class ParallelWellInfo;
template<class Scalar> class ParallelWellInfo;
struct WellProductionControls;
struct WellInjectionControls;
enum class WellProducerCMode;
@ -53,7 +53,8 @@ public:
using Block = Dune::DynamicMatrix<Scalar>;
using Matrix = Dune::BCRSMatrix<Block>;
ParallelStandardWellB(const Matrix& B, const ParallelWellInfo& parallel_well_info);
ParallelStandardWellB(const Matrix& B,
const ParallelWellInfo<Scalar>& parallel_well_info);
//! y = A x
template<class X, class Y>
@ -65,7 +66,7 @@ public:
private:
const Matrix& B_;
const ParallelWellInfo& parallel_well_info_;
const ParallelWellInfo<Scalar>& parallel_well_info_;
};
template<class Scalar>

View File

@ -133,7 +133,7 @@ public:
Indices::numPhases >;
/// Constructor
WellInterface(const Well& well,
const ParallelWellInfo& pw_info,
const ParallelWellInfo<Scalar>& pw_info,
const int time_step,
const ModelParameters& param,
const RateConverterType& rate_converter,

View File

@ -46,7 +46,7 @@ namespace Opm
template<class FluidSystem>
WellInterfaceFluidSystem<FluidSystem>::
WellInterfaceFluidSystem(const Well& well,
const ParallelWellInfo& parallel_well_info,
const ParallelWellInfo<Scalar>& parallel_well_info,
const int time_step,
const RateConverterType& rate_converter,
const int pvtRegionIdx,

View File

@ -68,7 +68,7 @@ public:
protected:
WellInterfaceFluidSystem(const Well& well,
const ParallelWellInfo& parallel_well_info,
const ParallelWellInfo<Scalar>& parallel_well_info,
const int time_step,
const RateConverterType& rate_converter,
const int pvtRegionIdx,

View File

@ -54,7 +54,7 @@ namespace Opm {
template<class Scalar>
WellInterfaceGeneric<Scalar>::
WellInterfaceGeneric(const Well& well,
const ParallelWellInfo& pw_info,
const ParallelWellInfo<Scalar>& pw_info,
const int time_step,
const int pvtRegionIdx,
const int num_components,

View File

@ -36,7 +36,7 @@ namespace Opm
class DeferredLogger;
class GuideRate;
class ParallelWellInfo;
template<class Scalar> class ParallelWellInfo;
template<class Scalar> struct PerforationData;
struct PhaseUsage;
class SummaryState;
@ -51,7 +51,7 @@ template<class Scalar>
class WellInterfaceGeneric {
public:
WellInterfaceGeneric(const Well& well,
const ParallelWellInfo& parallel_well_info,
const ParallelWellInfo<Scalar>& parallel_well_info,
const int time_step,
const int pvtRegionIdx,
const int num_components,
@ -131,7 +131,7 @@ public:
const VFPProperties<Scalar>* vfpProperties() const { return vfp_properties_; }
const ParallelWellInfo& parallelWellInfo() const { return parallel_well_info_; }
const ParallelWellInfo<Scalar>& parallelWellInfo() const { return parallel_well_info_; }
const std::vector<Scalar>& perfDepth() const { return perf_depth_; }
@ -271,7 +271,7 @@ protected:
Well well_ecl_;
const ParallelWellInfo& parallel_well_info_;
const ParallelWellInfo<Scalar>& parallel_well_info_;
const int current_step_;
// The pvt region of the well. We assume

View File

@ -36,7 +36,7 @@ namespace Opm
template<class FluidSystem, class Indices>
WellInterfaceIndices<FluidSystem,Indices>::
WellInterfaceIndices(const Well& well,
const ParallelWellInfo& parallel_well_info,
const ParallelWellInfo<Scalar>& parallel_well_info,
const int time_step,
const typename WellInterfaceFluidSystem<FluidSystem>::RateConverterType& rate_converter,
const int pvtRegionIdx,

View File

@ -56,7 +56,7 @@ public:
protected:
WellInterfaceIndices(const Well& well,
const ParallelWellInfo& parallel_well_info,
const ParallelWellInfo<Scalar>& parallel_well_info,
const int time_step,
const typename WellInterfaceFluidSystem<FluidSystem>::RateConverterType& rate_converter,
const int pvtRegionIdx,

View File

@ -53,7 +53,7 @@ namespace Opm
template<typename TypeTag>
WellInterface<TypeTag>::
WellInterface(const Well& well,
const ParallelWellInfo& pw_info,
const ParallelWellInfo<Scalar>& pw_info,
const int time_step,
const ModelParameters& param,
const RateConverterType& rate_converter,

View File

@ -128,7 +128,7 @@ void PackUnpackXConn::unpack([[maybe_unused]] const int link,
namespace Opm {
template<class Scalar>
WellState<Scalar>::WellState(const ParallelWellInfo& pinfo)
WellState<Scalar>::WellState(const ParallelWellInfo<Scalar>& pinfo)
: phase_usage_{}
{
wells_.add("test4",
@ -137,7 +137,7 @@ WellState<Scalar>::WellState(const ParallelWellInfo& pinfo)
template<class Scalar>
WellState<Scalar> WellState<Scalar>::
serializationTestObject(const ParallelWellInfo& pinfo)
serializationTestObject(const ParallelWellInfo<Scalar>& pinfo)
{
WellState result(PhaseUsage{});
result.alq_state = ALQState<Scalar>::serializationTestObject();
@ -150,7 +150,7 @@ serializationTestObject(const ParallelWellInfo& pinfo)
template<class Scalar>
void WellState<Scalar>::base_init(const std::vector<Scalar>& cellPressures,
const std::vector<Well>& wells_ecl,
const std::vector<std::reference_wrapper<ParallelWellInfo>>& parallel_well_info,
const std::vector<std::reference_wrapper<ParallelWellInfo<Scalar>>>& parallel_well_info,
const std::vector<std::vector<PerforationData<Scalar>>>& well_perf_data,
const SummaryState& summary_state)
{
@ -171,7 +171,7 @@ void WellState<Scalar>::base_init(const std::vector<Scalar>& cellPressures,
template<class Scalar>
void WellState<Scalar>::initSingleProducer(const Well& well,
const ParallelWellInfo& well_info,
const ParallelWellInfo<Scalar>& well_info,
Scalar pressure_first_connection,
const std::vector<PerforationData<Scalar>>& well_perf_data,
const SummaryState& summary_state)
@ -200,7 +200,7 @@ void WellState<Scalar>::initSingleProducer(const Well& well,
template<class Scalar>
void WellState<Scalar>::initSingleInjector(const Well& well,
const ParallelWellInfo& well_info,
const ParallelWellInfo<Scalar>& well_info,
Scalar pressure_first_connection,
const std::vector<PerforationData<Scalar>>& well_perf_data,
const SummaryState& summary_state)
@ -230,7 +230,7 @@ template<class Scalar>
void WellState<Scalar>::initSingleWell(const std::vector<Scalar>& cellPressures,
const Well& well,
const std::vector<PerforationData<Scalar>>& well_perf_data,
const ParallelWellInfo& well_info,
const ParallelWellInfo<Scalar>& well_info,
const SummaryState& summary_state)
{
Scalar pressure_first_connection = -1;
@ -251,7 +251,7 @@ template<class Scalar>
void WellState<Scalar>::init(const std::vector<Scalar>& cellPressures,
const Schedule& schedule,
const std::vector<Well>& wells_ecl,
const std::vector<std::reference_wrapper<ParallelWellInfo>>& parallel_well_info,
const std::vector<std::reference_wrapper<ParallelWellInfo<Scalar>>>& parallel_well_info,
const int report_step,
const WellState* prevState,
const std::vector<std::vector<PerforationData<Scalar>>>& well_perf_data,
@ -420,7 +420,7 @@ void WellState<Scalar>::init(const std::vector<Scalar>& cellPressures,
template<class Scalar>
void WellState<Scalar>::resize(const std::vector<Well>& wells_ecl,
const std::vector<std::reference_wrapper<ParallelWellInfo>>& parallel_well_info,
const std::vector<std::reference_wrapper<ParallelWellInfo<Scalar>>>& parallel_well_info,
const Schedule& schedule,
const bool handle_ms_well,
const std::size_t numCells,
@ -1044,7 +1044,7 @@ bool WellState<Scalar>::operator==(const WellState& rhs) const
}
template<class Scalar>
const ParallelWellInfo&
const ParallelWellInfo<Scalar>&
WellState<Scalar>::parallelWellInfo(std::size_t well_index) const
{
const auto& ws = this->well(well_index);

View File

@ -50,7 +50,7 @@
namespace Opm
{
class ParallelWellInfo;
template<class Scalar> class ParallelWellInfo;
template<class Scalar> struct PerforationData;
class Schedule;
enum class WellStatus;
@ -68,13 +68,13 @@ public:
static const int Gas = BlackoilPhases::Vapour;
// Only usable for testing purposes
explicit WellState(const ParallelWellInfo& pinfo);
explicit WellState(const ParallelWellInfo<Scalar>& pinfo);
explicit WellState(const PhaseUsage& pu)
: phase_usage_(pu)
{}
static WellState serializationTestObject(const ParallelWellInfo& pinfo);
static WellState serializationTestObject(const ParallelWellInfo<Scalar>& pinfo);
std::size_t size() const
{
@ -91,7 +91,7 @@ public:
return this->size();
}
const ParallelWellInfo& parallelWellInfo(std::size_t well_index) const;
const ParallelWellInfo<Scalar>& parallelWellInfo(std::size_t well_index) const;
/// Allocate and initialize if wells is non-null. Also tries
/// to give useful initial values to the bhp(), wellRates()
@ -99,14 +99,14 @@ public:
void init(const std::vector<Scalar>& cellPressures,
const Schedule& schedule,
const std::vector<Well>& wells_ecl,
const std::vector<std::reference_wrapper<ParallelWellInfo>>& parallel_well_info,
const std::vector<std::reference_wrapper<ParallelWellInfo<Scalar>>>& parallel_well_info,
const int report_step,
const WellState* prevState,
const std::vector<std::vector<PerforationData<Scalar>>>& well_perf_data,
const SummaryState& summary_state);
void resize(const std::vector<Well>& wells_ecl,
const std::vector<std::reference_wrapper<ParallelWellInfo>>& parallel_well_info,
const std::vector<std::reference_wrapper<ParallelWellInfo<Scalar>>>& parallel_well_info,
const Schedule& schedule,
const bool handle_ms_well,
const std::size_t numCells,
@ -378,24 +378,24 @@ private:
/// with -1e100.
void base_init(const std::vector<Scalar>& cellPressures,
const std::vector<Well>& wells_ecl,
const std::vector<std::reference_wrapper<ParallelWellInfo>>& parallel_well_info,
const std::vector<std::reference_wrapper<ParallelWellInfo<Scalar>>>& parallel_well_info,
const std::vector<std::vector<PerforationData<Scalar>>>& well_perf_data,
const SummaryState& summary_state);
void initSingleWell(const std::vector<Scalar>& cellPressures,
const Well& well,
const std::vector<PerforationData<Scalar>>& well_perf_data,
const ParallelWellInfo& well_info,
const ParallelWellInfo<Scalar>& well_info,
const SummaryState& summary_state);
void initSingleProducer(const Well& well,
const ParallelWellInfo& well_info,
const ParallelWellInfo<Scalar>& well_info,
Scalar pressure_first_connection,
const std::vector<PerforationData<Scalar>>& well_perf_data,
const SummaryState& summary_state);
void initSingleInjector(const Well& well,
const ParallelWellInfo& well_info,
const ParallelWellInfo<Scalar>& well_info,
Scalar pressure_first_connection,
const std::vector<PerforationData<Scalar>>& well_perf_data,
const SummaryState& summary_state);

View File

@ -149,7 +149,7 @@ TEST_FOR_TYPE_NAMED(BVec, BlockVectorWrapper)
BOOST_AUTO_TEST_CASE(SingleWellState)
{
Opm::ParallelWellInfo dummy;
Opm::ParallelWellInfo<double> dummy;
auto data_out = Opm::SingleWellState<double>::serializationTestObject(dummy);
Opm::Serialization::MemPacker packer;
Opm::Serializer ser(packer);
@ -178,7 +178,7 @@ BOOST_AUTO_TEST_CASE(WellContainer)
BOOST_AUTO_TEST_CASE(WellState)
{
Opm::ParallelWellInfo dummy;
Opm::ParallelWellInfo<double> dummy;
auto data_out = Opm::WellState<double>::serializationTestObject(dummy);
Opm::Serialization::MemPacker packer;
Opm::Serializer ser(packer);
@ -193,7 +193,7 @@ BOOST_AUTO_TEST_CASE(WellState)
BOOST_AUTO_TEST_CASE(WGState)
{
Opm::ParallelWellInfo dummy;
Opm::ParallelWellInfo<double> dummy;
auto data_out = Opm::WGState<double>::serializationTestObject(dummy);
Opm::Serialization::MemPacker packer;
Opm::Serializer ser(packer);
@ -354,7 +354,7 @@ public:
}
private:
ParallelWellInfo dummy;
ParallelWellInfo<double> dummy;
};
}

View File

@ -457,10 +457,10 @@ namespace {
return w;
}
Opm::ParallelWellInfo
Opm::ParallelWellInfo<double>
parallelWellInfo(const Opm::Parallel::Communication& comm)
{
auto pwi = Opm::ParallelWellInfo {
auto pwi = Opm::ParallelWellInfo<double> {
std::pair { std::string{ "P" }, true }, comm
};
@ -469,7 +469,7 @@ namespace {
const auto numLocalPerf = 3;
const auto perfOffset = comm.rank() * numLocalPerf;
auto prev = Opm::ParallelWellInfo::INVALID_ECL_INDEX;
auto prev = Opm::ParallelWellInfo<double>::INVALID_ECL_INDEX;
for (auto perf = 0; perf < numLocalPerf; ++perf) {
const auto curr = perfOffset + perf;
pwi.pushBackEclIndex(prev, curr);
@ -552,7 +552,7 @@ namespace {
Opm::Parallel::Communication comm;
Opm::GridDims cellIndexMap;
Opm::ParallelWBPCalculation wbpCalcService;
Opm::ParallelWellInfo pwi;
Opm::ParallelWellInfo<double> pwi;
};
} // Anonymous namespace

View File

@ -97,7 +97,7 @@ std::ostream& operator<<(std::ostream& os, const std::pair<std::string, bool>& p
}
namespace Opm
{
std::ostream& operator<<(std::ostream& os, const Opm::ParallelWellInfo& w)
std::ostream& operator<<(std::ostream& os, const Opm::ParallelWellInfo<double>& w)
{
return os << "{" << w.name() << " "<< w.hasLocalCells() << " "<<
w.isOwner() << "}";
@ -117,7 +117,7 @@ BOOST_AUTO_TEST_CASE(ParallelWellComparison)
else
pairs = {{"Test1", false},{"Test2", true}, {"Test1", true} };
std::vector<Opm::ParallelWellInfo> well_info;
std::vector<Opm::ParallelWellInfo<double>> well_info;
for (const auto& wellinfo : pairs) {
well_info.emplace_back(wellinfo, Opm::Parallel::Communication());
@ -138,16 +138,16 @@ BOOST_AUTO_TEST_CASE(ParallelWellComparison)
BOOST_CHECK(well_info[0] != well_info[1]);
Opm::ParallelWellInfo well0, well1;
Opm::ParallelWellInfo<double> well0, well1;
BOOST_CHECK(well0 == well1);
#if HAVE_MPI
BOOST_CHECK(well0.communication()==helper.getLocalCommunicator());
#endif
Opm::ParallelWellInfo well2("Test", false);
Opm::ParallelWellInfo<double> well2("Test", false);
std::pair<std::string, bool> pwell={"Test", true};
BOOST_CHECK(well2 < pwell);
Opm::ParallelWellInfo well3("Test", true);
Opm::ParallelWellInfo<double> well3("Test", true);
BOOST_CHECK(! (well3 < pwell));
pwell.second = false;
BOOST_CHECK(! (well3 < pwell));
@ -171,7 +171,7 @@ BOOST_AUTO_TEST_CASE(ParallelWellComparison)
BOOST_AUTO_TEST_CASE(CommunicateAboveBelowSelf)
{
auto comm = Dune::MPIHelper::getLocalCommunicator();
Opm::CommunicateAboveBelow commAboveBelow{ comm };
Opm::CommunicateAboveBelow<double> commAboveBelow{ comm };
for(std::size_t count=0; count < 2; ++count)
{
std::vector<int> eclIndex = {0, 1, 2, 3, 7 , 8, 10, 11};
@ -206,7 +206,7 @@ BOOST_AUTO_TEST_CASE(CommunicateAboveBelowSelf)
BOOST_AUTO_TEST_CASE(CommunicateAboveBelowSelf1)
{
auto comm = Dune::MPIHelper::getLocalCommunicator();
Opm::CommunicateAboveBelow commAboveBelow{ comm };
Opm::CommunicateAboveBelow<double> commAboveBelow{ comm };
for(std::size_t count=0; count < 2; ++count)
{
std::vector<int> eclIndex = {0};
@ -291,7 +291,7 @@ BOOST_AUTO_TEST_CASE(CommunicateAboveBelowParallel)
{
auto comm = Opm::Parallel::Communication(Dune::MPIHelper::getCommunicator());
Opm::CommunicateAboveBelow commAboveBelow{ comm };
Opm::CommunicateAboveBelow<double> commAboveBelow{ comm };
for(std::size_t count=0; count < 2; ++count)
{
auto globalEclIndex = createGlobalEclIndex(comm);
@ -351,7 +351,7 @@ BOOST_AUTO_TEST_CASE(PartialSumself)
{
auto comm = Dune::MPIHelper::getLocalCommunicator();
Opm::CommunicateAboveBelow commAboveBelow{ comm };
Opm::CommunicateAboveBelow<double> commAboveBelow{ comm };
std::vector<int> eclIndex = {0, 1, 2, 3, 7 , 8, 10, 11};
std::vector<double> current(eclIndex.size());
std::transform(eclIndex.begin(), eclIndex.end(), current.begin(),
@ -383,7 +383,7 @@ BOOST_AUTO_TEST_CASE(PartialSumParallel)
auto comm = Opm::Parallel::Communication(Dune::MPIHelper::getCommunicator());
Opm::CommunicateAboveBelow commAboveBelow{ comm };
Opm::CommunicateAboveBelow<double> commAboveBelow{ comm };
auto globalEclIndex = createGlobalEclIndex(comm);
std::vector<double> globalCurrent(globalEclIndex.size());
initRandomNumbers(std::begin(globalCurrent), std::end(globalCurrent),
@ -411,7 +411,7 @@ void testGlobalPerfFactoryParallel(int num_component, bool local_consecutive = f
{
auto comm = Opm::Parallel::Communication(Dune::MPIHelper::getCommunicator());
Opm::ParallelWellInfo wellInfo{ {"Test", true }, comm };
Opm::ParallelWellInfo<double> wellInfo{ {"Test", true }, comm };
auto globalEclIndex = createGlobalEclIndex(comm);
std::vector<double> globalCurrent(globalEclIndex.size() * num_component);
std::vector<double> globalAdd(globalEclIndex.size() * num_component);
@ -425,7 +425,7 @@ void testGlobalPerfFactoryParallel(int num_component, bool local_consecutive = f
local_consecutive);
// A hack to get local values to add.
Opm::ParallelWellInfo dummy{ {"Test", true }, comm };
Opm::ParallelWellInfo<double> dummy{ {"Test", true }, comm };
auto localAdd = populateCommAbove(dummy, comm, globalEclIndex,
globalAdd, num_component,
local_consecutive);
@ -476,7 +476,7 @@ BOOST_AUTO_TEST_CASE(GlobalPerfFactoryParallel1)
BOOST_AUTO_TEST_CASE(EmptyWell) {
auto comm = Opm::Parallel::Communication(Dune::MPIHelper::getCommunicator());
Opm::ParallelWellInfo pw({"WELL1", true}, comm);
Opm::ParallelWellInfo<double> pw({"WELL1", true}, comm);
pw.communicateFirstPerforation(false);
double local_p = 1;
auto global_p = pw.broadcastFirstPerforationValue(local_p);

View File

@ -131,7 +131,7 @@ BOOST_AUTO_TEST_CASE(TestStandardWellInput) {
pdata[c].ecl_index = c;
}
Opm::ParallelWellInfo pinfo{well.name()};
Opm::ParallelWellInfo<double> pinfo{well.name()};
BOOST_CHECK_THROW( StandardWell( well, pinfo, -1, param, *rateConverter, 0, 3, 3, 0, pdata), std::invalid_argument);
}
@ -166,7 +166,7 @@ BOOST_AUTO_TEST_CASE(TestBehavoir) {
pdata[c].ecl_index = c;
}
Opm::ParallelWellInfo pinfo{wells_ecl[w].name()};
Opm::ParallelWellInfo<double> pinfo{wells_ecl[w].name()};
wells.emplace_back(new StandardWell(wells_ecl[w], pinfo, current_timestep, param, *rateConverter, 0, 3, 3, w, pdata) );
}
}

View File

@ -139,7 +139,7 @@ struct Setup
namespace {
Opm::WellState<double>
buildWellState(const Setup& setup, const std::size_t timeStep,
std::vector<Opm::ParallelWellInfo>& pinfos)
std::vector<Opm::ParallelWellInfo<double>>& pinfos)
{
auto state = Opm::WellState<double>{setup.pu};
@ -149,7 +149,7 @@ namespace {
auto wells = setup.sched.getWells(timeStep);
pinfos.resize(wells.size());
std::vector<std::reference_wrapper<Opm::ParallelWellInfo>> ppinfos;
std::vector<std::reference_wrapper<Opm::ParallelWellInfo<double>>> ppinfos;
auto pw = pinfos.begin();
for (const auto& well : wells)
@ -259,7 +259,7 @@ BOOST_AUTO_TEST_CASE(Linearisation)
const Setup setup{ "msw.data" };
const auto tstep = std::size_t{0};
std::vector<Opm::ParallelWellInfo> pinfos;
std::vector<Opm::ParallelWellInfo<double>> pinfos;
const auto wstate = buildWellState(setup, tstep, pinfos);
const auto& ws = wstate.well("PROD01");
@ -276,7 +276,7 @@ BOOST_AUTO_TEST_CASE(Pressure)
const Setup setup{ "msw.data" };
const auto tstep = std::size_t{0};
std::vector<Opm::ParallelWellInfo> pinfos;
std::vector<Opm::ParallelWellInfo<double>> pinfos;
auto wstate = buildWellState(setup, tstep, pinfos);
const auto& wells = setup.sched.getWells(tstep);
@ -315,7 +315,7 @@ BOOST_AUTO_TEST_CASE(Rates)
const Setup setup{ "msw.data" };
const auto tstep = std::size_t{0};
std::vector<Opm::ParallelWellInfo> pinfos;
std::vector<Opm::ParallelWellInfo<double>> pinfos;
auto wstate = buildWellState(setup, tstep, pinfos);
const auto wells = setup.sched.getWells(tstep);
@ -368,7 +368,7 @@ BOOST_AUTO_TEST_CASE(STOP_well)
*/
const Setup setup{ "wells_manager_data_wellSTOP.data" };
std::vector<Opm::ParallelWellInfo> pinfos;
std::vector<Opm::ParallelWellInfo<double>> pinfos;
auto wstate = buildWellState(setup, 0, pinfos);
for (std::size_t well_index = 0; well_index < setup.sched.numWells(0); well_index++) {
const auto& ws = wstate.well(well_index);
@ -525,7 +525,7 @@ BOOST_AUTO_TEST_CASE(TESTSegmentState) {
BOOST_AUTO_TEST_CASE(TESTSegmentState2) {
const Setup setup{ "msw.data" };
std::vector<Opm::ParallelWellInfo> pinfo;
std::vector<Opm::ParallelWellInfo<double>> pinfo;
const auto wstate = buildWellState(setup, 0, pinfo);
const auto& well = setup.sched.getWell("PROD01", 0);
const auto& ws = wstate.well("PROD01");
@ -581,7 +581,7 @@ BOOST_AUTO_TEST_CASE(TESTPerfData) {
BOOST_AUTO_TEST_CASE(TestSingleWellState) {
Opm::ParallelWellInfo pinfo;
Opm::ParallelWellInfo<double> pinfo;
std::vector<Opm::PerforationData<double>> connections = {{0,1,1,0,0},{1,1,1,0,1},{2,1,1,0,2}};
Opm::PhaseUsage pu;