Use small wrapper to manage shared pointers in ScheduleState
commit acb1284efd (parent 39720fa3a4)
@@ -75,11 +75,11 @@ UDAValue msim::uda_val() {
 void msim::post_step(Schedule& schedule, Action::State& action_state, SummaryState& st, data::Solution& /* sol */, data::Wells& /* well_data */, data::GroupAndNetworkValues& /* grp_nwrk_data */, size_t report_step) {
-    const auto& actions = schedule[report_step].actions();
+    const auto& actions = schedule[report_step].actions.get();
     if (actions.empty())
         return;

-    Action::Context context( st , schedule[report_step].wlist_manager());
+    Action::Context context( st , schedule[report_step].wlist_manager.get());

     auto sim_time = schedule.simTime(report_step);
     for (const auto& action : actions.pending(action_state, sim_time)) {
@@ -350,68 +350,40 @@ namespace Opm
         serializer.vector(snapshots);
         m_static.serializeOp(serializer);

-        pack_unpack<WellTestConfig, Serializer>(serializer,
-                                                std::mem_fn(&ScheduleState::wtest_config),
-                                                std::mem_fn(&ScheduleState::update_wtest_config));
-
-        pack_unpack<GConSale, Serializer>(serializer,
-                                          std::mem_fn(&ScheduleState::gconsale),
-                                          std::mem_fn(&ScheduleState::update_gconsale));
-
-        pack_unpack<GConSump, Serializer>(serializer,
-                                          std::mem_fn(&ScheduleState::gconsump),
-                                          std::mem_fn(&ScheduleState::update_gconsump));
-
-        pack_unpack<WListManager, Serializer>(serializer,
-                                              std::mem_fn(&ScheduleState::wlist_manager),
-                                              std::mem_fn(&ScheduleState::update_wlist_manager));
-
-        pack_unpack<Network::ExtNetwork, Serializer>(serializer,
-                                                     std::mem_fn(&ScheduleState::network),
-                                                     std::mem_fn(&ScheduleState::update_network));
-
-        pack_unpack<RPTConfig, Serializer>(serializer,
-                                           std::mem_fn(&ScheduleState::rpt_config),
-                                           std::mem_fn(&ScheduleState::update_rpt_config));
-
-        pack_unpack<Action::Actions, Serializer>(serializer,
-                                                 std::mem_fn(&ScheduleState::actions),
-                                                 std::mem_fn(&ScheduleState::update_actions));
-
-        pack_unpack<UDQActive, Serializer>(serializer,
-                                           std::mem_fn(&ScheduleState::udq_active),
-                                           std::mem_fn(&ScheduleState::update_udq_active));
+        pack_unpack<PAvg, Serializer>(serializer);
+        pack_unpack<WellTestConfig, Serializer>(serializer);
+        pack_unpack<GConSale, Serializer>(serializer);
+        pack_unpack<GConSump, Serializer>(serializer);
+        pack_unpack<WListManager, Serializer>(serializer);
+        pack_unpack<Network::ExtNetwork, Serializer>(serializer);
+        pack_unpack<RPTConfig, Serializer>(serializer);
+        pack_unpack<Action::Actions, Serializer>(serializer);
+        pack_unpack<UDQActive, Serializer>(serializer);
+        pack_unpack<NameOrder, Serializer>(serializer);
     }

     template <typename T, class Serializer>
-    void pack_unpack(Serializer& serializer,
-                     const std::function<T(const ScheduleState &)>& get,
-                     const std::function<void(ScheduleState &, const T& )>& set) {
+    void pack_unpack(Serializer& serializer) {
         std::vector<T> value_list;
         std::vector<std::size_t> index_list;

         if (serializer.isSerializing())
-            pack_state<T>(value_list, index_list, get);
+            pack_state<T>(value_list, index_list);

         serializer.vector(value_list);
         serializer.template vector<std::size_t, false>(index_list);

         if (!serializer.isSerializing())
-            unpack_state<T>(value_list, index_list, set);
+            unpack_state<T>(value_list, index_list);
     }


     template <typename T>
-    void pack_state(std::vector<T>& value_list, std::vector<std::size_t>& index_list, const std::function<T(const ScheduleState &)>& get) {
-        if (this->snapshots.empty())
-            return;
-
-        value_list.push_back( get( this->snapshots[0] ));
-        index_list.push_back( 0 );
-        for (std::size_t index = 1; index < this->snapshots.size(); index++) {
-            const auto& value = get( this->snapshots[index] );
-            if (!(value == value_list.back())) {
+    void pack_state(std::vector<T>& value_list, std::vector<std::size_t>& index_list) {
+        for (std::size_t index = 0; index < this->snapshots.size(); index++) {
+            const auto& member = this->snapshots[index].get<T>();
+            const auto& value = member.get();
+            if (value_list.empty() || !(value == value_list.back())) {
                 value_list.push_back( value );
                 index_list.push_back( index );
             }
@@ -419,7 +391,7 @@ namespace Opm
     }

     template <typename T>
-    void unpack_state(const std::vector<T>& value_list, const std::vector<std::size_t>& index_list, const std::function<void(ScheduleState &, const T& )>& set) {
+    void unpack_state(const std::vector<T>& value_list, const std::vector<std::size_t>& index_list) {
         std::size_t unique_index = 0;
         while (unique_index < value_list.size()) {
             const auto& value = value_list[unique_index];
@@ -428,8 +400,10 @@ namespace Opm
             if (unique_index < (value_list.size() - 1))
                 last_index = index_list[unique_index + 1];

-            for (std::size_t index=first_index; index < last_index; index++)
-                set( this->snapshots[index], value );
+            auto& target_state = this->snapshots[first_index];
+            target_state.get<T>().update( std::move(value) );
+            for (std::size_t index=first_index + 1; index < last_index; index++)
+                this->snapshots[index].update_ptr<T>( target_state );

             unique_index++;
         }
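
The pack_state/unpack_state pair above implements a simple run-length deduplication: only the first snapshot of each run of equal values is stored, together with the snapshot index where that run starts. A minimal, self-contained sketch of the same scheme (illustrative only — plain std types and copies stand in for the Opm classes and the shared-pointer plumbing):

#include <cassert>
#include <string>
#include <vector>

// Store one representative per run of equal consecutive values, plus the
// index of the first snapshot in each run.
template <typename T>
void pack(const std::vector<T>& snapshots, std::vector<T>& value_list, std::vector<std::size_t>& index_list) {
    for (std::size_t index = 0; index < snapshots.size(); index++) {
        if (value_list.empty() || !(snapshots[index] == value_list.back())) {
            value_list.push_back(snapshots[index]);
            index_list.push_back(index);
        }
    }
}

// Expand the runs again; the real unpack_state instead installs the value in
// the first snapshot of the run and lets the rest share its pointer.
template <typename T>
void unpack(std::vector<T>& snapshots, const std::vector<T>& value_list, const std::vector<std::size_t>& index_list) {
    for (std::size_t unique_index = 0; unique_index < value_list.size(); unique_index++) {
        const std::size_t first = index_list[unique_index];
        const std::size_t last = (unique_index + 1 < value_list.size()) ? index_list[unique_index + 1] : snapshots.size();
        for (std::size_t index = first; index < last; index++)
            snapshots[index] = value_list[unique_index];
    }
}

int main() {
    const std::vector<std::string> snaps = {"a", "a", "b", "b", "b", "c"};
    std::vector<std::string> values;
    std::vector<std::size_t> firsts;
    pack(snaps, values, firsts);          // values == {a, b, c}, firsts == {0, 2, 5}
    std::vector<std::string> restored(snaps.size());
    unpack(restored, values, firsts);
    assert(restored == snaps);
}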
@@ -54,8 +54,64 @@ namespace Opm {

     class WellTestConfig;


     class ScheduleState {
     public:
+        /*
+          In the SCHEDULE section typically all information is a function of
+          time, and the ScheduleState class is used to manage a snapshot of
+          state at one point in time. Typically a large part of the
+          configuration does not change between timesteps and consecutive
+          ScheduleState instances are very similar; to handle this, many of
+          the ScheduleState members are implemented as std::shared_ptr<>s.
+
+          The ptr_member<T> class is a small wrapper around the
+          std::shared_ptr<T>. The ptr_member<T> class is meant to be internal
+          to the Schedule implementation, and downstream code should only
+          access it indirectly, e.g.
+
+              const auto& gconsum = sched_state.gconsump();
+
+          The remaining details of the ptr_member<T> class are heavily
+          influenced by the code used to serialize the Schedule information.
+        */

+        template <typename T>
+        class ptr_member {
+        public:
+            const T& get() const {
+                return *this->m_data;
+            }
+
+            /*
+              This will allocate new storage and assign @object to the new
+              storage.
+            */
+            void update(T object)
+            {
+                this->m_data = std::make_shared<T>( std::move(object) );
+            }
+
+            /*
+              Will reassign the pointer to point to the existing shared
+              instance @other.
+            */
+            void update(const ptr_member<T>& other)
+            {
+                this->m_data = other.m_data;
+            }
+
+            const T& operator()() const {
+                return *this->m_data;
+            }
+
+        private:
+            std::shared_ptr<T> m_data;
+        };
+
         ScheduleState() = default;
         explicit ScheduleState(const std::chrono::system_clock::time_point& start_time);
         ScheduleState(const std::chrono::system_clock::time_point& start_time, const std::chrono::system_clock::time_point& end_time);
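
To make the sharing semantics concrete, here is a self-contained sketch (not Opm code — ptr_member is simplified from the class above, and Config is a stand-in type) of how two consecutive snapshots end up sharing storage until one of them is updated:

#include <cassert>
#include <memory>

template <typename T>
class ptr_member {
public:
    const T& get() const { return *this->m_data; }

    // Allocate fresh storage holding @object.
    void update(T object) { this->m_data = std::make_shared<T>(std::move(object)); }

    // Alias the storage already owned by @other.
    void update(const ptr_member<T>& other) { this->m_data = other.m_data; }

private:
    std::shared_ptr<T> m_data;
};

struct Config { int value = 0; };

int main() {
    ptr_member<Config> step0;
    ptr_member<Config> step1;

    step0.update(Config{1});             // new allocation
    step1.update(step0);                 // step1 aliases step0's storage
    assert(&step0.get() == &step1.get());

    step1.update(Config{2});             // fresh allocation: step1 detaches
    assert(step0.get().value == 1 && step1.get().value == 2);
}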
@@ -70,9 +126,6 @@ namespace Opm {
         bool operator==(const ScheduleState& other) const;
         static ScheduleState serializeObject();

-        void update_pavg(PAvg pavg);
-        const PAvg& pavg() const;
-
         void update_tuning(Tuning tuning);
         Tuning& tuning();
         const Tuning& tuning() const;
@@ -103,23 +156,49 @@ namespace Opm {
         Well::ProducerCMode whistctl() const;
         void update_whistctl(Well::ProducerCMode whistctl);

-        const WellTestConfig& wtest_config() const;
-        void update_wtest_config(WellTestConfig wtest_config);
+        /*********************************************************************/

-        const WListManager& wlist_manager() const;
-        void update_wlist_manager(WListManager wlist_manager);
+        ptr_member<PAvg> pavg;
+        ptr_member<WellTestConfig> wtest_config;
+        ptr_member<GConSale> gconsale;
+        ptr_member<GConSump> gconsump;
+        ptr_member<WListManager> wlist_manager;
+        ptr_member<Network::ExtNetwork> network;
+        ptr_member<RPTConfig> rpt_config;
+        ptr_member<Action::Actions> actions;
+        ptr_member<UDQActive> udq_active;
+        ptr_member<NameOrder> well_order;

-        const GConSale& gconsale() const;
-        void update_gconsale(GConSale gconsale);
+        template <typename T>
+        ptr_member<T>& get() {
+            if constexpr ( std::is_same_v<T, PAvg> )
+                return this->pavg;
+            else if constexpr ( std::is_same_v<T, WellTestConfig> )
+                return this->wtest_config;
+            else if constexpr ( std::is_same_v<T, GConSale> )
+                return this->gconsale;
+            else if constexpr ( std::is_same_v<T, GConSump> )
+                return this->gconsump;
+            else if constexpr ( std::is_same_v<T, WListManager> )
+                return this->wlist_manager;
+            else if constexpr ( std::is_same_v<T, Network::ExtNetwork> )
+                return this->network;
+            else if constexpr ( std::is_same_v<T, RPTConfig> )
+                return this->rpt_config;
+            else if constexpr ( std::is_same_v<T, Action::Actions> )
+                return this->actions;
+            else if constexpr ( std::is_same_v<T, UDQActive> )
+                return this->udq_active;
+            else if constexpr ( std::is_same_v<T, NameOrder> )
+                return this->well_order;
+        }

-        const GConSump& gconsump() const;
-        void update_gconsump(GConSump gconsump);
+        template <typename T>
+        void update_ptr(ScheduleState& other) {
+            auto& member = this->get<T>();
+            member.update( other.get<T>() );
+        }

-        const Network::ExtNetwork& network() const;
-        void update_network(Network::ExtNetwork network);

-        const RPTConfig& rpt_config() const;
-        void update_rpt_config(RPTConfig rpt_config);

         std::vector<std::reference_wrapper<const VFPProdTable>> vfpprod() const;
         const VFPProdTable& vfpprod(int table_id) const;
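
A side effect worth noting: these ptr_member members are public, and ptr_member<T> defines operator()(), so call sites written against the removed accessor methods keep compiling unchanged, e.g.

    const auto& wtc = sched_state.wtest_config();   // now resolves to ptr_member<WellTestConfig>::operator()()

which appears to be why several unchanged context lines in the tests further down still use the function-call syntax, while the lines this commit actually touches switch to the explicit .get() / .update() forms.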
@@ -131,21 +210,10 @@ namespace Opm {
         void update_vfpinj(VFPInjTable vfpinj);
         std::optional<std::reference_wrapper<const VFPInjTable>> try_vfpinj(int table_id) const;

-        const Action::Actions& actions() const;
-        void update_actions(Action::Actions actions);
-
-        const UDQActive& udq_active() const;
-        void update_udq_active(UDQActive udq_active);
-
-        const NameOrder& well_order() const;
-        void well_order(const std::string& well);
-        void update_well_order(NameOrder well_order);
-
         template<class Serializer>
         void serializeOp(Serializer& serializer) {
             serializer(m_start_time);
             serializer(m_end_time);
-            serializer(m_pavg);
             m_tuning.serializeOp(serializer);
             serializer(m_nupcol);
             m_oilvap.serializeOp(serializer);
@@ -153,7 +221,6 @@ namespace Opm {
             m_wellgroup_events.serializeOp(serializer);
             serializer.vector(m_geo_keywords);
             m_message_limits.serializeOp(serializer);
-            serializer(m_well_order);
             serializer(m_whistctl_mode);
             serializer.map(m_vfpprod);
             serializer.map(m_vfpinj);
@@ -164,7 +231,6 @@ namespace Opm {
         std::chrono::system_clock::time_point m_start_time;
         std::optional<std::chrono::system_clock::time_point> m_end_time;

-        std::shared_ptr<PAvg> m_pavg;
         Tuning m_tuning;
         int m_nupcol;
         OilVaporizationProperties m_oilvap;
@@ -173,15 +239,6 @@ namespace Opm {
         std::vector<DeckKeyword> m_geo_keywords;
         MessageLimits m_message_limits;
         Well::ProducerCMode m_whistctl_mode = Well::ProducerCMode::CMODE_UNDEFINED;
-        std::shared_ptr<NameOrder> m_well_order;
-        std::shared_ptr<WellTestConfig> m_wtest_config;
-        std::shared_ptr<GConSale> m_gconsale;
-        std::shared_ptr<GConSump> m_gconsump;
-        std::shared_ptr<WListManager> m_wlist_manager;
-        std::shared_ptr<Network::ExtNetwork> m_network;
-        std::shared_ptr<RPTConfig> m_rptconfig;
-        std::shared_ptr<Action::Actions> m_actions;
-        std::shared_ptr<UDQActive> m_udq_active;
         std::map<int, std::shared_ptr<VFPProdTable>> m_vfpprod;
         std::map<int, std::shared_ptr<VFPInjTable>> m_vfpinj;
     };
@@ -536,7 +536,7 @@ const std::map<cmp_enum, int> cmpToIndex = {
 act_res(const Opm::Schedule& sched, const Opm::Action::State& action_state, const Opm::SummaryState& smry, const std::size_t sim_step, std::vector<Opm::Action::ActionX>::const_iterator act_x) {
     auto sim_time = sched.simTime(sim_step);
     if (act_x->ready(action_state, sim_time)) {
-        Opm::Action::Context context(smry, sched[sim_step].wlist_manager());
+        Opm::Action::Context context(smry, sched[sim_step].wlist_manager.get());
         return act_x->eval(context);
     } else
         return Opm::Action::Result(false);
@@ -687,7 +687,7 @@ captureDeclaredActionxData( const Opm::Schedule& sched,
                             const std::vector<int>& actDims,
                             const std::size_t simStep)
 {
-    const auto& acts = sched[simStep].actions();
+    const auto& acts = sched[simStep].actions.get();
     std::size_t act_ind = 0;
     for (auto actx_it = acts.begin(); actx_it < acts.end(); actx_it++) {
     {
@@ -812,7 +812,7 @@ captureDeclaredUDQData(const Opm::Schedule& sched,
     }


-    auto udq_active = sched[simStep].udq_active();
+    auto udq_active = sched[simStep].udq_active.get();
     if (udq_active) {
         const auto& udq_records = udq_active.get_iuad();
         int cnt_iuad = 0;
@@ -929,8 +929,8 @@ namespace {
 std::vector<std::pair<std::string, Opm::Action::Result>>
 act_res_stat(const Opm::Schedule& sched, const Opm::Action::State& action_state, const Opm::SummaryState& smry, const std::size_t sim_step) {
     std::vector<std::pair<std::string, Opm::Action::Result>> results;
-    const auto& acts = sched[sim_step].actions();
-    Opm::Action::Context context(smry, sched[sim_step].wlist_manager());
+    const auto& acts = sched[sim_step].actions.get();
+    Opm::Action::Context context(smry, sched[sim_step].wlist_manager.get());
     auto sim_time = sched.simTime(sim_step);
     for (const auto& action : acts.pending(action_state, sim_time)) {
         auto result = action->eval(context);
@@ -121,7 +121,7 @@ createActionxDims( const Runspec& rspec,
                    const Schedule& sched,
                    const std::size_t simStep)
 {
-    const auto& acts = sched[simStep].actions();
+    const auto& acts = sched[simStep].actions.get();
     std::vector<int> actDims(9);

     //No of Actionx keywords
@@ -168,7 +168,7 @@ namespace {
         return 0;
     }

-    const auto& udqAct = sched[simStep].udq_active();
+    const auto& udqAct = sched[simStep].udq_active.get();
     const auto& iuad = udqAct.get_iuad();

     return std::count_if(iuad.begin(), iuad.end(), [](const Opm::UDQActive::Record rec) {
@@ -184,7 +184,7 @@ namespace {
         return 0;
     }

-    const auto& udqAct = sched[simStep].udq_active();
+    const auto& udqAct = sched[simStep].udq_active.get();
     const auto& iuap = udqAct.get_iuap();

     return std::count_if(iuap.begin(), iuap.end(), [](const Opm::UDQActive::InputRecord rec) {
@@ -477,7 +477,7 @@ createInteHead(const EclipseState& es,
     const auto ngmax = (report_step == 0)
         ? 0 : numGroupsInField(sched, lookup_step);

-    const auto& acts = sched[lookup_step].actions();
+    const auto& acts = sched[lookup_step].actions.get();
     const auto& rspec = es.runspec();
     const auto& tdim = es.getTableManager();
     const auto& rdim = tdim.getRegdims();
@@ -263,7 +263,7 @@ void EclipseIO::writeTimeStep(const Action::State& action_state,


     if (!isSubstep) {
-        for (const auto& report : schedule[report_step].rpt_config()) {
+        for (const auto& report : schedule[report_step].rpt_config.get()) {
             std::stringstream ss;
             const auto& unit_system = this->impl->es.getUnits();

@@ -3209,7 +3209,7 @@ void Opm::out::Summary::SummaryImplementation::configureUDQ(const SummaryConfig&
     for (const auto& udq_ptr : sched.udqConfigList())
         udq_ptr->required_summary(summary_keys);

-    for (const auto& action : sched.back().actions())
+    for (const auto& action : sched.back().actions.get())
         action.required_summary(summary_keys);

     for (const auto& key : summary_keys) {
@@ -119,7 +119,7 @@ namespace {


 void Schedule::handleBRANPROP(const HandlerContext& handlerContext, const ParseContext&, ErrorGuard&) {
-    auto ext_network = this->snapshots.back().network();
+    auto ext_network = this->snapshots.back().network.get();

     for (const auto& record : handlerContext.keyword) {
         const auto& downtree_node = record.getItem<ParserKeywords::BRANPROP::DOWNTREE_NODE>().get<std::string>(0);
@@ -141,7 +141,7 @@ namespace {
         }
     }

-    this->snapshots.back().update_network( std::move( ext_network ));
+    this->snapshots.back().network.update( std::move( ext_network ));
 }

 void Schedule::handleCOMPDAT(const HandlerContext& handlerContext, const ParseContext& parseContext, ErrorGuard& errors) {
@@ -463,9 +463,9 @@ namespace {
             this->snapshots.back().events().addEvent(ScheduleEvents::GROUP_PRODUCTION_UPDATE);
             this->snapshots.back().wellgroup_events().addEvent( group_name, ScheduleEvents::GROUP_PRODUCTION_UPDATE);

-            auto udq_active = UDQActive(this->snapshots.back().udq_active());
+            auto udq_active = this->snapshots.back().udq_active.get();
             if (production.updateUDQActive(this->getUDQConfig(current_step), udq_active))
-                this->snapshots.back().update_udq_active( std::move(udq_active));
+                this->snapshots.back().udq_active.update( std::move(udq_active));
         }
     }
 }
@@ -473,7 +473,7 @@ namespace {
 }

 void Schedule::handleGCONSALE(const HandlerContext& handlerContext, const ParseContext&, ErrorGuard&) {
-    auto new_gconsale = this->snapshots.back().gconsale();
+    auto new_gconsale = this->snapshots.back().gconsale.get();
     for (const auto& record : handlerContext.keyword) {
         const std::string& groupName = record.getItem("GROUP").getTrimmedString(0);
         auto sales_target = record.getItem("SALES_TARGET").get<UDAValue>(0);
@@ -491,11 +491,11 @@ namespace {
             this->updateGroup(std::move(group_ptr), handlerContext.currentStep);
         }
     }
-    this->snapshots.back().update_gconsale( std::move(new_gconsale) );
+    this->snapshots.back().gconsale.update( std::move(new_gconsale) );
 }

 void Schedule::handleGCONSUMP(const HandlerContext& handlerContext, const ParseContext&, ErrorGuard&) {
-    auto new_gconsump = this->snapshots.back().gconsump();
+    auto new_gconsump = this->snapshots.back().gconsump.get();
     for (const auto& record : handlerContext.keyword) {
         const std::string& groupName = record.getItem("GROUP").getTrimmedString(0);
         auto consumption_rate = record.getItem("GAS_CONSUMP_RATE").get<UDAValue>(0);
@@ -510,7 +510,7 @@ namespace {

         new_gconsump.add(groupName, consumption_rate, import_rate, network_node_name, udqconfig, this->m_static.m_unit_system);
     }
-    this->snapshots.back().update_gconsump( std::move(new_gconsump) );
+    this->snapshots.back().gconsump.update( std::move(new_gconsump) );
 }

 void Schedule::handleGEFAC(const HandlerContext& handlerContext, const ParseContext& parseContext, ErrorGuard& errors) {
@@ -690,7 +690,7 @@ namespace {
 }

 void Schedule::handleNODEPROP(const HandlerContext& handlerContext, const ParseContext&, ErrorGuard&) {
-    auto ext_network = this->snapshots.back().network();
+    auto ext_network = this->snapshots.back().network.get();

     for (const auto& record : handlerContext.keyword) {
         const auto& name = record.getItem<ParserKeywords::NODEPROP::NAME>().get<std::string>(0);
@@ -726,7 +726,7 @@ namespace {
         ext_network.add_node(node);
     }

-    this->snapshots.back().update_network( ext_network );
+    this->snapshots.back().network.update( ext_network );
 }

 void Schedule::handleNUPCOL(const HandlerContext& handlerContext, const ParseContext&, ErrorGuard&) {
@@ -742,7 +742,7 @@ namespace {

 void Schedule::handleRPTSCHED(const HandlerContext& handlerContext, const ParseContext&, ErrorGuard&) {
     printf("snapshost.size(): %ld \n", this->snapshots.size());
-    this->snapshots.back().update_rpt_config( RPTConfig(handlerContext.keyword ));
+    this->snapshots.back().rpt_config.update( RPTConfig(handlerContext.keyword ));
 }

 void Schedule::handleTUNING(const HandlerContext& handlerContext, const ParseContext&, ErrorGuard&) {
@@ -966,9 +966,9 @@ namespace {
                 this->updateWell(std::move(well2), handlerContext.currentStep);
             }

-            auto udq_active = UDQActive( this->snapshots.back().udq_active() );
+            auto udq_active = this->snapshots.back().udq_active.get();
             if (properties->updateUDQActive(this->getUDQConfig(handlerContext.currentStep), udq_active))
-                this->snapshots.back().update_udq_active( std::move(udq_active));
+                this->snapshots.back().udq_active.update( std::move(udq_active));
         }
     }
 }
@@ -1028,9 +1028,9 @@ namespace {
             }
         }

-        auto udq_active = UDQActive(this->snapshots.back().udq_active());
+        auto udq_active = this->snapshots.back().udq_active.get();
         if (injection->updateUDQActive(this->getUDQConfig(handlerContext.currentStep), udq_active))
-            this->snapshots.back().update_udq_active( std::move(udq_active) );
+            this->snapshots.back().udq_active.update( std::move(udq_active) );
     }
 }
 }
@@ -1289,9 +1289,9 @@ namespace {
             if (cmode == Well::WELTARGCMode::GUID)
                 update |= well2->updateWellGuideRate(new_arg.get<double>());

-            auto udq_active = UDQActive(this->snapshots.back().udq_active());
+            auto udq_active = this->snapshots.back().udq_active.get();
             if (prop->updateUDQActive(this->getUDQConfig(handlerContext.currentStep), udq_active))
-                this->snapshots.back().update_udq_active( std::move(udq_active));
+                this->snapshots.back().udq_active.update( std::move(udq_active));
         } else {
             auto inj = std::make_shared<Well::WellInjectionProperties>(well2->getInjectionProperties());
             inj->handleWELTARG(cmode, new_arg, SiFactorP);
@@ -1488,12 +1488,12 @@ namespace {

 void Schedule::handleWLIST(const HandlerContext& handlerContext, const ParseContext&, ErrorGuard&) {
     const std::string legal_actions = "NEW:ADD:DEL:MOV";
-    auto new_wlm = this->snapshots.back().wlist_manager();
     for (const auto& record : handlerContext.keyword) {
         const std::string& name = record.getItem("NAME").getTrimmedString(0);
         const std::string& action = record.getItem("ACTION").getTrimmedString(0);
         const std::vector<std::string>& well_args = record.getItem("WELLS").getData<std::string>();
         std::vector<std::string> wells;
+        auto new_wlm = this->snapshots.back().wlist_manager.get();

         if (legal_actions.find(action) == std::string::npos)
             throw std::invalid_argument("The action:" + action + " is not recognized.");
@@ -1528,8 +1528,7 @@ namespace {
             for (const auto& well : wells)
                 wlist.add(well);
         }

-        this->snapshots.back().update_wlist_manager(new_wlm);
+        this->snapshots.back().wlist_manager.update( std::move(new_wlm) );
     }
 }

@@ -1755,7 +1754,7 @@ namespace {
 }

 void Schedule::handleWTEST(const HandlerContext& handlerContext, const ParseContext& parseContext, ErrorGuard& errors) {
-    auto new_config = this->snapshots.back().wtest_config();
+    auto new_config = this->snapshots.back().wtest_config.get();
     for (const auto& record : handlerContext.keyword) {
         const std::string& wellNamePattern = record.getItem("WELL").getTrimmedString(0);
         const auto well_names = wellNames(wellNamePattern, handlerContext.currentStep);
@@ -1774,7 +1773,7 @@ namespace {
             new_config.add_well(well_name, reasons, test_interval, num_test, startup_time, handlerContext.currentStep);
         }
     }
-    this->snapshots.back().update_wtest_config( std::move(new_config) );
+    this->snapshots.back().wtest_config.update( std::move(new_config) );
 }

 void Schedule::handleWTRACER(const HandlerContext& handlerContext, const ParseContext& parseContext, ErrorGuard& errors) {
@@ -1805,7 +1804,7 @@ namespace {
         this->updateWPAVE(wname, handlerContext.currentStep, wpave );

         auto& sched_state = this->snapshots.back();
-        sched_state.update_pavg(std::move(wpave));
+        sched_state.pavg.update(std::move(wpave));
     }

     void Schedule::handleWWPAVE(const HandlerContext& handlerContext, const ParseContext& parseContext, ErrorGuard& errors) {
@@ -495,9 +495,9 @@ void Schedule::iterateScheduleSection(std::size_t load_start, std::size_t load_e
 }

 void Schedule::addACTIONX(const Action::ActionX& action) {
-    auto new_actions = Action::Actions( this->snapshots.back().actions() );
+    auto new_actions = this->snapshots.back().actions.get();
     new_actions.add( action );
-    this->snapshots.back().update_actions( std::move(new_actions) );
+    this->snapshots.back().actions.update( std::move(new_actions) );
 }

 void Schedule::handlePYACTION(const DeckKeyword& keyword) {
@@ -518,9 +518,9 @@ void Schedule::iterateScheduleSection(std::size_t load_start, std::size_t load_e
         module = this->m_static.m_input_path + "/" + module_arg;

     Action::PyAction pyaction(this->m_static.m_python_handle, name, run_count, module);
-    auto new_actions = Action::Actions( this->snapshots.back().actions() );
+    auto new_actions = this->snapshots.back().actions.get();
     new_actions.add(pyaction);
-    this->snapshots.back().update_actions( std::move(new_actions) );
+    this->snapshots.back().actions.update( std::move(new_actions) );
 }

 void Schedule::applyEXIT(const DeckKeyword& keyword, std::size_t report_step) {
@@ -831,10 +831,15 @@ void Schedule::iterateScheduleSection(std::size_t load_start, std::size_t load_e

 void Schedule::addWell(Well well, std::size_t report_step) {
     const std::string wname = well.name();
+    auto& sched_state = this->snapshots.back();

-    this->snapshots.back().events().addEvent( ScheduleEvents::NEW_WELL );
-    this->snapshots.back().wellgroup_events().addWell( wname );
-    this->snapshots.back().well_order( wname );
+    sched_state.events().addEvent( ScheduleEvents::NEW_WELL );
+    sched_state.wellgroup_events().addWell( wname );
+    {
+        auto wo = sched_state.well_order.get();
+        wo.add( wname );
+        sched_state.well_order.update( std::move(wo) );
+    }
     well.setInsertIndex(this->wells_static.size());
     this->wells_static.insert( std::make_pair(wname, DynamicState<std::shared_ptr<Well>>(m_timeMap, nullptr)));
     auto& dynamic_well_state = this->wells_static.at(wname);
@@ -876,7 +881,7 @@ void Schedule::iterateScheduleSection(std::size_t load_start, std::size_t load_e
     this->addWell( std::move(well), timeStep );

     const auto& ts = this->operator[](timeStep);
-    this->updateWPAVE( wellName, timeStep, ts.pavg() );
+    this->updateWPAVE( wellName, timeStep, ts.pavg.get() );
 }


@@ -1063,7 +1068,7 @@ void Schedule::iterateScheduleSection(std::size_t load_start, std::size_t load_e
     else
         sched_state = &this->snapshots.back();

-    return WellMatcher(sched_state->well_order(), sched_state->wlist_manager());
+    return WellMatcher(sched_state->well_order.get(), sched_state->wlist_manager.get());
 }

@@ -1869,15 +1874,16 @@ void Schedule::create_first(const std::chrono::system_clock::time_point& start_t
     sched_state.update_nupcol( this->m_static.m_runspec.nupcol() );
     sched_state.update_oilvap( OilVaporizationProperties( this->m_static.m_runspec.tabdims().getNumPVTTables() ));
     sched_state.update_message_limits( this->m_static.m_deck_message_limits );
-    sched_state.update_wtest_config( WellTestConfig() );
-    sched_state.update_gconsale( GConSale() );
-    sched_state.update_gconsump( GConSump() );
-    sched_state.update_wlist_manager( WListManager() );
-    sched_state.update_network( Network::ExtNetwork() );
-    sched_state.update_rpt_config( RPTConfig() );
-    sched_state.update_actions( Action::Actions() );
-    sched_state.update_udq_active( UDQActive() );
-    sched_state.update_well_order( NameOrder() );
+    sched_state.pavg.update( PAvg() );
+    sched_state.wtest_config.update( WellTestConfig() );
+    sched_state.gconsale.update( GConSale() );
+    sched_state.gconsump.update( GConSump() );
+    sched_state.wlist_manager.update( WListManager() );
+    sched_state.network.update( Network::ExtNetwork() );
+    sched_state.rpt_config.update( RPTConfig() );
+    sched_state.actions.update( Action::Actions() );
+    sched_state.udq_active.update( UDQActive() );
+    sched_state.well_order.update( NameOrder() );
     this->addGroup("FIELD", 0);
 }

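
One detail to keep in mind when reading create_first() above: a default-constructed ptr_member<T> holds a null shared_ptr, and get() dereferences it unconditionally, so every member has to be seeded with an initial value, as done here, before any .get() call. A minimal illustration, reusing the simplified ptr_member sketched earlier:

    ptr_member<PAvg> pavg;             // m_data is a null shared_ptr
    // pavg.get();                     // would dereference null - undefined behaviour
    pavg.update( PAvg() );             // seed with a default value first...
    const PAvg& p = pavg.get();        // ...then access is safe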
@@ -46,8 +46,7 @@ std::chrono::system_clock::time_point clamp_time(std::chrono::system_clock::time


 ScheduleState::ScheduleState(const std::chrono::system_clock::time_point& t1):
-    m_start_time(clamp_time(t1)),
-    m_pavg( std::make_shared<PAvg>())
+    m_start_time(clamp_time(t1))
 {
 }

@@ -83,15 +82,6 @@ std::chrono::system_clock::time_point ScheduleState::end_time() const {
     return this->m_end_time.value();
 }


-void ScheduleState::update_pavg(PAvg arg) {
-    this->m_pavg = std::make_shared<PAvg>( std::move(arg) );
-}
-
-const PAvg& ScheduleState::pavg() const {
-    return *this->m_pavg;
-}
-
 void ScheduleState::update_nupcol(int nupcol) {
     this->m_nupcol = nupcol;
 }
@@ -177,36 +167,39 @@ bool ScheduleState::operator==(const ScheduleState& other) const {
         this->m_geo_keywords == other.m_geo_keywords &&
         this->m_message_limits == other.m_message_limits &&
         this->m_whistctl_mode == other.m_whistctl_mode &&
-        *this->m_wtest_config == *other.m_wtest_config &&
-        *this->m_gconsale == *other.m_gconsale &&
-        *this->m_gconsump == *other.m_gconsump &&
-        *this->m_wlist_manager == *other.m_wlist_manager &&
-        *this->m_rptconfig == *other.m_rptconfig &&
-        *this->m_udq_active == *other.m_udq_active &&
-        this->m_nupcol == other.m_nupcol;
+        this->m_nupcol == other.m_nupcol &&
+        this->wtest_config.get() == other.wtest_config.get() &&
+        this->gconsale.get() == other.gconsale.get() &&
+        this->gconsump.get() == other.gconsump.get() &&
+        this->wlist_manager.get() == other.wlist_manager.get() &&
+        this->rpt_config.get() == other.rpt_config.get() &&
+        this->udq_active.get() == other.udq_active.get();
 }

 ScheduleState ScheduleState::serializeObject() {
     auto t1 = std::chrono::system_clock::now();
     auto t2 = t1 + std::chrono::hours(48);
     ScheduleState ts(t1, t2);
     ts.m_vfpprod.emplace( std::make_pair(77, std::make_shared<VFPProdTable>(VFPProdTable::serializeObject() )));
     ts.m_vfpprod.emplace( std::make_pair(78, std::make_shared<VFPProdTable>(VFPProdTable::serializeObject() )));
     ts.m_vfpinj.emplace( std::make_pair(177, std::make_shared<VFPInjTable>(VFPInjTable::serializeObject() )));
     ts.m_vfpinj.emplace( std::make_pair(178, std::make_shared<VFPInjTable>(VFPInjTable::serializeObject() )));
     ts.m_events = Events::serializeObject();
     ts.update_nupcol(77);
     ts.update_oilvap( Opm::OilVaporizationProperties::serializeObject() );
     ts.m_message_limits = MessageLimits::serializeObject();
     ts.m_whistctl_mode = Well::ProducerCMode::THP;
-    ts.m_wtest_config = std::make_shared<WellTestConfig>( WellTestConfig::serializeObject() );
-    ts.m_gconsump = std::make_shared<GConSump>( GConSump::serializeObject() );
-    ts.m_gconsale = std::make_shared<GConSale>( GConSale::serializeObject() );
-    ts.m_wlist_manager = std::make_shared<WListManager>( WListManager::serializeObject() );
-    ts.m_rptconfig = std::make_shared<RPTConfig>( RPTConfig::serializeObject() );
-    ts.m_vfpprod.emplace( std::make_pair(77, std::make_shared<VFPProdTable>(VFPProdTable::serializeObject() )));
-    ts.m_vfpprod.emplace( std::make_pair(78, std::make_shared<VFPProdTable>(VFPProdTable::serializeObject() )));
-    ts.m_vfpinj.emplace( std::make_pair(177, std::make_shared<VFPInjTable>(VFPInjTable::serializeObject() )));
-    ts.m_vfpinj.emplace( std::make_pair(178, std::make_shared<VFPInjTable>(VFPInjTable::serializeObject() )));
-    ts.m_actions = std::make_shared<Action::Actions>( Action::Actions::serializeObject() );
-    ts.m_udq_active = std::make_shared<UDQActive>( UDQActive::serializeObject() );
-    ts.m_network = std::make_shared<Network::ExtNetwork>( Network::ExtNetwork::serializeObject() );

+    ts.pavg.update( PAvg::serializeObject() );
+    ts.wtest_config.update( WellTestConfig::serializeObject() );
+    ts.gconsump.update( GConSump::serializeObject() );
+    ts.gconsale.update( GConSale::serializeObject() );
+    ts.wlist_manager.update( WListManager::serializeObject() );
+    ts.rpt_config.update( RPTConfig::serializeObject() );
+    ts.actions.update( Action::Actions::serializeObject() );
+    ts.udq_active.update( UDQActive::serializeObject() );
+    ts.network.update( Network::ExtNetwork::serializeObject() );
+    ts.well_order.update( NameOrder::serializeObject() );
     return ts;
 }

@@ -247,55 +240,6 @@ const WellGroupEvents& ScheduleState::wellgroup_events() const {
     return this->m_wellgroup_events;
 }

-const WellTestConfig& ScheduleState::wtest_config() const {
-    return *this->m_wtest_config;
-}
-
-void ScheduleState::update_wtest_config(WellTestConfig wtest_config) {
-    this->m_wtest_config = std::make_shared<WellTestConfig>( std::move(wtest_config) );
-}
-
-const GConSale& ScheduleState::gconsale() const {
-    return *this->m_gconsale;
-}
-
-void ScheduleState::update_gconsale(GConSale gconsale) {
-    this->m_gconsale = std::make_shared<GConSale>( std::move(gconsale) );
-}
-
-const GConSump& ScheduleState::gconsump() const {
-    return *this->m_gconsump;
-}
-
-void ScheduleState::update_gconsump(GConSump gconsump) {
-    this->m_gconsump = std::make_shared<GConSump>( std::move(gconsump) );
-}
-
-const WListManager& ScheduleState::wlist_manager() const {
-    return *this->m_wlist_manager;
-}
-
-void ScheduleState::update_wlist_manager(WListManager wlist_manager) {
-    this->m_wlist_manager = std::make_shared<WListManager>( std::move(wlist_manager) );
-}
-
-const Network::ExtNetwork& ScheduleState::network() const {
-    return *this->m_network;
-}
-
-void ScheduleState::update_network(Network::ExtNetwork network) {
-    this->m_network = std::make_shared<Network::ExtNetwork>( std::move(network) );
-}
-
-const RPTConfig& ScheduleState::rpt_config() const {
-    return *this->m_rptconfig;
-}
-
-
-void ScheduleState::update_rpt_config(RPTConfig rpt_config) {
-    this->m_rptconfig = std::make_shared<RPTConfig>(std::move(rpt_config));
-}
-
 std::vector<std::reference_wrapper<const VFPProdTable>> ScheduleState::vfpprod() const {
     std::vector<std::reference_wrapper<const VFPProdTable>> tables;
     for (const auto& [_, table] : this->m_vfpprod) {
@@ -355,34 +299,4 @@ void ScheduleState::update_vfpinj(VFPInjTable vfpinj) {
     this->m_vfpinj[table_id] = std::make_shared<VFPInjTable>( std::move(vfpinj) );
 }

-const Action::Actions& ScheduleState::actions() const {
-    return *this->m_actions;
-}
-
-void ScheduleState::update_actions(Action::Actions actions) {
-    this->m_actions = std::make_shared<Action::Actions>( std::move(actions) );
-}
-
-const UDQActive& ScheduleState::udq_active() const {
-    return *this->m_udq_active;
-}
-
-void ScheduleState::update_udq_active(UDQActive udq_active) {
-    this->m_udq_active = std::make_shared<UDQActive>( std::move(udq_active) );
-}
-
-const NameOrder& ScheduleState::well_order() const {
-    return *this->m_well_order;
-}
-
-void ScheduleState::update_well_order(NameOrder well_order) {
-    this->m_well_order = std::make_shared<NameOrder>( std::move(well_order) );
-}
-
-void ScheduleState::well_order(const std::string& well) {
-    auto well_order = *this->m_well_order;
-    well_order.add( well );
-    this->m_well_order = std::make_shared<NameOrder>( std::move(well_order) );
-}
-
 }
@@ -353,7 +353,7 @@ namespace {

     const auto nstep = sched.getTimeMap().numTimesteps();
     for (auto step = 0*nstep; step < nstep; ++step) {
-        const auto& nodes = sched[step].network().node_names();
+        const auto& nodes = sched[step].network.get().node_names();
         names.insert(nodes.begin(), nodes.end());
     }

@@ -764,10 +764,10 @@ TSTEP

     Runspec runspec (deck);
     Schedule sched(deck, grid1, fp, runspec, python);
-    const auto& actions0 = sched[0].actions();
+    const auto& actions0 = sched[0].actions.get();
     BOOST_CHECK_EQUAL(actions0.size(), 0U);

-    const auto& actions1 = sched[1].actions();
+    const auto& actions1 = sched[1].actions.get();
     BOOST_CHECK_EQUAL(actions1.size(), 1U);

@@ -804,7 +804,7 @@ TSTEP

     /*****************************************************************/

-    const auto& actions2 = sched[2].actions();
+    const auto& actions2 = sched[2].actions.get();
     BOOST_CHECK_EQUAL(actions2.size(), 2U);

     const auto& actB = actions2.get("B");
@@ -920,8 +920,8 @@ ENDACTIO

     Runspec runspec (deck);
     Schedule sched(deck, grid1, fp, runspec, python);
-    const auto& action1 = sched[1].actions().get("A");
-    const auto& action2 = sched[2].actions().get("A");
+    const auto& action1 = sched[1].actions.get().get("A");
+    const auto& action2 = sched[2].actions.get().get("A");

     BOOST_CHECK(action1.id() != action2.id());

@@ -976,7 +976,7 @@ TSTEP
     auto unit_system = UnitSystem::newMETRIC();
     const auto st = SummaryState{ std::chrono::system_clock::now() };
     Schedule sched = make_schedule(deck_string);
-    const auto& action1 = sched[0].actions().get("A");
+    const auto& action1 = sched[0].actions.get().get("A");
     {
         const auto& group = sched.getGroup("G1", 0);
         const auto& prod = group.productionControls(st);
@@ -1040,7 +1040,7 @@ TSTEP

     Opm::UnitSystem unitSystem = UnitSystem( UnitSystem::UnitType::UNIT_TYPE_METRIC );
     auto sched = make_schedule(input);
-    const auto& action1 = sched[0].actions().get("A");
+    const auto& action1 = sched[0].actions.get().get("A");
     {
         const auto& glo = sched.glo(0);
         BOOST_CHECK(!glo.has_group("PLAT-A"));
@@ -1096,7 +1096,7 @@ TSTEP

     const auto st = SummaryState{ std::chrono::system_clock::now() };
     Schedule sched = make_schedule(deck_string);
-    const auto& action1 = sched[0].actions().get("A");
+    const auto& action1 = sched[0].actions.get().get("A");
     {
         const auto& well = sched.getWell("PROD1", 0);
         BOOST_CHECK_EQUAL( well.getWellPIScalingFactor(1.0), 1.0);
@@ -1138,7 +1138,7 @@ ENDACTIO
     Opm::WListManager wlm;
     Opm::Action::Context context(st, wlm);

-    const auto& config = sched[0].actions();
+    const auto& config = sched[0].actions.get();
     const Opm::Action::ActionX& action = config.get("ACT1");

     /*
@@ -357,7 +357,7 @@ BOOST_AUTO_TEST_CASE(TESTGCONSALE) {
     auto schedule = create_schedule(input);
     double metric_to_si = 1.0 / (24.0 * 3600.0); //cubic meters / day

-    const auto& gconsale = schedule[0].gconsale();
+    const auto& gconsale = schedule[0].gconsale.get();
     BOOST_CHECK_EQUAL(gconsale.size(), 1U);
     BOOST_CHECK(gconsale.has("G1"));
     BOOST_CHECK(!gconsale.has("G2"));
@@ -370,7 +370,7 @@ BOOST_AUTO_TEST_CASE(TESTGCONSALE) {
     BOOST_CHECK_EQUAL(group.min_sales_rate.getSI(), 45000 * metric_to_si);
     BOOST_CHECK(group.max_proc == GConSale::MaxProcedure::WELL);

-    const auto& gconsump = schedule[0].gconsump();
+    const auto& gconsump = schedule[0].gconsump.get();
     BOOST_CHECK_EQUAL(gconsump.size(), 2U);
     BOOST_CHECK(gconsump.has("G1"));
     BOOST_CHECK(gconsump.has("G2"));
@@ -53,7 +53,7 @@ BOOST_AUTO_TEST_CASE(CreateNetwork) {
     Network::ExtNetwork network;
     BOOST_CHECK( !network.active() );
     auto schedule = make_schedule("SCHEDULE\n");
-    auto network2 = schedule[0].network();
+    auto network2 = schedule[0].network.get();
     BOOST_CHECK( !network2.active() );
 }

@@ -206,7 +206,7 @@ BRANPROP

     auto sched = make_schedule(deck_string);
     {
-        const auto& network = sched[0].network();
+        const auto& network = sched[0].network.get();
         const auto& b1 = network.node("B1");
         BOOST_CHECK(b1.as_choke());
         BOOST_CHECK(!b1.add_gas_lift_gas());
@@ -240,7 +240,7 @@ BRANPROP
         BOOST_CHECK(network.active());
     }
     {
-        const auto& network = sched[1].network();
+        const auto& network = sched[1].network.get();
         const auto& b1 = network.node("B1");
         BOOST_CHECK(b1.as_choke());
         BOOST_CHECK(!b1.add_gas_lift_gas());
@@ -311,7 +311,7 @@ BRANPROP
         "B1", "C1", "PLAT-A"
     };

-    auto nodes = sched[0].network().node_names();
+    auto nodes = sched[0].network.get().node_names();
     std::sort(nodes.begin(), nodes.end());

     BOOST_CHECK_EQUAL_COLLECTIONS(nodes.begin(), nodes.end(), expect.begin(), expect.end());
@@ -276,10 +276,10 @@ BOOST_AUTO_TEST_CASE(SerializeWTest) {
     {
         std::vector<Opm::WellTestConfig> value_list;
         std::vector<std::size_t> index_list;
-        sched.pack_state<Opm::WellTestConfig>( value_list, index_list, std::mem_fn( &ScheduleState::wtest_config ));
+        sched.pack_state<Opm::WellTestConfig>( value_list, index_list );
        BOOST_CHECK_EQUAL( value_list.size(), 2 );

-        sched0.unpack_state<Opm::WellTestConfig>( value_list, index_list, std::mem_fn( &ScheduleState::update_wtest_config ));
+        sched0.unpack_state<Opm::WellTestConfig>( value_list, index_list );
     }
     BOOST_CHECK( wtest1 == sched0[0].wtest_config());
     BOOST_CHECK( wtest1 == sched0[1].wtest_config());
@@ -299,9 +299,9 @@ BOOST_AUTO_TEST_CASE(SerializeWList) {
     {
         std::vector<Opm::WListManager> value_list;
         std::vector<std::size_t> index_list;
-        sched.pack_state<Opm::WListManager>( value_list, index_list, std::mem_fn( &ScheduleState::wlist_manager ));
+        sched.pack_state<Opm::WListManager>( value_list, index_list);
         BOOST_CHECK_EQUAL( value_list.size(), 2 );
-        sched0.unpack_state<Opm::WListManager>( value_list, index_list, std::mem_fn(&ScheduleState::update_wlist_manager) );
+        sched0.unpack_state<Opm::WListManager>( value_list, index_list );
     }
     BOOST_CHECK( wlm1 == sched0[0].wlist_manager());
     BOOST_CHECK( wlm1 == sched0[1].wlist_manager());
@@ -317,15 +317,15 @@ BOOST_AUTO_TEST_CASE(SerializeWList) {
 BOOST_AUTO_TEST_CASE(SerializeGCONSALE) {
     auto sched = make_schedule(GCONSALE_deck);
     auto sched0 = make_schedule(deck0);
-    auto gconsale1 = sched[0].gconsale();
-    auto gconsale2 = sched[3].gconsale();
+    auto gconsale1 = sched[0].gconsale.get();
+    auto gconsale2 = sched[3].gconsale.get();

     {
         std::vector<Opm::GConSale> value_list;
         std::vector<std::size_t> index_list;
-        sched.pack_state<Opm::GConSale>( value_list, index_list, std::mem_fn( &ScheduleState::gconsale ));
+        sched.pack_state<Opm::GConSale>( value_list, index_list );
         BOOST_CHECK_EQUAL( value_list.size(), 2 );
-        sched0.unpack_state<Opm::GConSale>( value_list, index_list, std::mem_fn( &ScheduleState::update_gconsale ));
+        sched0.unpack_state<Opm::GConSale>( value_list, index_list );
     }

     BOOST_CHECK( gconsale1 == sched0[0].gconsale());
@@ -340,15 +340,15 @@ BOOST_AUTO_TEST_CASE(SerializeGCONSALE) {
 BOOST_AUTO_TEST_CASE(SerializeGCONSUMP) {
     auto sched = make_schedule(GCONSALE_deck);
     auto sched0 = make_schedule(deck0);
-    auto gconsump1 = sched[0].gconsump();
-    auto gconsump2 = sched[3].gconsump();
+    auto gconsump1 = sched[0].gconsump.get();
+    auto gconsump2 = sched[3].gconsump.get();

     {
         std::vector<Opm::GConSump> value_list;
         std::vector<std::size_t> index_list;
-        sched.pack_state<Opm::GConSump>( value_list, index_list, std::mem_fn( &ScheduleState::gconsump ));
+        sched.pack_state<Opm::GConSump>( value_list, index_list );
         BOOST_CHECK_EQUAL( value_list.size(), 2 );
-        sched0.unpack_state<Opm::GConSump>( value_list, index_list, std::mem_fn( &ScheduleState::update_gconsump ));
+        sched0.unpack_state<Opm::GConSump>( value_list, index_list );
     }

     BOOST_CHECK( gconsump1 == sched0[0].gconsump());
@@ -2900,12 +2900,12 @@ WCONINJH
     BOOST_CHECK_CLOSE( 0.0 * 1e5, pro1.THPH, 1e-5 );

     {
-        const auto& wtest_config = schedule[0].wtest_config();
+        const auto& wtest_config = schedule[0].wtest_config.get();
         BOOST_CHECK_EQUAL(wtest_config.size(), 0U);
     }

     {
-        const auto& wtest_config = schedule[1].wtest_config();
+        const auto& wtest_config = schedule[1].wtest_config.get();
         BOOST_CHECK_EQUAL(wtest_config.size(), 0U);
     }
 }
@@ -3198,12 +3198,12 @@ TSTEP
 BOOST_AUTO_TEST_CASE(WTEST_CONFIG) {
     const auto& schedule = make_schedule(createDeckWTEST());

-    const auto& wtest_config1 = schedule[0].wtest_config();
+    const auto& wtest_config1 = schedule[0].wtest_config.get();
     BOOST_CHECK_EQUAL(wtest_config1.size(), 2U);
     BOOST_CHECK(wtest_config1.has("ALLOW"));
     BOOST_CHECK(!wtest_config1.has("BAN"));

-    const auto& wtest_config2 = schedule[1].wtest_config();
+    const auto& wtest_config2 = schedule[1].wtest_config.get();
     BOOST_CHECK_EQUAL(wtest_config2.size(), 3U);
     BOOST_CHECK(!wtest_config2.has("ALLOW"));
     BOOST_CHECK(wtest_config2.has("BAN"));
@@ -1518,7 +1518,7 @@ BOOST_AUTO_TEST_CASE(IntegrationTest) {
 #include "data/integration_tests/udq.data"
     auto schedule = make_schedule(deck_string);
     {
-        const auto& active = schedule[1].udq_active();
+        const auto& active = schedule[1].udq_active.get();
         BOOST_CHECK_EQUAL(active.IUAD_size(), 6U);

         BOOST_CHECK(active[0].control == UDAControl::WCONPROD_ORAT);
@@ -1598,7 +1598,7 @@ WCONPROD

     // First timestep
     {
-        const auto& udq_active = schedule[0].udq_active();
+        const auto& udq_active = schedule[0].udq_active.get();
         BOOST_CHECK(udq_active);
         BOOST_CHECK_EQUAL(udq_active.IUAD_size(), 2U);

@@ -1619,7 +1619,7 @@ WCONPROD
     // Second timestep
     // - The WUOPRU and WULPRU udq are still used in the same manner for the PROD1 well.
     // - The new UDQs WUXO and WUXL are now used for the PROD2 well.
-    const auto& udq_active = schedule[1].udq_active();
+    const auto& udq_active = schedule[1].udq_active.get();
     BOOST_CHECK(udq_active);
     BOOST_CHECK_EQUAL(udq_active.IUAD_size(), 4U);

@@ -1652,7 +1652,7 @@ WCONPROD
     // Third timestep
     // - The new UDQs WUXO and WUXL are now used for the PROD2 well.
     // - The PROD1 well does not use UDQ
-    const auto& udq_active = schedule[2].udq_active();
+    const auto& udq_active = schedule[2].udq_active.get();
     BOOST_CHECK(udq_active);
     BOOST_CHECK_EQUAL(udq_active.IUAD_size(), 2U);

|
@ -163,7 +163,7 @@ BOOST_AUTO_TEST_CASE(WlistFromDeck) {
|
||||
|
||||
|
||||
Opm::Schedule sched = createSchedule(no_wlist);
|
||||
auto& wlm = sched[1].wlist_manager();
|
||||
auto& wlm = sched[1].wlist_manager.get();
|
||||
BOOST_CHECK(!wlm.hasList("LIST1"));
|
||||
}
|
||||
|
||||
@ -233,7 +233,7 @@ BOOST_AUTO_TEST_CASE(Wlist) {
|
||||
|
||||
auto sched = createSchedule(wlist);
|
||||
{
|
||||
const auto& wlm = sched[1].wlist_manager();
|
||||
const auto& wlm = sched[1].wlist_manager.get();
|
||||
const auto& wl1 = wlm.getList("*LIST1");
|
||||
const auto& wl2 = wlm.getList("*LIST2");
|
||||
const auto& wl4 = wlm.getList("*LIST4");
|
||||
@ -247,7 +247,7 @@ BOOST_AUTO_TEST_CASE(Wlist) {
|
||||
BOOST_CHECK_EQUAL(wl6.wells().size(), 0U );
|
||||
}
|
||||
{
|
||||
const auto& wlm = sched[2].wlist_manager();
|
||||
const auto& wlm = sched[2].wlist_manager.get();
|
||||
const auto& wl1 = wlm.getList("*LIST1");
|
||||
const auto& wl2 = wlm.getList("*LIST2");
|
||||
const auto& wl3 = wlm.getList("*LIST3");
|
||||
@ -292,7 +292,7 @@ BOOST_AUTO_TEST_CASE(WlistPattern) {
|
||||
"/\n";
|
||||
|
||||
auto sched = createSchedule(wlist);
|
||||
const auto& wlm = sched[1].wlist_manager();
|
||||
const auto& wlm = sched[1].wlist_manager.get();
|
||||
BOOST_CHECK( vector_equal(wlm.wells("*LIST1"), {"W1", "W2"}));
|
||||
BOOST_CHECK( vector_equal(wlm.wells("*LIST2"), {"W1", "W3"}));
|
||||
BOOST_CHECK( vector_equal(wlm.wells("*LIST*"), {"W1", "W2", "W3"}));
|
||||
|
@@ -125,7 +125,7 @@ RPTSCHED

     // Empty initial report configuration
     {
-        auto report_config = sched[0].rpt_config();
+        auto report_config = sched[0].rpt_config.get();
         BOOST_CHECK_EQUAL(report_config.size(), 0U);

         BOOST_CHECK(!report_config.contains("FIPFOAM"));
@@ -134,7 +134,7 @@ RPTSCHED

     // Configuration at step 1
     {
-        auto report_config = sched[1].rpt_config();
+        auto report_config = sched[1].rpt_config.get();
         BOOST_CHECK_EQUAL( report_config.size() , 2U);

         for (const auto& p : report_config) {
@@ -153,7 +153,7 @@ RPTSCHED

     // Configuration at step 2 - the special 'NOTHING' has cleared everything
     {
-        auto report_config = sched[2].rpt_config();
+        auto report_config = sched[2].rpt_config.get();
         BOOST_CHECK_EQUAL(report_config.size(), 0U);

         BOOST_CHECK(!report_config.contains("FIPFOAM"));