diff --git a/src/core/include/openvino/core/model.hpp b/src/core/include/openvino/core/model.hpp
index e5d0158e538..dae178b011a 100644
--- a/src/core/include/openvino/core/model.hpp
+++ b/src/core/include/openvino/core/model.hpp
@@ -10,6 +10,7 @@
 #include <list>
 #include <memory>
 #include <string>
+#include <unordered_set>
 #include <vector>
 
 #include "openvino/core/any.hpp"
@@ -554,6 +555,7 @@ private:
     // of weak_ptr not to increase node ref counter to prevent the situation when
     // node has no consumers but still exists in a graph.
     mutable std::vector<std::weak_ptr<ov::Node>> m_cached_ordered_ops;
+    mutable std::unordered_set<ov::Node*> m_cached_ops;
 
     mutable std::unordered_map<std::string, ov::Output<ov::Node>> m_cached_output_names;
     mutable std::unordered_map<std::string, std::shared_ptr<ov::Node>> m_cached_op_names;
diff --git a/src/core/src/model.cpp b/src/core/src/model.cpp
index 142514be453..d4fd18df72c 100644
--- a/src/core/src/model.cpp
+++ b/src/core/src/model.cpp
@@ -318,6 +318,7 @@ std::vector<std::shared_ptr<ov::Node>> ov::Model::get_ordered_ops() const {
     m_cached_ordered_ops.clear();
     for_each(order.cbegin(), order.cend(), [this](const shared_ptr<Node>& node) {
         m_cached_ordered_ops.push_back(node);
+        m_cached_ops.insert(node.get());
         node->insert_info(m_shared_rt_info);
     });
     m_cached_output_names.clear();
@@ -923,6 +924,9 @@ ov::Output<ov::Node> ov::Model::add_output(const std::string& op_name, size_t ou
 }
 
 ov::Output<ov::Node> ov::Model::add_output(const ov::Output<ov::Node>& port) {
+    auto cache_valid = [&]() {
+        return m_cached_ops.count(port.get_node());
+    };
     if (ov::op::util::is_output(port.get_node()))
         return port;
     for (const auto& input : port.get_target_inputs()) {
@@ -934,9 +938,14 @@ ov::Output<ov::Node> ov::Model::add_output(const ov::Output<ov::Node>& port) {
     auto result = std::make_shared<ov::op::v0::Result>(port);
     m_results.push_back(result);
     if (m_shared_rt_info->get_use_topological_cache()) {
-        // Full update of topological cache is not needed, 'result' can be just inserted to the end
-        m_cached_ordered_ops.push_back(result);
-        result->insert_info(m_shared_rt_info);  // Just for consistency, not required for Result nodes
+        if (cache_valid()) {
+            // Full update of topological cache is not needed, 'result' can be just inserted to the end
+            m_cached_ordered_ops.push_back(result);
+            m_cached_ops.insert(result.get());
+            result->insert_info(m_shared_rt_info);  // Just for consistency, not required for Result nodes
+        } else {
+            m_shared_rt_info->set_use_topological_cache(false);
+        }
     }
     return result->output(0);
 }
diff --git a/src/core/tests/model.cpp b/src/core/tests/model.cpp
index f5550c62c79..3ba65dd8c0a 100644
--- a/src/core/tests/model.cpp
+++ b/src/core/tests/model.cpp
@@ -1034,6 +1034,31 @@ TEST(model, add_output_port) {
     EXPECT_EQ(f->get_results()[1]->input_value(0).get_node(), relu1.get());
 }
 
+TEST(model, add_output_to_new_subgraph) {
+    auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
+    arg0->set_friendly_name("data");
+    arg0->get_output_tensor(0).set_names({"input"});
+
+    auto relu1 = std::make_shared<ov::opset8::Relu>(arg0);
+    relu1->set_friendly_name("relu1");
+    relu1->get_output_tensor(0).set_names({"relu_t1"});
+
+    auto relu2 = std::make_shared<ov::opset8::Relu>(relu1);
+    relu2->set_friendly_name("relu2");
+    relu2->get_output_tensor(0).set_names({"relu_t2"});
+    auto f = std::make_shared<ov::Model>(relu2, ov::ParameterVector{arg0});
+    f->validate_nodes_and_infer_types();
+
+    EXPECT_EQ(f->get_results().size(), 1);
+
+    ov::Output<ov::Node> out;
+    EXPECT_NO_THROW(
+        out = f->add_output(ov::opset8::Constant::create(ov::element::i32, {1}, std::vector<int32_t>{1})->output(0)));
+    EXPECT_NO_THROW(f->get_ordered_ops());
+    EXPECT_EQ(out.get_node(), f->get_results()[1].get());
+    EXPECT_EQ(f->get_results().size(), 2);
+}
+
 TEST(model, add_output_incorrect_tensor_name) {
     auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
     arg0->set_friendly_name("data");