diff --git a/src/common/offline_transformations/src/compress_quantize_weigths.cpp b/src/common/offline_transformations/src/compress_quantize_weigths.cpp index f5892aeb7bb..320e40dda42 100644 --- a/src/common/offline_transformations/src/compress_quantize_weigths.cpp +++ b/src/common/offline_transformations/src/compress_quantize_weigths.cpp @@ -67,7 +67,9 @@ ngraph::pass::CompressQuantizeWeights::CompressQuantizeWeights() { if (fq_users.size() == 1 && has_dequantization_subgraph(fq_users[0])) { auto& first_convert = fq_users[0]; if (auto new_weights = ov::get_constant_from_source(first_convert)) { + new_weights->set_friendly_name(first_convert->get_friendly_name()); replace_node(first_convert, new_weights); + copy_runtime_info(first_convert, new_weights); // preserve dequantization subgraph for LP transformations auto weights_users = new_weights->get_users(); if (weights_users.size() == 1 && ov::is_type(weights_users[0])) { diff --git a/src/common/offline_transformations/src/pruning/shrink_weights.cpp b/src/common/offline_transformations/src/pruning/shrink_weights.cpp index 16c8c18805a..4d13e020428 100644 --- a/src/common/offline_transformations/src/pruning/shrink_weights.cpp +++ b/src/common/offline_transformations/src/pruning/shrink_weights.cpp @@ -118,6 +118,7 @@ static bool handle_variadic_split(const std::shared_ptr& split) { const auto& split_lengths_type = split_lengths_node->get_output_element_type(0); const auto sub_const = ngraph::opset6::Constant::create(split_lengths_type, {sub_values.size()}, sub_values); const auto sub = std::make_shared(split->input_value(2), sub_const); + copy_runtime_info(split->get_input_source_output(2).get_node_shared_ptr(), {sub_const, sub}); split->input(2).replace_source_output(sub); return true; @@ -259,6 +260,7 @@ bool ngraph::pass::ShrinkWeights::run_on_model(const std::shared_ptrget_element_type(), Shape{res.size()}, res); replace_node(const_node, new_const); + copy_runtime_info(const_node, new_const); NGRAPH_DEBUG << 
"Transform shape like (" << last_output.get_node()->get_friendly_name() << "): " << const_node->get_shape_val() << " to " << new_const->get_shape_val() << std::endl; new_const->set_friendly_name(const_node->get_friendly_name()); @@ -303,6 +305,7 @@ bool ngraph::pass::ShrinkWeights::run_on_model(const std::shared_ptr +#include #include ov::pass::AlignEltwiseInputRanks::AlignEltwiseInputRanks() { @@ -50,6 +51,7 @@ ov::pass::AlignEltwiseInputRanks::AlignEltwiseInputRanks() { Shape new_shape = const_shape; new_shape.insert(new_shape.begin(), diff, 1); auto new_const = std::make_shared(*const_node, new_shape); + copy_runtime_info(node->get_input_node_shared_ptr(i), new_const); node->input(i).replace_source_output(new_const); } } diff --git a/src/common/transformations/src/transformations/common_optimizations/fold_subgraph_empty_inputs.cpp b/src/common/transformations/src/transformations/common_optimizations/fold_subgraph_empty_inputs.cpp index 67178115a82..521422a23f0 100644 --- a/src/common/transformations/src/transformations/common_optimizations/fold_subgraph_empty_inputs.cpp +++ b/src/common/transformations/src/transformations/common_optimizations/fold_subgraph_empty_inputs.cpp @@ -55,6 +55,7 @@ ov::pass::FoldSubgraphEmptyInputs::FoldSubgraphEmptyInputs() { std::end(multi_subgraph_op_inputs), input, const_empty_replacement); + copy_runtime_info(input.get_node_shared_ptr(), const_empty_replacement.get_node_shared_ptr()); } multi_subgraph_op->set_arguments(multi_subgraph_op_inputs); return true; diff --git a/src/common/transformations/src/transformations/common_optimizations/remove_concat_zero_dim_input.cpp b/src/common/transformations/src/transformations/common_optimizations/remove_concat_zero_dim_input.cpp index 6fd4eba52c0..94be035eed8 100644 --- a/src/common/transformations/src/transformations/common_optimizations/remove_concat_zero_dim_input.cpp +++ b/src/common/transformations/src/transformations/common_optimizations/remove_concat_zero_dim_input.cpp @@ -53,6 
+53,7 @@ ov::pass::RemoveConcatZeroDimInput::RemoveConcatZeroDimInput() { const auto& empty_constant = opset8::Constant::create(concat->get_output_element_type(0), concat->get_output_partial_shape(0).to_shape(), {}); + copy_runtime_info(concat, empty_constant); concat->output(0).replace(empty_constant); empty_constant->set_friendly_name(concat->get_friendly_name()); } else { diff --git a/src/common/transformations/src/transformations/common_optimizations/transpose_sinking_split.cpp b/src/common/transformations/src/transformations/common_optimizations/transpose_sinking_split.cpp index 1171bd0aeb4..b623cf6b830 100644 --- a/src/common/transformations/src/transformations/common_optimizations/transpose_sinking_split.cpp +++ b/src/common/transformations/src/transformations/common_optimizations/transpose_sinking_split.cpp @@ -155,6 +155,10 @@ ov::pass::TransposeSinkingSplitBackward::TransposeSinkingSplitBackward() { Shape{}, reversed_transposed_split_axis); split->input(1).replace_source_output(new_split_axis_const); + copy_runtime_info({split_axis_constant, + output_transpose.transpose->shared_from_this(), + output_transpose.transpose_const->shared_from_this()}, + new_split_axis_const); // remove split output transposes for (size_t output_idx = 0; output_idx < split->get_output_size(); ++output_idx) { @@ -196,6 +200,8 @@ ov::pass::TransposeSinkingSplitForward::TransposeSinkingSplitForward() { auto new_split_axis_const = std::make_shared(split_axis_constant->get_element_type(), Shape{}, transposed_split_axis); split_node->input(1).replace_source_output(new_split_axis_const); + copy_runtime_info({split_axis_constant, transpose_input_info.transpose, transpose_input_info.transpose_const}, + new_split_axis_const); return true; }; diff --git a/src/common/transformations/src/transformations/smart_reshape/shape_of_const_folding.cpp b/src/common/transformations/src/transformations/smart_reshape/shape_of_const_folding.cpp index 0ff1f214c27..ac344ccaa1b 100644 --- 
a/src/common/transformations/src/transformations/smart_reshape/shape_of_const_folding.cpp +++ b/src/common/transformations/src/transformations/smart_reshape/shape_of_const_folding.cpp @@ -4,6 +4,8 @@ #include "transformations/smart_reshape/shape_of_const_folding.hpp" +#include + #include "itt.hpp" #include "openvino/core/validation_util.hpp" #include "openvino/op/shape_of.hpp" @@ -19,6 +21,7 @@ ov::pass::ShapeOfConstFolding::ShapeOfConstFolding() { auto node = m.get_match_root(); if (auto constant = get_constant_from_source(node)) { constant->set_friendly_name(node->get_friendly_name()); + copy_runtime_info(node, constant); replace_node(node, constant); return true; } diff --git a/src/core/include/openvino/core/runtime_attribute.hpp b/src/core/include/openvino/core/runtime_attribute.hpp index e8be40caebb..452b9a813b0 100644 --- a/src/core/include/openvino/core/runtime_attribute.hpp +++ b/src/core/include/openvino/core/runtime_attribute.hpp @@ -30,6 +30,7 @@ public: using Base = std::tuple<::ov::RuntimeAttribute>; virtual ~RuntimeAttribute() = default; virtual bool is_copyable() const; + virtual bool is_copyable(const std::shared_ptr& to) const; virtual Any init(const std::shared_ptr& node) const; virtual Any merge(const ov::NodeVector& nodes) const; virtual Any merge(const ov::OutputVector& outputs) const; diff --git a/src/core/include/openvino/op/shape_of.hpp b/src/core/include/openvino/op/shape_of.hpp index 2c9b5016b78..448340bd582 100644 --- a/src/core/include/openvino/op/shape_of.hpp +++ b/src/core/include/openvino/op/shape_of.hpp @@ -4,16 +4,16 @@ #pragma once -#include "openvino/op/op.hpp" +#include "openvino/op/util/shape_of_base.hpp" namespace ov { namespace op { namespace v3 { /// \brief Operation that returns the shape of its input argument as a tensor. 
/// \ingroup ov_ops_cpp_api -class OPENVINO_API ShapeOf : public Op { +class OPENVINO_API ShapeOf : public util::ShapeOfBase { public: - OPENVINO_OP("ShapeOf", "opset3", op::Op, 3); + OPENVINO_OP("ShapeOf", "opset3", util::ShapeOfBase, 3); ShapeOf() = default; /// \brief Constructs a shape-of operation. ShapeOf(const Output& arg, const element::Type output_type = element::i64); @@ -51,9 +51,9 @@ private: namespace v0 { /// \brief Operation that returns the shape of its input argument as a tensor. /// \ingroup ov_ops_cpp_api -class OPENVINO_API ShapeOf : public Op { +class OPENVINO_API ShapeOf : public util::ShapeOfBase { public: - OPENVINO_OP("ShapeOf", "opset1"); + OPENVINO_OP("ShapeOf", "opset1", util::ShapeOfBase); ShapeOf() = default; /// \brief Constructs a shape-of operation. ShapeOf(const Output& arg); diff --git a/src/core/include/openvino/op/util/shape_of_base.hpp b/src/core/include/openvino/op/util/shape_of_base.hpp new file mode 100644 index 00000000000..87e529ab839 --- /dev/null +++ b/src/core/include/openvino/op/util/shape_of_base.hpp @@ -0,0 +1,23 @@ +// Copyright (C) 2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace util { +class OPENVINO_API ShapeOfBase : public Op { +public: + OPENVINO_OP("ShapeOfBase", "util"); + + ShapeOfBase() = default; + + /// \brief Constructs a ShapeOfBase operation.
+ explicit ShapeOfBase(const OutputVector& arguments) : Op(arguments) {} +}; +} // namespace util +} // namespace op +} // namespace ov diff --git a/src/core/include/openvino/pass/constant_folding.hpp b/src/core/include/openvino/pass/constant_folding.hpp index f95797601dc..668eca75292 100644 --- a/src/core/include/openvino/pass/constant_folding.hpp +++ b/src/core/include/openvino/pass/constant_folding.hpp @@ -22,7 +22,7 @@ public: bool run_on_model(const std::shared_ptr& model) override; protected: - void copy_runtime_info_to_target_inputs(const std::shared_ptr& node, const Output& replacement); + void copy_runtime_info_from_input_values(const std::shared_ptr& node); /// \brief Folds pre-calculated output tensor values to constants in case lower and /// upper estimations are equal. Traverses graph backwards starting from the results. bool pre_calculated_values_folding(const std::shared_ptr& model); diff --git a/src/core/src/node.cpp b/src/core/src/node.cpp index aa13c132e13..4f753c68464 100644 --- a/src/core/src/node.cpp +++ b/src/core/src/node.cpp @@ -5,6 +5,7 @@ #include "ngraph/node.hpp" #include +#include #include #include #include @@ -814,8 +815,10 @@ bool ov::Node::constant_fold(OutputVector& output_values, const OutputVector& in if (!all_constants) return false; + NodeVector nodes; TensorVector input_tensors; for (const auto& input : input_values) { + nodes.push_back(input.get_node_shared_ptr()); auto constant = ov::as_type_ptr(input.get_node_shared_ptr()); auto tensor = ov::Tensor(input.get_element_type(), input.get_shape()); std::copy_n(constant->get_data_ptr(), constant->get_byte_size(), static_cast(tensor.data())); @@ -833,6 +836,7 @@ bool ov::Node::constant_fold(OutputVector& output_values, const OutputVector& in output_values[i] = make_shared(output_tensors[i].get_element_type(), output_tensors[i].get_shape(), output_tensors[i].data()); + copy_runtime_info(nodes, output_values[i].get_node_shared_ptr()); } return true; } diff --git 
a/src/core/src/op/shape_of.cpp b/src/core/src/op/shape_of.cpp index 722638dbd2a..8631d8a6f86 100644 --- a/src/core/src/op/shape_of.cpp +++ b/src/core/src/op/shape_of.cpp @@ -21,7 +21,9 @@ using namespace std; using namespace ngraph; -op::v3::ShapeOf::ShapeOf(const Output& arg, element::Type output_type) : Op({arg}), m_output_type(output_type) { +op::v3::ShapeOf::ShapeOf(const Output& arg, element::Type output_type) + : ShapeOfBase({arg}), + m_output_type(output_type) { constructor_validate_and_infer_types(); } @@ -206,7 +208,7 @@ bool op::v3::ShapeOf::constant_fold(OutputVector& output_values, const OutputVec } // op::v0::ShapeOf -op::v0::ShapeOf::ShapeOf(const Output& arg) : Op({arg}) { +op::v0::ShapeOf::ShapeOf(const Output& arg) : ShapeOfBase({arg}) { constructor_validate_and_infer_types(); } diff --git a/src/core/src/pass/constant_folding.cpp b/src/core/src/pass/constant_folding.cpp index dbdbc8f06f5..c749a95e24e 100644 --- a/src/core/src/pass/constant_folding.cpp +++ b/src/core/src/pass/constant_folding.cpp @@ -9,9 +9,10 @@ #include "openvino/core/rt_info.hpp" #include "openvino/core/validation_util.hpp" #include "openvino/op/constant.hpp" +#include "openvino/op/util/op_types.hpp" +#include "openvino/op/util/read_value_base.hpp" +#include "openvino/op/util/shape_of_base.hpp" #include "openvino/op/util/sub_graph_base.hpp" -#include "openvino/opsets/opset1.hpp" -#include "openvino/opsets/opset3.hpp" using namespace std; @@ -52,6 +53,7 @@ const auto friendly_name_from = [](const ov::Node& node, const size_t output_cou bool ov::pass::ConstantFolding::run_on_model(const std::shared_ptr& model) { RUN_ON_MODEL_SCOPE(ConstantFolding); + bool rewritten = pre_calculated_values_folding(model); for (const auto& node : model->get_ordered_ops()) { @@ -76,8 +78,11 @@ bool ov::pass::ConstantFolding::run_on_model(const std::shared_ptr& m replacement.get_node()->set_friendly_name(friendly_name_from(*node, replacements.size(), i)); node_output.replace(replacement); - // 
Propagate runtime info attributes to replacement consumer nodes - copy_runtime_info_to_target_inputs(node, replacement); + // Copy runtime info from source nodes + // when it was not propagated during pre-calculation + copy_runtime_info_from_input_values(node); + // Propagate runtime info attributes to replacement + copy_runtime_info(node, replacement.get_node_shared_ptr()); rewritten = true; } @@ -96,12 +101,17 @@ bool ov::pass::ConstantFolding::run_on_model(const std::shared_ptr& m return rewritten; } -void ov::pass::ConstantFolding::copy_runtime_info_to_target_inputs(const std::shared_ptr& node, - const Output& replacement) { - for (auto& input : replacement.get_target_inputs()) { - auto consumer = input.get_node()->shared_from_this(); - copy_runtime_info({node, consumer}, consumer); +void ov::pass::ConstantFolding::copy_runtime_info_from_input_values(const std::shared_ptr& node) { + if (is_type(node)) { + // Don't propagate names of ShapeOf source node since it is not fused itself + return; } + // Add node itself to merge original rt info with rt info of inputs + ov::NodeVector from = {node}; + for (auto& input : node->input_values()) { + from.push_back(input.get_node_shared_ptr()); + } + copy_runtime_info(from, node); } bool ov::pass::ConstantFolding::pre_calculated_values_folding(const std::shared_ptr& model) { @@ -115,14 +125,20 @@ bool ov::pass::ConstantFolding::pre_calculated_values_folding(const std::shared_ if (constant_folding_is_disabled(node)) { can_be_folded = false; - } else if (is_type(node) || is_type(node)) { + } else if (is_type(node)) { // In case if node is ShapeOf operation we stop propagation of can_be_folded attribute. We have to limit // propagation because we can't detect borders of shape_of sub-graphs, so we propagate can_be_folded // attribute through all nodes including nodes on data path. So to limit the spread of attribute to other // shape-of sub-graphs we do not propagate it through ShapeOf nodes.
- can_be_folded = true; + can_be_folded = input_values.begin()->get_partial_shape().is_static(); + } else if (op::util::is_parameter(node) || op::util::is_output(node) || op::util::is_sink(node) || + is_type(node)) { + can_be_folded = false; } else { can_be_folded = std::all_of(input_values.cbegin(), input_values.cend(), is_output_foldable); + if (input_values.size() && can_be_folded) { + copy_runtime_info_from_input_values(node); + } } node->get_rt_info()["can_be_folded"] = can_be_folded; } @@ -151,8 +167,8 @@ bool ov::pass::ConstantFolding::pre_calculated_values_folding(const std::shared_ friendly_name_from(*input_node, input_node->get_output_size(), output.get_index())); output.replace(replacement); - // Propagate runtime info attributes to replacement consumer nodes - copy_runtime_info_to_target_inputs(input_node, replacement); + // Propagate runtime info attributes to replacement + copy_runtime_info(input_node, replacement); rewritten = true; } diff --git a/src/core/src/rt_info.cpp b/src/core/src/rt_info.cpp index 4baa061fbb2..c4dc167e94a 100644 --- a/src/core/src/rt_info.cpp +++ b/src/core/src/rt_info.cpp @@ -4,17 +4,18 @@ #include "openvino/core/rt_info.hpp" -#include "ngraph/variant.hpp" +#include "openvino/op/util/op_types.hpp" namespace { -std::unordered_map> get_copyable_attrs(const ov::OutputVector& outputs) { +std::unordered_map> get_copyable_attrs(const ov::OutputVector& outputs, + const ov::Output& to) { std::unordered_map> attrs; for (const auto& output : outputs) { for (const auto& item : output.get_rt_info()) { bool copy = true; if (item.second.is()) { - copy = item.second.as().is_copyable(); + copy = item.second.as().is_copyable(to.get_node_shared_ptr()); } if (copy) { attrs[item.first].push_back(item.second); @@ -24,13 +25,14 @@ std::unordered_map> get_copyable_attrs(const o return attrs; } -std::unordered_map> get_copyable_attrs(const ov::NodeVector& nodes) { +std::unordered_map> get_copyable_attrs(const ov::NodeVector& nodes, + const 
std::shared_ptr& to) { std::unordered_map> attrs; for (const auto& node : nodes) { for (const auto& item : node->get_rt_info()) { bool copy = item.first != "opset"; if (item.second.is()) { - copy = copy && item.second.as().is_copyable(); + copy = copy && item.second.as().is_copyable(to); } if (copy) { attrs[item.first].push_back(item.second); @@ -41,8 +43,8 @@ std::unordered_map> get_copyable_attrs(const o } template -ov::Node::RTMap mergeRuntimeInfo(const T& items) { - std::unordered_map> attrs = get_copyable_attrs(items); +ov::Node::RTMap mergeRuntimeInfo(const std::vector& items, const T& to) { + std::unordered_map> attrs = get_copyable_attrs(items, to); ov::Node::RTMap merged_attrs; for (auto& item : attrs) { @@ -80,50 +82,61 @@ void assign_runtime_info(const ov::Node::RTMap& from, ov::Node::RTMap& to) { } } +ov::NodeVector list_with_constants(const ov::NodeVector& to) { + ov::NodeVector ops = to; + for (auto& node : to) { + if (!node) { + continue; + } + for (auto& input : node->inputs()) { + auto source_node = input.get_source_output().get_node_shared_ptr(); + if (ov::op::util::is_constant(source_node) && (0 == source_node->get_rt_info().size())) { + if (std::find(ops.begin(), ops.end(), source_node) == ops.end()) { + ops.push_back(source_node); + } + } + } + } + return ops; +} + +ov::OutputVector list_with_constants(const ov::OutputVector& to) { + ov::OutputVector ops = to; + for (auto& node : to) { + for (auto& input : node.get_node()->inputs()) { + auto source_node = input.get_source_output(); + if (ov::op::util::is_constant(source_node.get_node_shared_ptr()) && + (0 == source_node.get_rt_info().size())) { + if (std::find(ops.begin(), ops.end(), source_node) == ops.end()) { + ops.push_back(source_node); + } + } + } + } + return ops; +} } // namespace void ov::copy_runtime_info(const std::shared_ptr& from, const std::shared_ptr& to) { - auto& attrs = to->get_rt_info(); - auto opset = get_opset(attrs); - - for (const auto& item : from->get_rt_info()) { - 
bool copy = item.first != "opset"; - if (item.second.is()) { - copy = copy && item.second.as().is_copyable(); - } - if (copy) { - attrs[item.first] = item.second; - } - } - - if (!opset.empty()) { - attrs["opset"] = opset; - } + return copy_runtime_info(ov::NodeVector{from}, ov::NodeVector{to}); } void ov::copy_runtime_info(const std::shared_ptr& from, ov::NodeVector to) { - for (auto& op : to) { - copy_runtime_info(from, op); - } + return copy_runtime_info(ov::NodeVector{from}, to); } void ov::copy_runtime_info(const ov::NodeVector& from, const std::shared_ptr& to) { - auto& rtInfoTo = to->get_rt_info(); - assign_runtime_info(mergeRuntimeInfo(from), rtInfoTo); + return copy_runtime_info(from, ov::NodeVector{to}); } void ov::copy_runtime_info(const ov::NodeVector& from, ov::NodeVector to) { - auto mergedInfo = mergeRuntimeInfo(from); - for (auto& node : to) { - auto& rtInfoTo = node->get_rt_info(); - assign_runtime_info(mergedInfo, rtInfoTo); + for (auto& node : list_with_constants(to)) { + assign_runtime_info(mergeRuntimeInfo(from, node), node->get_rt_info()); } } void ov::copy_output_runtime_info(const ov::OutputVector& from, ov::OutputVector to) { - auto mergedInfo = mergeRuntimeInfo(from); - for (auto& node : to) { - auto& rtInfoTo = node.get_rt_info(); - assign_runtime_info(mergedInfo, rtInfoTo); + for (auto& node : list_with_constants(to)) { + assign_runtime_info(mergeRuntimeInfo(from, node), node.get_rt_info()); } } diff --git a/src/core/src/runtime_attribute.cpp b/src/core/src/runtime_attribute.cpp index 91a0606a08d..97a54fdc9c5 100644 --- a/src/core/src/runtime_attribute.cpp +++ b/src/core/src/runtime_attribute.cpp @@ -32,6 +32,10 @@ bool RuntimeAttribute::is_copyable() const { return true; } +bool RuntimeAttribute::is_copyable(const std::shared_ptr& to) const { + return is_copyable(); +} + std::ostream& operator<<(std::ostream& os, const RuntimeAttribute& attrubute) { return os << attrubute.to_string(); } diff --git a/src/core/tests/constant_folding.cpp 
b/src/core/tests/constant_folding.cpp index b583533449f..5d542a8eac9 100644 --- a/src/core/tests/constant_folding.cpp +++ b/src/core/tests/constant_folding.cpp @@ -18,9 +18,13 @@ using namespace ngraph; using namespace std; +static std::shared_ptr get_result_constant(std::shared_ptr f, size_t pos = 0) { + return ov::as_type_ptr(f->get_results().at(pos)->input_value(0).get_node_shared_ptr()); +} + template -static std::vector get_result_constant(std::shared_ptr f, size_t pos) { - auto new_const = ov::as_type_ptr(f->get_results().at(pos)->input_value(0).get_node_shared_ptr()); +static std::vector get_result_constant_data(std::shared_ptr f, size_t pos) { + auto new_const = get_result_constant(f, pos); return new_const->cast_vector(); } @@ -38,6 +42,66 @@ typename std::enable_if::value>::type range_test_check(const ASSERT_EQ(values_out, values_expected); } +std::ostream& operator<<(std::ostream& os, const std::vector& s) { + os << "["; + for (auto it = s.begin(); it != s.end(); ++it) { + if (it != s.begin()) { + os << ", " << *it; + } else { + os << *it; + } + } + os << "]"; + return os; +} + +void run_constant_folding(std::shared_ptr& model) { + pass::Manager pass_manager; + pass_manager.register_pass(); + pass_manager.register_pass(); + pass_manager.run_passes(model); +} + +static void check_names(const std::shared_ptr& node, + const std::vector& expected_fused_names, + const std::string expected_name = "test", + bool exact = true) { + EXPECT_TRUE(node); + + // Check node name + ASSERT_EQ(node->get_friendly_name(), expected_name); + + // Check fused name + ASSERT_TRUE(!expected_fused_names.empty()); + std::vector fused_names = ngraph::getFusedNamesVector(node); + if (exact) { + std::vector expected_sorted = expected_fused_names; + std::sort(fused_names.begin(), fused_names.end()); + std::sort(expected_sorted.begin(), expected_sorted.end()); + bool is_equal = std::equal(fused_names.begin(), fused_names.end(), expected_sorted.begin()); + std::stringstream ss; + if 
(!is_equal) { + ss << "Expected names are not matched to the fused names. Expected '" << expected_fused_names + << "' but actually received '" << fused_names << "'"; + } + ASSERT_TRUE(is_equal) << ss.str(); + } else { + bool is_expected_name_missed = false; + for (auto& name : expected_fused_names) { + if (std::find(fused_names.begin(), fused_names.end(), name) == fused_names.end()) { + is_expected_name_missed = true; + break; + } + } + std::stringstream ss; + if (is_expected_name_missed) { + ss << "Not all expected names are found in fused names. Expected '" << expected_fused_names + << "' but actually received '" << fused_names << "'"; + } + ASSERT_FALSE(is_expected_name_missed) << ss.str(); + } +} + TEST(constant_folding, acosh) { Shape shape_in{2, 4, 1}; @@ -47,21 +111,21 @@ TEST(constant_folding, acosh) { expected.push_back(std::acosh(f)); } auto constant = make_shared(element::f32, shape_in, values_in); + constant->set_friendly_name("constant"); auto acosh = make_shared(constant); acosh->set_friendly_name("test"); auto f = make_shared(acosh, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); EXPECT_EQ(count_ops_of_type(f), 0); EXPECT_EQ(count_ops_of_type(f), 1); ASSERT_EQ(f->get_results().size(), 1); - auto new_const = ov::as_type_ptr(f->get_results()[0]->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); EXPECT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + + check_names(new_const, {"constant", "test"}); auto values_out = new_const->get_vector(); EXPECT_TRUE(test::all_close_f(expected, values_out, MIN_FLOAT_TOLERANCE_BITS)); @@ -76,21 +140,20 @@ TEST(constant_folding, asinh) { expected.push_back(std::asinh(f)); } auto constant = make_shared(element::f32, shape_in, values_in); + constant->set_friendly_name("constant"); auto asinh = make_shared(constant); asinh->set_friendly_name("test"); auto f = make_shared(asinh, 
ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); EXPECT_EQ(count_ops_of_type(f), 0); EXPECT_EQ(count_ops_of_type(f), 1); ASSERT_EQ(f->get_results().size(), 1); - auto new_const = ov::as_type_ptr(f->get_results()[0]->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); EXPECT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant", "test"}); auto values_out = new_const->get_vector(); EXPECT_TRUE(test::all_close_f(expected, values_out, MIN_FLOAT_TOLERANCE_BITS)); @@ -105,21 +168,20 @@ TEST(constant_folding, atanh) { expected.push_back(std::atanh(f)); } auto constant = make_shared(element::f32, shape_in, values_in); + constant->set_friendly_name("constant"); auto atanh = make_shared(constant); atanh->set_friendly_name("test"); auto f = make_shared(atanh, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); EXPECT_EQ(count_ops_of_type(f), 0); EXPECT_EQ(count_ops_of_type(f), 1); ASSERT_EQ(f->get_results().size(), 1); - auto new_const = ov::as_type_ptr(f->get_results()[0]->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); EXPECT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant", "test"}); auto values_out = new_const->get_vector(); EXPECT_TRUE(test::all_close_f(expected, values_out, MIN_FLOAT_TOLERANCE_BITS)); @@ -132,22 +194,22 @@ TEST(constant_folding, constant_squeeze) { vector values_in{0, 1, 2, 3, 4, 5, 6, 7}; auto constant = make_shared(element::f32, shape_in, values_in); + constant->set_friendly_name("constant"); vector values_axes{2}; auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + constant_axes->set_friendly_name("constant_axes"); auto squeeze = make_shared(constant, constant_axes); 
squeeze->set_friendly_name("test"); auto f = make_shared(squeeze, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); - ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + auto new_const = get_result_constant(f); + EXPECT_TRUE(new_const); + check_names(new_const, {"constant", "constant_axes", "test"}); ASSERT_EQ(new_const->get_shape(), shape_out); auto values_out = new_const->get_vector(); @@ -161,22 +223,22 @@ TEST(constant_folding, constant_unsqueeze) { vector values_in{0, 1, 2, 3, 4, 5, 6, 7}; auto constant = make_shared(element::f32, shape_in, values_in); + constant->set_friendly_name("constant"); vector values_axes{2, 3}; auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + constant_axes->set_friendly_name("constant_axes"); auto unsqueeze = make_shared(constant, constant_axes); unsqueeze->set_friendly_name("test"); auto f = make_shared(unsqueeze, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant", "constant_axes", "test"}); ASSERT_EQ(new_const->get_shape(), shape_out); auto values_out = new_const->get_vector(); @@ -186,24 +248,25 @@ TEST(constant_folding, constant_unsqueeze) { TEST(constant_folding, constant_broadcast_v1) { vector values_in{0, 1}; auto constant_in = make_shared(element::i32, Shape{2}, values_in); + 
constant_in->set_friendly_name("constant_in"); vector shape_in{2, 4}; auto constant_shape = make_shared(element::i64, Shape{2}, shape_in); + constant_shape->set_friendly_name("constant_shape"); vector axes_in{0}; auto constant_axes = make_shared(element::i64, Shape{1}, axes_in); + constant_axes->set_friendly_name("constant_axes"); auto broadcast_v1 = make_shared(constant_in, constant_shape, constant_axes); broadcast_v1->set_friendly_name("test"); auto f = make_shared(broadcast_v1, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant_in", "constant_shape", "constant_axes", "test"}); auto values_out = new_const->get_vector(); vector values_expected{0, 0, 0, 0, 1, 1, 1, 1}; @@ -213,22 +276,22 @@ TEST(constant_folding, constant_broadcast_v1) { TEST(constant_folding, constant_broadcast_v1_with_target_shape) { vector values_in{1}; auto constant_in = make_shared(element::i32, Shape{1, 1, 1, 1}, values_in); + constant_in->set_friendly_name("constant_in"); vector shape_in{1, 3, 1, 1}; auto target_shape = make_shared(element::i64, Shape{4}, shape_in); + target_shape->set_friendly_name("target_shape"); auto broadcast_v1 = make_shared(constant_in, target_shape); broadcast_v1->set_friendly_name("test"); auto f = make_shared(broadcast_v1, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = 
get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant_in", "target_shape", "test"}, "test", false); auto values_out = new_const->get_vector(); vector values_expected{1, 1, 1}; @@ -238,22 +301,22 @@ TEST(constant_folding, constant_broadcast_v1_with_target_shape) { TEST(constant_folding, constant_broadcast_v1_numpy) { vector values_in{0, 1}; auto constant_in = make_shared(element::i32, Shape{2}, values_in); + constant_in->set_friendly_name("constant_in"); vector shape_in{4, 2}; auto constant_shape = make_shared(element::i64, Shape{2}, shape_in); + constant_shape->set_friendly_name("constant_shape"); auto broadcast_v1 = make_shared(constant_in, constant_shape); broadcast_v1->set_friendly_name("test"); auto f = make_shared(broadcast_v1, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant_in", "constant_shape", "test"}, "test", false); auto values_out = new_const->get_vector(); vector values_expected{0, 1, 0, 1, 0, 1, 0, 1}; @@ -273,57 +336,105 @@ TEST(constant_folding, constant_unary_binary) { vector values_j{-3, 5}; vector values_k{3, 5}; auto a = make_shared(element::i32, Shape{2, 2}, values_a); + a->set_friendly_name("a"); auto b = make_shared(element::i32, Shape{2, 2}, values_b); + b->set_friendly_name("b"); auto c = make_shared(element::i32, Shape{2, 2}, values_c); + c->set_friendly_name("c"); auto d = make_shared(element::i32, Shape{2, 2}, values_d); + d->set_friendly_name("d"); auto e = make_shared(element::i32, Shape{2}, values_e); + e->set_friendly_name("e"); auto f = 
make_shared(element::i32, Shape{2}, values_f); + f->set_friendly_name("f"); auto g = make_shared(element::i32, Shape{2}, values_g); + g->set_friendly_name("g"); auto h = make_shared(element::boolean, Shape{2, 2}, values_h); + h->set_friendly_name("h"); auto i = make_shared(element::boolean, Shape{2}, values_i); + i->set_friendly_name("i"); auto j = make_shared(element::i8, Shape{2}, values_j); + j->set_friendly_name("j"); auto k = make_shared(element::u8, Shape{2}, values_k); + k->set_friendly_name("k"); auto doubles = make_shared(element::f64, Shape{2}, std::vector{4.0, 9.0}); + doubles->set_friendly_name("doubles"); auto doubles2 = make_shared(element::f64, Shape{2}, std::vector{4.0, 1.0}); + doubles2->set_friendly_name("doubles2"); auto shorts = make_shared(element::i16, Shape{3}, std::vector{14, -3, -3}); + shorts->set_friendly_name("shorts"); auto shorts2 = make_shared(element::i16, Shape{1}, std::vector{-3}); + shorts2->set_friendly_name("shorts2"); auto unsigned_shorts = make_shared(element::u16, Shape{3}, std::vector{14, 300, 14}); + unsigned_shorts->set_friendly_name("unsigned_shorts"); auto unsigned_shorts2 = make_shared(element::u16, Shape{1}, std::vector{300}); + unsigned_shorts2->set_friendly_name("unsigned_shorts2"); auto add = make_shared(a, b); + add->set_friendly_name("add"); auto sub = make_shared(a, b); + sub->set_friendly_name("sub"); auto mul = make_shared(a, b); + mul->set_friendly_name("mul"); auto divn = make_shared(a, b); + divn->set_friendly_name("divn"); auto pow = make_shared(a, b); + pow->set_friendly_name("pow"); auto min = make_shared(c, a); + min->set_friendly_name("min"); auto max = make_shared(a, c); + max->set_friendly_name("max"); auto absn = make_shared(c); + absn->set_friendly_name("absn"); auto neg = make_shared(c); + neg->set_friendly_name("neg"); auto sqrt = make_shared(d); + sqrt->set_friendly_name("sqrt"); auto add_autob_numpy = make_shared(a, e, op::AutoBroadcastType::NUMPY); + 
add_autob_numpy->set_friendly_name("add_autob_numpy"); auto sub_autob_numpy = make_shared(a, e, op::AutoBroadcastType::NUMPY); + sub_autob_numpy->set_friendly_name("sub_autob_numpy"); auto mul_autob_numpy = make_shared(a, e, op::AutoBroadcastType::NUMPY); + mul_autob_numpy->set_friendly_name("mul_autob_numpy"); auto div_autob_numpy = make_shared(a, g, op::AutoBroadcastType::NUMPY); + div_autob_numpy->set_friendly_name("div_autob_numpy"); auto pow_autob_numpy = make_shared(a, g, op::AutoBroadcastType::NUMPY); + pow_autob_numpy->set_friendly_name("pow_autob_numpy"); auto min_autob_numpy = make_shared(a, f, op::AutoBroadcastType::NUMPY); + min_autob_numpy->set_friendly_name("min_autob_numpy"); auto max_autob_numpy = make_shared(a, f, op::AutoBroadcastType::NUMPY); + max_autob_numpy->set_friendly_name("max_autob_numpy"); auto equal_autob_numpy = make_shared(a, g, op::AutoBroadcastType::NUMPY); + equal_autob_numpy->set_friendly_name("equal_autob_numpy"); auto not_equal_autob_numpy = make_shared(a, g, op::AutoBroadcastType::NUMPY); + not_equal_autob_numpy->set_friendly_name("not_equal_autob_numpy"); auto greater_autob_numpy = make_shared(a, g, op::AutoBroadcastType::NUMPY); + greater_autob_numpy->set_friendly_name("greater_autob_numpy"); auto greater_eq_autob_numpy = make_shared(a, g, op::AutoBroadcastType::NUMPY); + greater_eq_autob_numpy->set_friendly_name("greater_eq_autob_numpy"); auto less_autob_numpy = make_shared(a, g, op::AutoBroadcastType::NUMPY); + less_autob_numpy->set_friendly_name("less_autob_numpy"); auto less_eq_autob_numpy = make_shared(a, g, op::AutoBroadcastType::NUMPY); + less_eq_autob_numpy->set_friendly_name("less_eq_autob_numpy"); auto logical_or_autob_numpy = make_shared(h, i, op::AutoBroadcastType::NUMPY); + logical_or_autob_numpy->set_friendly_name("logical_or_autob_numpy"); auto logical_xor_autob_numpy = make_shared(h, i, op::AutoBroadcastType::NUMPY); + logical_xor_autob_numpy->set_friendly_name("logical_xor_autob_numpy"); auto doubles_sqrt = 
make_shared(doubles); + doubles_sqrt->set_friendly_name("doubles_sqrt"); auto sub_int8 = make_shared(j, j); + sub_int8->set_friendly_name("sub_int8"); auto sub_uint8 = make_shared(k, k); + sub_uint8->set_friendly_name("sub_uint8"); auto equal_doubles = make_shared(doubles, doubles2, op::AutoBroadcastType::NUMPY); + equal_doubles->set_friendly_name("equal_doubles"); auto equal_shorts = make_shared(shorts, shorts2, op::AutoBroadcastType::NUMPY); + equal_shorts->set_friendly_name("equal_shorts"); auto equal_unsigned_shorts = make_shared(unsigned_shorts, unsigned_shorts2, op::AutoBroadcastType::NUMPY); - + equal_unsigned_shorts->set_friendly_name("equal_unsigned_shorts"); auto neg_sqrt = make_shared(c); + neg_sqrt->set_friendly_name("neg_sqrt"); auto func = make_shared(NodeVector{add, sub, @@ -359,9 +470,7 @@ TEST(constant_folding, constant_unary_binary) { ParameterVector{}); auto func_error = make_shared(NodeVector{neg_sqrt}, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(func); + run_constant_folding(func); // expected values vector add_expected{2, 4, 6, 8}; @@ -395,57 +504,92 @@ TEST(constant_folding, constant_unary_binary) { vector equal_shorts_expected{0, 1, 1}; vector equal_unsigned_shorts_expected{0, 1, 0}; - ASSERT_EQ(get_result_constant(func, 0), add_expected); - ASSERT_EQ(get_result_constant(func, 1), sub_expected); - ASSERT_EQ(get_result_constant(func, 2), mul_expected); - ASSERT_EQ(get_result_constant(func, 3), div_expected); - ASSERT_EQ(get_result_constant(func, 4), pow_expected); - ASSERT_EQ(get_result_constant(func, 5), min_expected); - ASSERT_EQ(get_result_constant(func, 6), max_expected); - ASSERT_EQ(get_result_constant(func, 7), abs_neg_expected); - ASSERT_EQ(get_result_constant(func, 8), abs_neg_expected); - ASSERT_EQ(get_result_constant(func, 9), sqrt_expected); - ASSERT_EQ(get_result_constant(func, 10), add_autob_numpy_expected); - ASSERT_EQ(get_result_constant(func, 11), 
sub_autob_numpy_expected); - ASSERT_EQ(get_result_constant(func, 12), mul_autob_numpy_expected); - ASSERT_EQ(get_result_constant(func, 13), div_autob_numpy_expected); - ASSERT_EQ(get_result_constant(func, 14), pow_autob_numpy_expected); - ASSERT_EQ(get_result_constant(func, 15), min_autob_numpy_expected); - ASSERT_EQ(get_result_constant(func, 16), max_autob_numpy_expected); - ASSERT_EQ(get_result_constant(func, 17), equal_autob_numpy_expected); - ASSERT_EQ(get_result_constant(func, 18), not_equal_autob_numpy_expected); - ASSERT_EQ(get_result_constant(func, 19), greater_autob_numpy_expected); - ASSERT_EQ(get_result_constant(func, 20), greater_eq_autob_numpy_expected); - ASSERT_EQ(get_result_constant(func, 21), less_autob_numpy_expected); - ASSERT_EQ(get_result_constant(func, 22), less_eq_autob_numpy_expected); - ASSERT_EQ(get_result_constant(func, 23), logical_or_autob_numpy_expected); - ASSERT_EQ(get_result_constant(func, 24), logical_xor_autob_numpy_expected); - ASSERT_EQ(get_result_constant(func, 25), doubles_sqrt_expected); - ASSERT_EQ(get_result_constant(func, 26), sub_int8_expected); - ASSERT_EQ(get_result_constant(func, 27), sub_uint8_expected); - ASSERT_EQ(get_result_constant(func, 28), equal_doubles_expected); - ASSERT_EQ(get_result_constant(func, 29), equal_shorts_expected); - ASSERT_EQ(get_result_constant(func, 30), equal_unsigned_shorts_expected); + size_t index = 0; + ASSERT_EQ(get_result_constant_data(func, index), add_expected); + check_names(get_result_constant(func, index++), {"a", "b", "add"}, "add"); + ASSERT_EQ(get_result_constant_data(func, index), sub_expected); + check_names(get_result_constant(func, index++), {"a", "b", "sub"}, "sub"); + ASSERT_EQ(get_result_constant_data(func, index), mul_expected); + check_names(get_result_constant(func, index++), {"a", "b", "mul"}, "mul"); + ASSERT_EQ(get_result_constant_data(func, index), div_expected); + check_names(get_result_constant(func, index++), {"a", "b", "divn"}, "divn"); + 
ASSERT_EQ(get_result_constant_data(func, index), pow_expected); + check_names(get_result_constant(func, index++), {"a", "b", "pow"}, "pow"); + ASSERT_EQ(get_result_constant_data(func, index), min_expected); + check_names(get_result_constant(func, index++), {"c", "a", "min"}, "min"); + ASSERT_EQ(get_result_constant_data(func, index), max_expected); + check_names(get_result_constant(func, index++), {"a", "c", "max"}, "max"); + ASSERT_EQ(get_result_constant_data(func, index), abs_neg_expected); + check_names(get_result_constant(func, index++), {"c", "absn"}, "absn"); + ASSERT_EQ(get_result_constant_data(func, index), abs_neg_expected); + check_names(get_result_constant(func, index++), {"c", "neg"}, "neg"); + ASSERT_EQ(get_result_constant_data(func, index), sqrt_expected); + check_names(get_result_constant(func, index++), {"d", "sqrt"}, "sqrt"); + ASSERT_EQ(get_result_constant_data(func, index), add_autob_numpy_expected); + check_names(get_result_constant(func, index++), {"a", "e", "add_autob_numpy"}, "add_autob_numpy"); + ASSERT_EQ(get_result_constant_data(func, index), sub_autob_numpy_expected); + check_names(get_result_constant(func, index++), {"a", "e", "sub_autob_numpy"}, "sub_autob_numpy"); + ASSERT_EQ(get_result_constant_data(func, index), mul_autob_numpy_expected); + check_names(get_result_constant(func, index++), {"a", "e", "mul_autob_numpy"}, "mul_autob_numpy"); + ASSERT_EQ(get_result_constant_data(func, index), div_autob_numpy_expected); + check_names(get_result_constant(func, index++), {"a", "g", "div_autob_numpy"}, "div_autob_numpy"); + ASSERT_EQ(get_result_constant_data(func, index), pow_autob_numpy_expected); + check_names(get_result_constant(func, index++), {"a", "g", "pow_autob_numpy"}, "pow_autob_numpy"); + ASSERT_EQ(get_result_constant_data(func, index), min_autob_numpy_expected); + check_names(get_result_constant(func, index++), {"a", "f", "min_autob_numpy"}, "min_autob_numpy"); + ASSERT_EQ(get_result_constant_data(func, index), 
max_autob_numpy_expected); + check_names(get_result_constant(func, index++), {"a", "f", "max_autob_numpy"}, "max_autob_numpy"); + ASSERT_EQ(get_result_constant_data(func, index), equal_autob_numpy_expected); + check_names(get_result_constant(func, index++), {"a", "g", "equal_autob_numpy"}, "equal_autob_numpy"); + ASSERT_EQ(get_result_constant_data(func, index), not_equal_autob_numpy_expected); + check_names(get_result_constant(func, index++), {"a", "g", "not_equal_autob_numpy"}, "not_equal_autob_numpy"); + ASSERT_EQ(get_result_constant_data(func, index), greater_autob_numpy_expected); + check_names(get_result_constant(func, index++), {"a", "g", "greater_autob_numpy"}, "greater_autob_numpy"); + ASSERT_EQ(get_result_constant_data(func, index), greater_eq_autob_numpy_expected); + check_names(get_result_constant(func, index++), {"a", "g", "greater_eq_autob_numpy"}, "greater_eq_autob_numpy"); + ASSERT_EQ(get_result_constant_data(func, index), less_autob_numpy_expected); + check_names(get_result_constant(func, index++), {"a", "g", "less_autob_numpy"}, "less_autob_numpy"); + ASSERT_EQ(get_result_constant_data(func, index), less_eq_autob_numpy_expected); + check_names(get_result_constant(func, index++), {"a", "g", "less_eq_autob_numpy"}, "less_eq_autob_numpy"); + ASSERT_EQ(get_result_constant_data(func, index), logical_or_autob_numpy_expected); + check_names(get_result_constant(func, index++), {"h", "i", "logical_or_autob_numpy"}, "logical_or_autob_numpy"); + ASSERT_EQ(get_result_constant_data(func, index), logical_xor_autob_numpy_expected); + check_names(get_result_constant(func, index++), {"h", "i", "logical_xor_autob_numpy"}, "logical_xor_autob_numpy"); + ASSERT_EQ(get_result_constant_data(func, index), doubles_sqrt_expected); + check_names(get_result_constant(func, index++), {"doubles", "doubles_sqrt"}, "doubles_sqrt"); + ASSERT_EQ(get_result_constant_data(func, index), sub_int8_expected); + check_names(get_result_constant(func, index++), {"j", "sub_int8"}, 
"sub_int8"); + ASSERT_EQ(get_result_constant_data(func, index), sub_uint8_expected); + check_names(get_result_constant(func, index++), {"k", "sub_uint8"}, "sub_uint8"); + ASSERT_EQ(get_result_constant_data(func, index), equal_doubles_expected); + check_names(get_result_constant(func, index++), {"doubles", "doubles2", "equal_doubles"}, "equal_doubles"); + ASSERT_EQ(get_result_constant_data(func, index), equal_shorts_expected); + check_names(get_result_constant(func, index++), {"shorts", "shorts2", "equal_shorts"}, "equal_shorts"); + ASSERT_EQ(get_result_constant_data(func, index), equal_unsigned_shorts_expected); + check_names(get_result_constant(func, index++), + {"unsigned_shorts", "unsigned_shorts2", "equal_unsigned_shorts"}, + "equal_unsigned_shorts"); + + pass::Manager pass_manager; ASSERT_NO_THROW(pass_manager.run_passes(func_error)); } template static void test_const_convert(const vector& values_in, const vector& values_expected) { auto constant = op::Constant::create(from, Shape{values_in.size()}, values_in); + constant->set_friendly_name("constant"); auto convert = make_shared(constant, to); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant", "test"}); ASSERT_EQ(new_const->get_output_element_type(0), to); auto values_out = new_const->template cast_vector(); @@ -514,20 +658,19 @@ TEST(constant_folding, shape_of_v0) { Shape input_shape{3, 4, 0, 22, 608, 909, 3}; auto param = make_shared(element::boolean, input_shape); + param->set_friendly_name("param"); auto shape_of = 
make_shared(param); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"test"}); ASSERT_EQ(new_const->get_output_element_type(0), element::i64); auto values_out = new_const->get_vector(); @@ -538,20 +681,19 @@ TEST(constant_folding, shape_of_v3) { Shape input_shape{3, 4, 0, 22, 608, 909, 3}; auto param = make_shared(element::boolean, input_shape); + param->set_friendly_name("param"); auto shape_of = make_shared(param); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"test"}); ASSERT_EQ(new_const->get_output_element_type(0), element::i64); auto values_out = new_const->get_vector(); @@ -562,20 +704,19 @@ TEST(constant_folding, shape_of_i32_v3) { Shape input_shape{3, 4, 0, 22, 608, 909, 3}; auto param = make_shared(element::boolean, input_shape); + param->set_friendly_name("param"); auto shape_of = make_shared(param, element::i32); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + 
run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"test"}); ASSERT_EQ(new_const->get_output_element_type(0), element::i32); auto values_out = new_const->get_vector(); @@ -590,15 +731,13 @@ TEST(constant_folding, shape_of_dynamic_v0) { shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(f->get_ops().size(), 3); auto result_shape_of = f->get_results().at(0)->get_input_node_shared_ptr(0); ASSERT_EQ(result_shape_of, shape_of); - ASSERT_EQ(result_shape_of->get_friendly_name(), "test"); + check_names(result_shape_of, {"test"}); } TEST(constant_folding, shape_of_dynamic_v3) { @@ -609,15 +748,13 @@ TEST(constant_folding, shape_of_dynamic_v3) { shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(f->get_ops().size(), 3); auto result_shape_of = f->get_results().at(0)->get_input_node_shared_ptr(0); ASSERT_EQ(result_shape_of, shape_of); - ASSERT_EQ(result_shape_of->get_friendly_name(), "test"); + check_names(result_shape_of, {"test"}); } TEST(constant_folding, shape_of_dynamic_i32_v3) { @@ -628,15 +765,13 @@ TEST(constant_folding, shape_of_dynamic_i32_v3) { shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(f->get_ops().size(), 3); auto result_shape_of = 
f->get_results().at(0)->get_input_node_shared_ptr(0); ASSERT_EQ(result_shape_of, shape_of); - ASSERT_EQ(result_shape_of->get_friendly_name(), "test"); + check_names(result_shape_of, {"test"}); } // We need to be sure that constant folding won't be calculated endlessly. @@ -648,16 +783,13 @@ TEST(constant_folding, shape_of_dynamic_double_folding_v0) { shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(f->get_ops().size(), 3); auto result_shape_of = f->get_results().at(0)->get_input_node_shared_ptr(0); ASSERT_EQ(result_shape_of, shape_of); - ASSERT_EQ(result_shape_of->get_friendly_name(), "test"); + check_names(result_shape_of, {"test"}); } TEST(constant_folding, shape_of_dynamic_double_folding_v3) { @@ -668,16 +800,13 @@ TEST(constant_folding, shape_of_dynamic_double_folding_v3) { shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(f->get_ops().size(), 3); auto result_shape_of = f->get_results().at(0)->get_input_node_shared_ptr(0); ASSERT_EQ(result_shape_of, shape_of); - ASSERT_EQ(result_shape_of->get_friendly_name(), "test"); + check_names(result_shape_of, {"test"}); } // Constant folding will not succeed on ShapeOf if the argument rank is dynamic. 
@@ -690,16 +819,14 @@ TEST(constant_folding, shape_of_rank_dynamic_v0) { shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 1); ASSERT_EQ(count_ops_of_type(f), 0); auto result_shape_of = f->get_results().at(0)->get_input_node_shared_ptr(0); ASSERT_EQ(result_shape_of, shape_of); - ASSERT_EQ(result_shape_of->get_friendly_name(), "test"); + check_names(result_shape_of, {"test"}); } TEST(constant_folding, shape_of_rank_dynamic_v3) { @@ -710,16 +837,14 @@ TEST(constant_folding, shape_of_rank_dynamic_v3) { shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 1); ASSERT_EQ(count_ops_of_type(f), 0); auto result_shape_of = f->get_results().at(0)->get_input_node_shared_ptr(0); ASSERT_EQ(result_shape_of, shape_of); - ASSERT_EQ(result_shape_of->get_friendly_name(), "test"); + check_names(result_shape_of, {"test"}); } void const_reverse(const element::Type& axes_elem_type) { @@ -727,21 +852,21 @@ void const_reverse(const element::Type& axes_elem_type) { vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9}; auto constant = op::Constant::create(element::i32, input_shape, values_in); + constant->set_friendly_name("constant"); auto axes = op::Constant::create(axes_elem_type, {1}, {1}); + axes->set_friendly_name("axes"); auto convert = make_shared(constant, axes, op::v1::Reverse::Mode::INDEX); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = 
ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant", "axes", "test"}); auto values_out = new_const->get_vector(); vector values_expected{3, 2, 1, 6, 5, 4, 9, 8, 7}; @@ -767,23 +892,23 @@ TEST(constant_folding, const_reduceprod) { vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9}; auto constant = op::Constant::create(element::i32, input_shape, values_in); + constant->set_friendly_name("constant"); Shape axes_shape{1}; vector values_axes{1}; auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + constant_axes->set_friendly_name("constant_axes"); auto convert = make_shared(constant, constant_axes); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant", "constant_axes", "test"}); ASSERT_EQ(new_const->get_shape(), output_shape); auto values_out = new_const->get_vector(); @@ -799,23 +924,23 @@ TEST(constant_folding, const_reduceprod_keepdims) { vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9}; auto constant = op::Constant::create(element::i32, input_shape, values_in); + constant->set_friendly_name("constant"); Shape axes_shape{1}; vector values_axes{1}; auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + constant_axes->set_friendly_name("constant_axes"); auto convert = make_shared(constant, constant_axes, true); convert->set_friendly_name("test"); auto f = 
make_shared(convert, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant", "constant_axes", "test"}); ASSERT_EQ(new_const->get_shape(), output_shape); auto values_out = new_const->get_vector(); @@ -831,23 +956,22 @@ TEST(constant_folding, const_reducesum) { vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9}; auto constant = op::Constant::create(element::i32, input_shape, values_in); + constant->set_friendly_name("constant"); Shape axes_shape{1}; vector values_axes{1}; auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + constant_axes->set_friendly_name("constant_axes"); auto convert = make_shared(constant, constant_axes); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); - + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant", "constant_axes", "test"}); ASSERT_EQ(new_const->get_shape(), output_shape); auto values_out = new_const->get_vector(); @@ -863,23 +987,23 @@ TEST(constant_folding, const_reducesum_keepdims) { vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9}; auto constant = op::Constant::create(element::i32, input_shape, values_in); + constant->set_friendly_name("constant"); Shape axes_shape{1}; vector 
values_axes{1}; auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + constant_axes->set_friendly_name("constant_axes"); auto convert = make_shared(constant, constant_axes, true); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant", "constant_axes", "test"}); ASSERT_EQ(new_const->get_shape(), output_shape); auto values_out = new_const->get_vector(); @@ -895,23 +1019,23 @@ TEST(constant_folding, const_reducemax) { vector values_in{1, 2, 3, 4, 5, 6}; auto constant = op::Constant::create(element::i32, input_shape, values_in); + constant->set_friendly_name("constant"); Shape axes_shape{1}; vector values_axes{1}; auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + constant_axes->set_friendly_name("constant_axes"); auto convert = make_shared(constant, constant_axes); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant", "constant_axes", "test"}); ASSERT_EQ(new_const->get_shape(), output_shape); auto values_out = new_const->get_vector(); @@ -927,23 +1051,23 @@ 
TEST(constant_folding, const_reducemax_keepdims) { vector values_in{1, 2, 3, 4, 5, 6}; auto constant = op::Constant::create(element::i32, input_shape, values_in); + constant->set_friendly_name("constant"); Shape axes_shape{1}; vector values_axes{1}; auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + constant_axes->set_friendly_name("constant_axes"); auto convert = make_shared(constant, constant_axes, true); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant", "constant_axes", "test"}); ASSERT_EQ(new_const->get_shape(), output_shape); auto values_out = new_const->get_vector(); @@ -959,23 +1083,23 @@ TEST(constant_folding, const_reducemin) { vector values_in{1, 2, 3, 4, 5, 6}; auto constant = op::Constant::create(element::i32, input_shape, values_in); + constant->set_friendly_name("constant"); Shape axes_shape{1}; vector values_axes{1}; auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + constant_axes->set_friendly_name("constant_axes"); auto convert = make_shared(constant, constant_axes); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - 
ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant", "constant_axes", "test"}); ASSERT_EQ(new_const->get_shape(), output_shape); auto values_out = new_const->get_vector(); @@ -991,23 +1115,23 @@ TEST(constant_folding, const_reducemin_keepdims) { vector values_in{1, 2, 3, 4, 5, 6}; auto constant = op::Constant::create(element::i32, input_shape, values_in); + constant->set_friendly_name("constant"); Shape axes_shape{1}; vector values_axes{1}; auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + constant_axes->set_friendly_name("constant_axes"); auto convert = make_shared(constant, constant_axes, true); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant", "constant_axes", "test"}); ASSERT_EQ(new_const->get_shape(), output_shape); auto values_out = new_const->get_vector(); @@ -1023,23 +1147,23 @@ TEST(constant_folding, const_reducemean) { vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9}; auto constant = op::Constant::create(element::i32, input_shape, values_in); + constant->set_friendly_name("constant"); Shape axes_shape{1}; vector values_axes{1}; auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + constant_axes->set_friendly_name("constant_axes"); auto convert = make_shared(constant, constant_axes); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); 
ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant", "constant_axes", "test"}); ASSERT_EQ(new_const->get_shape(), output_shape); auto values_out = new_const->get_vector(); @@ -1055,23 +1179,23 @@ TEST(constant_folding, const_reducemean_keepdims) { vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9}; auto constant = op::Constant::create(element::i32, input_shape, values_in); + constant->set_friendly_name("constant"); Shape axes_shape{1}; vector values_axes{1}; auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + constant_axes->set_friendly_name("constant_axes"); auto convert = make_shared(constant, constant_axes, true); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant", "constant_axes", "test"}); ASSERT_EQ(new_const->get_shape(), output_shape); auto values_out = new_const->get_vector(); @@ -1086,21 +1210,21 @@ TEST(constant_folding, const_reduce_logical_and__no_keepdims) { const vector values_in{0, 1, 1, 0, 1, 0, 1, 1, 1}; const auto data = op::Constant::create(element::boolean, input_shape, values_in); + data->set_friendly_name("data"); const auto axes = op::Constant::create(element::i64, {1}, {1}); + axes->set_friendly_name("axes"); const auto convert = make_shared(data, axes, false); 
convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"data", "axes", "test"}); const Shape expected_out_shape{3}; ASSERT_EQ(new_const->get_shape(), expected_out_shape); @@ -1117,21 +1241,21 @@ TEST(constant_folding, const_reduce_logical_and__keepdims) { const vector values_in{0, 1, 1, 0, 1, 0, 1, 1, 1}; const auto data = op::Constant::create(element::boolean, input_shape, values_in); + data->set_friendly_name("data"); const auto axes = op::Constant::create(element::i64, {1}, {1}); + axes->set_friendly_name("axes"); const auto convert = make_shared(data, axes, true); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"data", "axes", "test"}); // the output shape is expected to have 'ones' at the positions specified in the reduction axes // in case the keep_dims attribute of ReduceLogicalAnd is set to true @@ -1150,21 +1274,21 @@ TEST(constant_folding, const_reduce_logical_and__keepdims_3d) { const vector values_in{1, 1, 0, 0, 1, 0, 0, 1}; const auto data = op::Constant::create(element::boolean, input_shape, values_in); + 
data->set_friendly_name("data"); const auto axes = op::Constant::create(element::i64, {2}, {0, 2}); + axes->set_friendly_name("axes"); const auto convert = make_shared(data, axes, true); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"data", "axes", "test"}); const Shape expected_out_shape{1, 2, 1}; ASSERT_EQ(new_const->get_shape(), expected_out_shape); @@ -1181,21 +1305,21 @@ TEST(constant_folding, const_reduce_logical_or__no_keepdims) { const vector values_in{1, 0, 0, 1, 0, 1, 0, 0, 0}; const auto data = op::Constant::create(element::boolean, input_shape, values_in); + data->set_friendly_name("data"); const auto axes = op::Constant::create(element::i64, {1}, {1}); + axes->set_friendly_name("axes"); const auto convert = make_shared(data, axes, false); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"data", "axes", "test"}); const Shape expected_out_shape{3}; ASSERT_EQ(new_const->get_shape(), expected_out_shape); @@ -1209,21 +1333,21 @@ TEST(constant_folding, const_reduce_logical_or__no_keepdims) { 
TEST(constant_folding, const_concat) { auto constant0 = op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); + constant0->set_friendly_name("constant0"); auto constant1 = op::Constant::create(element::i32, Shape{2, 1}, vector{7, 8}); + constant1->set_friendly_name("constant1"); auto concat = make_shared(NodeVector{constant0, constant1}, 1); concat->set_friendly_name("test"); auto f = make_shared(concat, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant0", "constant1", "test"}); auto values_out = new_const->get_vector(); vector values_expected{1, 2, 3, 7, 4, 5, 6, 8}; @@ -1233,22 +1357,22 @@ TEST(constant_folding, const_concat) { TEST(constant_folding, const_concat_3d_single_elem) { auto constant_1 = op::Constant::create(element::i32, Shape{1, 1, 1}, vector{1}); + constant_1->set_friendly_name("constant_1"); auto constant_2 = op::Constant::create(element::i32, Shape{1, 1, 1}, vector{2}); + constant_2->set_friendly_name("constant_2"); auto concat = make_shared(NodeVector{constant_1, constant_2}, 0); concat->set_friendly_name("test"); auto f = make_shared(concat, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant_1", "constant_2", 
"test"}); ASSERT_EQ(new_const->get_output_shape(0), (Shape{2, 1, 1})); auto values_out = new_const->get_vector(); @@ -1258,24 +1382,24 @@ TEST(constant_folding, const_concat_3d_single_elem) { TEST(constant_folding, const_concat_axis_2) { auto constant_1 = op::Constant::create(element::i32, Shape{3, 1, 2}, vector{1, 2, 3, 4, 5, 6}); + constant_1->set_friendly_name("constant_1"); auto constant_2 = op::Constant::create(element::i32, Shape{3, 1, 4}, vector{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}); + constant_2->set_friendly_name("constant_2"); auto concat = make_shared(NodeVector{constant_1, constant_2}, 2); concat->set_friendly_name("test"); auto f = make_shared(concat, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant_1", "constant_2", "test"}); ASSERT_EQ(new_const->get_output_shape(0), (Shape{3, 1, 6})); auto values_out = new_const->get_vector(); @@ -1285,24 +1409,25 @@ TEST(constant_folding, const_concat_axis_2) { TEST(constant_folding, const_concat_axis_1_bool_type) { auto constant_1 = op::Constant::create(element::boolean, Shape{1, 1, 2}, vector{true, true}); + constant_1->set_friendly_name("constant_1"); auto constant_2 = op::Constant::create(element::boolean, Shape{1, 2, 2}, vector{true, false, true, false}); + constant_2->set_friendly_name("constant_2"); auto constant_3 = op::Constant::create(element::boolean, Shape{1, 3, 2}, vector{true, false, true, false, true, false}); + constant_3->set_friendly_name("constant_3"); auto concat = make_shared(NodeVector{constant_1, constant_2, constant_3}, 1); concat->set_friendly_name("test"); auto f = 
make_shared(concat, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant_1", "constant_2", "constant_3", "test"}); ASSERT_EQ(new_const->get_output_shape(0), (Shape{1, 6, 2})); auto values_out = new_const->get_vector(); @@ -1312,20 +1437,19 @@ TEST(constant_folding, const_concat_axis_1_bool_type) { TEST(constant_folding, const_logical_not) { auto constant = op::Constant::create(element::boolean, Shape{2, 3}, vector{0, 1, 0, 0, 1, 1}); + constant->set_friendly_name("constant"); auto logical_not = make_shared(constant); logical_not->set_friendly_name("test"); auto f = make_shared(logical_not, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant", "test"}); auto values_out = new_const->get_vector(); vector values_expected{1, 0, 1, 1, 0, 0}; @@ -1335,21 +1459,21 @@ TEST(constant_folding, const_logical_not) { TEST(constant_folding, const_equal) { auto constant0 = op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); + constant0->set_friendly_name("constant0"); auto constant1 = op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 2, 3, 5, 6}); + constant1->set_friendly_name("constant1"); auto eq = make_shared(constant0, constant1); 
eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant0", "constant1", "test"}); auto values_out = new_const->get_vector(); vector values_expected{1, 1, 0, 0, 1, 1}; @@ -1359,21 +1483,21 @@ TEST(constant_folding, const_equal) { TEST(constant_folding, const_not_equal) { auto constant0 = op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); + constant0->set_friendly_name("constant0"); auto constant1 = op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 2, 3, 5, 6}); + constant1->set_friendly_name("constant1"); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant0", "constant1", "test"}); auto values_out = new_const->get_vector(); vector values_expected{0, 0, 1, 1, 0, 0}; @@ -1383,21 +1507,21 @@ TEST(constant_folding, const_not_equal) { TEST(constant_folding, const_greater) { auto constant0 = op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); + constant0->set_friendly_name("constant0"); auto constant1 = op::Constant::create(element::i32, Shape{2, 3}, 
vector{2, 2, 2, 5, 5, 5}); + constant1->set_friendly_name("constant1"); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant0", "constant1", "test"}); auto values_out = new_const->get_vector(); vector values_expected{0, 0, 1, 0, 0, 1}; @@ -1407,21 +1531,21 @@ TEST(constant_folding, const_greater) { TEST(constant_folding, const_greater_eq) { auto constant0 = op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); + constant0->set_friendly_name("constant0"); auto constant1 = op::Constant::create(element::i32, Shape{2, 3}, vector{2, 2, 2, 5, 5, 5}); + constant1->set_friendly_name("constant1"); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant0", "constant1", "test"}); auto values_out = new_const->get_vector(); vector values_expected{0, 1, 1, 0, 1, 1}; @@ -1431,21 +1555,21 @@ TEST(constant_folding, const_greater_eq) { TEST(constant_folding, const_less) { auto constant0 = op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 
6}); + constant0->set_friendly_name("constant0"); auto constant1 = op::Constant::create(element::i32, Shape{2, 3}, vector{2, 2, 2, 5, 5, 5}); + constant1->set_friendly_name("constant1"); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant0", "constant1", "test"}); auto values_out = new_const->get_vector(); vector values_expected{1, 0, 0, 1, 0, 0}; @@ -1455,21 +1579,21 @@ TEST(constant_folding, const_less) { TEST(constant_folding, const_less_eq) { auto constant0 = op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); + constant0->set_friendly_name("constant0"); auto constant1 = op::Constant::create(element::i32, Shape{2, 3}, vector{2, 2, 2, 5, 5, 5}); + constant1->set_friendly_name("constant1"); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant0", "constant1", "test"}); auto values_out = new_const->get_vector(); vector values_expected{1, 1, 0, 1, 1, 0}; @@ -1479,21 +1603,21 @@ TEST(constant_folding, const_less_eq) { 
TEST(constant_folding, const_or) { auto constant0 = op::Constant::create(element::boolean, Shape{2, 3}, vector{0, 0, 1, 0, 1, 1}); + constant0->set_friendly_name("constant0"); auto constant1 = op::Constant::create(element::boolean, Shape{2, 3}, vector{0, 1, 1, 1, 0, 1}); + constant1->set_friendly_name("constant1"); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant0", "constant1", "test"}); auto values_out = new_const->get_vector(); vector values_expected{0, 1, 1, 1, 1, 1}; @@ -1503,21 +1627,21 @@ TEST(constant_folding, const_or) { TEST(constant_folding, const_xor) { auto constant0 = op::Constant::create(element::boolean, Shape{2, 3}, vector{0, 0, 1, 0, 1, 1}); + constant0->set_friendly_name("constant0"); auto constant1 = op::Constant::create(element::boolean, Shape{2, 3}, vector{0, 1, 1, 1, 0, 1}); + constant1->set_friendly_name("constant1"); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant0", "constant1", "test"}); auto values_out = 
new_const->get_vector(); vector values_expected{0, 1, 0, 1, 1, 0}; @@ -1528,20 +1652,19 @@ TEST(constant_folding, const_xor) { TEST(constant_folding, const_ceiling) { auto constant = op::Constant::create(element::f32, Shape{2, 3}, vector{0.0f, 0.1f, -0.1f, -2.5f, 2.5f, 3.0f}); + constant->set_friendly_name("constant"); auto ceil = make_shared(constant); ceil->set_friendly_name("test"); auto f = make_shared(ceil, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant", "test"}); auto values_out = new_const->get_vector(); vector values_expected{0.0f, 1.0f, 0.0f, -2.0f, 3.0f, 3.0f}; @@ -1552,20 +1675,19 @@ TEST(constant_folding, const_ceiling) { TEST(constant_folding, const_floor) { auto constant = op::Constant::create(element::f32, Shape{2, 3}, vector{0.0f, 0.1f, -0.1f, -2.5f, 2.5f, 3.0f}); + constant->set_friendly_name("constant"); auto floor = make_shared(constant); floor->set_friendly_name("test"); auto f = make_shared(floor, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant", "test"}); auto values_out = new_const->get_vector(); vector values_expected{0.0f, 0.0f, -1.0f, -3.0f, 2.0f, 3.0f}; @@ -1578,22 +1700,23 @@ TEST(constant_folding, const_gather_v1) { 
op::Constant::create(element::f32, Shape{2, 5}, vector{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}); + constant_data->set_friendly_name("constant_data"); auto constant_indices = op::Constant::create(element::i64, Shape{4}, vector{0, 3, 2, 2}); + constant_indices->set_friendly_name("constant_indices"); auto constant_axis = op::Constant::create(element::i64, Shape{1}, vector{1}); + constant_axis->set_friendly_name("constant_axis"); auto gather = make_shared(constant_data, constant_indices, constant_axis); gather->set_friendly_name("test"); auto f = make_shared(gather, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant_data", "constant_indices", "constant_axis", "test"}); auto values_out = new_const->get_vector(); vector values_expected{1.0f, 4.0f, 3.0f, 3.0f, 6.0f, 9.0f, 8.0f, 8.0f}; @@ -1606,22 +1729,23 @@ TEST(constant_folding, const_gather_v1_scalar) { op::Constant::create(element::f32, Shape{2, 5}, vector{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}); + constant_data->set_friendly_name("constant_data"); auto constant_indices = op::Constant::create(element::i64, Shape{4}, vector{0, 3, 2, 2}); + constant_indices->set_friendly_name("constant_indices"); auto constant_axis = op::Constant::create(element::i64, Shape{}, vector{1}); + constant_axis->set_friendly_name("constant_axis"); auto gather = make_shared(constant_data, constant_indices, constant_axis); gather->set_friendly_name("test"); auto f = make_shared(gather, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + 
run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant_data", "constant_indices", "constant_axis", "test"}); auto values_out = new_const->get_vector(); vector values_expected{1.0f, 4.0f, 3.0f, 3.0f, 6.0f, 9.0f, 8.0f, 8.0f}; @@ -1636,26 +1760,27 @@ TEST(constant_folding, const_gather_v1_subgraph) { const auto C = make_shared(element::f32, Shape{1}); const int64_t axis = 0; const auto axis_const = op::Constant::create(element::i64, {}, {axis}); + axis_const->set_friendly_name("axis_const"); const auto concat = make_shared(NodeVector{A, B_const, C}, axis); + concat->set_friendly_name("concat"); const vector indices{1}; const auto indices_const = op::Constant::create(element::i64, {indices.size()}, indices); + indices_const->set_friendly_name("indices_const"); const auto gather = make_shared(concat, indices_const, axis_const); gather->set_friendly_name("test"); auto f = make_shared(gather, ParameterVector{A, C}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"axis_const", "concat", "indices_const", "test"}); const auto values_out = new_const->get_vector(); ASSERT_TRUE(test::all_close_f(values_out, {b_value}, MIN_FLOAT_TOLERANCE_BITS)); @@ -1668,26 +1793,27 @@ TEST(constant_folding, const_gather_v1_subgraph_neg_axis) { const auto C_const = 
op::Constant::create(element::f32, {1}, {b_value}); const int64_t axis = 0; const auto axis_const = op::Constant::create(element::i64, {}, {axis}); + axis_const->set_friendly_name("axis_const"); const auto concat = make_shared(NodeVector{A, B, C_const}, axis); + concat->set_friendly_name("concat"); const vector indices{-1}; const auto indices_const = op::Constant::create(element::i64, {indices.size()}, indices); + indices_const->set_friendly_name("indices_const"); const auto gather = make_shared(concat, indices_const, axis_const); gather->set_friendly_name("test"); auto f = make_shared(gather, ParameterVector{A, B}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"axis_const", "concat", "indices_const", "test"}); const auto values_out = new_const->get_vector(); ASSERT_TRUE(test::all_close_f(values_out, {b_value}, MIN_FLOAT_TOLERANCE_BITS)); @@ -1708,9 +1834,7 @@ TEST(constant_folding, const_gather_v1_subgraph_no_constant_input) { gather->set_friendly_name("test"); auto f = make_shared(gather, ParameterVector{A, B, C}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 0); @@ -1730,9 +1854,7 @@ TEST(constant_folding, const_gather_v1_subgraph_no_constant_input_scalar) { const auto gather = make_shared(concat, indices_const, axis_const); auto f = make_shared(gather, ParameterVector{A, B, C}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + 
run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 0); @@ -1753,9 +1875,7 @@ TEST(constant_folding, const_gather_v1_subgraph_skip_if_non_zero_axis) { const auto gather = make_shared(concat, indices_const, axis_const); auto f = make_shared(gather, ParameterVector{A, B, C}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 1); ASSERT_EQ(count_ops_of_type(f), 1); @@ -1775,9 +1895,7 @@ TEST(constant_folding, const_gather_v1_subgraph_skip_if_non_single_indices) { const auto gather = make_shared(concat, indices_const, axis_const); auto f = make_shared(gather, ParameterVector{A, B, C}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 1); ASSERT_EQ(count_ops_of_type(f), 1); @@ -1797,9 +1915,7 @@ TEST(constant_folding, const_gather_v1_subgraph_skip_if_concat_output_shape_dyna const auto gather = make_shared(concat, indices_const, axis_const); auto f = make_shared(gather, ParameterVector{A, B, C}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 1); ASSERT_EQ(count_ops_of_type(f), 1); @@ -1819,9 +1935,7 @@ TEST(constant_folding, const_gather_v1_subgraph_skip_if_not_single_input) { const auto gather = make_shared(concat, indices_const, axis_const); auto f = make_shared(gather, ParameterVector{A, B, C}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 1); ASSERT_EQ(count_ops_of_type(f), 1); @@ -1832,22 +1946,23 @@ TEST(constant_folding, const_gather_v7) { op::Constant::create(element::f32, Shape{2, 5}, vector{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}); + constant_data->set_friendly_name("constant_data"); auto 
constant_indices = op::Constant::create(element::i64, Shape{4}, vector{0, 3, 2, 2}); + constant_indices->set_friendly_name("constant_indices"); auto constant_axis = op::Constant::create(element::i64, Shape{1}, vector{1}); + constant_axis->set_friendly_name("constant_axis"); auto gather = make_shared(constant_data, constant_indices, constant_axis); gather->set_friendly_name("test"); auto f = make_shared(gather, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant_data", "constant_indices", "constant_axis", "test"}); auto values_out = new_const->get_vector(); vector values_expected{1.0f, 4.0f, 3.0f, 3.0f, 6.0f, 9.0f, 8.0f, 8.0f}; @@ -1860,22 +1975,23 @@ TEST(constant_folding, const_gather_v7_scalar) { op::Constant::create(element::f32, Shape{2, 5}, vector{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}); + constant_data->set_friendly_name("constant_data"); auto constant_indices = op::Constant::create(element::i64, Shape{4}, vector{0, 3, 2, 2}); + constant_indices->set_friendly_name("constant_indices"); auto constant_axis = op::Constant::create(element::i64, Shape{}, vector{1}); + constant_axis->set_friendly_name("constant_axis"); auto gather = make_shared(constant_data, constant_indices, constant_axis); gather->set_friendly_name("test"); auto f = make_shared(gather, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = 
ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant_data", "constant_indices", "constant_axis", "test"}); auto values_out = new_const->get_vector(); vector values_expected{1.0f, 4.0f, 3.0f, 3.0f, 6.0f, 9.0f, 8.0f, 8.0f}; @@ -1890,26 +2006,27 @@ TEST(constant_folding, const_gather_v7_subgraph) { const auto C = make_shared(element::f32, Shape{1}); const int64_t axis = 0; const auto axis_const = op::Constant::create(element::i64, {}, {axis}); + axis_const->set_friendly_name("axis_const"); const auto concat = make_shared(NodeVector{A, B_const, C}, axis); + concat->set_friendly_name("concat"); const vector indices{1}; const auto indices_const = op::Constant::create(element::i64, {indices.size()}, indices); + indices_const->set_friendly_name("indices_const"); const auto gather = make_shared(concat, indices_const, axis_const); gather->set_friendly_name("test"); auto f = make_shared(gather, ParameterVector{A, C}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"axis_const", "concat", "indices_const", "test"}); const auto values_out = new_const->get_vector(); ASSERT_TRUE(test::all_close_f(values_out, {b_value}, MIN_FLOAT_TOLERANCE_BITS)); @@ -1922,26 +2039,27 @@ TEST(constant_folding, const_gather_v7_subgraph_neg_axis) { const auto C_const = op::Constant::create(element::f32, {1}, {b_value}); const int64_t axis = 0; const auto axis_const = 
op::Constant::create(element::i64, {}, {axis}); + axis_const->set_friendly_name("axis_const"); const auto concat = make_shared(NodeVector{A, B, C_const}, axis); + concat->set_friendly_name("concat"); const vector indices{-1}; const auto indices_const = op::Constant::create(element::i64, {indices.size()}, indices); + indices_const->set_friendly_name("indices_const"); const auto gather = make_shared(concat, indices_const, axis_const); gather->set_friendly_name("test"); auto f = make_shared(gather, ParameterVector{A, B}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"axis_const", "concat", "indices_const", "test"}); const auto values_out = new_const->get_vector(); ASSERT_TRUE(test::all_close_f(values_out, {b_value}, MIN_FLOAT_TOLERANCE_BITS)); @@ -1962,9 +2080,7 @@ TEST(constant_folding, const_gather_v7_subgraph_no_constant_input) { gather->set_friendly_name("test"); auto f = make_shared(gather, ParameterVector{A, B, C}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 0); @@ -1984,9 +2100,7 @@ TEST(constant_folding, const_gather_v7_subgraph_no_constant_input_scalar) { const auto gather = make_shared(concat, indices_const, axis_const); auto f = make_shared(gather, ParameterVector{A, B, C}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 0); @@ -2007,9 +2121,7 @@ 
TEST(constant_folding, const_gather_v7_subgraph_skip_if_non_zero_axis) { const auto gather = make_shared(concat, indices_const, axis_const); auto f = make_shared(gather, ParameterVector{A, B, C}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 1); ASSERT_EQ(count_ops_of_type(f), 1); @@ -2029,9 +2141,7 @@ TEST(constant_folding, const_gather_v7_subgraph_skip_if_non_single_indices) { const auto gather = make_shared(concat, indices_const, axis_const); auto f = make_shared(gather, ParameterVector{A, B, C}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 1); ASSERT_EQ(count_ops_of_type(f), 1); @@ -2051,9 +2161,7 @@ TEST(constant_folding, const_gather_v7_subgraph_skip_if_concat_output_shape_dyna const auto gather = make_shared(concat, indices_const, axis_const); auto f = make_shared(gather, ParameterVector{A, B, C}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 1); ASSERT_EQ(count_ops_of_type(f), 1); @@ -2073,9 +2181,7 @@ TEST(constant_folding, const_gather_v7_subgraph_skip_if_not_single_input) { const auto gather = make_shared(concat, indices_const, axis_const); auto f = make_shared(gather, ParameterVector{A, B, C}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 1); ASSERT_EQ(count_ops_of_type(f), 1); @@ -2086,9 +2192,13 @@ TEST(constant_folding, const_strided_slice) { vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; auto constant = make_shared(element::i32, shape_in, values_in); + constant->set_friendly_name("constant"); auto begin = op::Constant::create(element::i64, {1}, {2}); + begin->set_friendly_name("begin"); auto end = 
op::Constant::create(element::i64, {1}, {15}); + end->set_friendly_name("end"); auto stride = op::Constant::create(element::i64, {1}, {3}); + stride->set_friendly_name("stride"); auto slice = make_shared(constant, begin, end, @@ -2099,16 +2209,14 @@ TEST(constant_folding, const_strided_slice) { auto f = make_shared(slice, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant", "begin", "end", "stride", "test"}); auto values_out = new_const->get_vector(); vector sliced_values{3, 6, 9, 12, 15}; @@ -2123,21 +2231,21 @@ TEST(constant_folding, constant_dyn_reshape) { vector values_shape{2, 4, 1}; auto constant_in = make_shared(element::f32, shape_in, values_in); + constant_in->set_friendly_name("constant_in"); auto constant_shape = make_shared(element::i64, shape_shape, values_shape); + constant_shape->set_friendly_name("constant_shape"); auto dyn_reshape = make_shared(constant_in, constant_shape, false); dyn_reshape->set_friendly_name("test"); auto f = make_shared(dyn_reshape, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant_in", "constant_shape", "test"}); auto values_out = new_const->get_vector(); ASSERT_TRUE(test::all_close_f(values_in, values_out, 
MIN_FLOAT_TOLERANCE_BITS)); @@ -2156,24 +2264,25 @@ TEST(constant_folding, constant_dyn_reshape_shape_not_originally_constant) { vector values_shape_b{1, 1, 1}; auto constant_in = make_shared(element::f32, shape_in, values_in); + constant_in->set_friendly_name("constant_in"); auto constant_shape_a = make_shared(element::i64, shape_shape, values_shape_a); + constant_shape_a->set_friendly_name("constant_shape_a"); auto constant_shape_b = make_shared(element::i64, shape_shape, values_shape_b); - auto dyn_reshape = make_shared(constant_in, - std::make_shared(constant_shape_a, constant_shape_b), - false); + constant_shape_b->set_friendly_name("constant_shape_b"); + auto add = std::make_shared(constant_shape_a, constant_shape_b); + add->set_friendly_name("add"); + auto dyn_reshape = make_shared(constant_in, add, false); dyn_reshape->set_friendly_name("test"); auto f = make_shared(dyn_reshape, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant_in", "constant_shape_a", "constant_shape_b", "add", "test"}); auto values_out = new_const->get_vector(); ASSERT_TRUE(test::all_close_f(values_in, values_out, MIN_FLOAT_TOLERANCE_BITS)); @@ -2188,9 +2297,7 @@ TEST(constant_folding, const_reshape_no_data_copy) { auto f = std::make_shared(NodeVector{consumer1, consumer2}, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); auto const1 = std::dynamic_pointer_cast(consumer1->input_value(0).get_node_shared_ptr()); auto const2 = 
std::dynamic_pointer_cast(consumer2->input_value(0).get_node_shared_ptr()); @@ -2210,9 +2317,7 @@ TEST(constant_folding, const_squeeze_no_data_copy) { auto f = std::make_shared(NodeVector{consumer1, consumer2}, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); auto const1 = std::dynamic_pointer_cast(consumer1->input_value(0).get_node_shared_ptr()); auto const2 = std::dynamic_pointer_cast(consumer2->input_value(0).get_node_shared_ptr()); @@ -2232,9 +2337,7 @@ TEST(constant_folding, const_unsqueeze_no_data_copy) { auto f = std::make_shared(NodeVector{consumer1, consumer2}, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); auto const1 = std::dynamic_pointer_cast(consumer1->input_value(0).get_node_shared_ptr()); auto const2 = std::dynamic_pointer_cast(consumer2->input_value(0).get_node_shared_ptr()); @@ -2253,21 +2356,21 @@ TEST(constant_folding, constant_transpose) { vector values_perm{1, 0}; auto constant_in = make_shared(element::f64, shape_in, values_in); + constant_in->set_friendly_name("constant_in"); auto constant_perm = make_shared(element::i64, shape_perm, values_perm); + constant_perm->set_friendly_name("constant_perm"); auto transpose = make_shared(constant_in, constant_perm); transpose->set_friendly_name("test"); auto f = make_shared(transpose, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant_in", "constant_perm", "test"}); auto values_out = new_const->get_vector(); vector 
values_permute{0, 4, 1, 5, 2, 6, 3, 7}; @@ -2281,22 +2384,23 @@ void range_test(T start, T stop, T step, const vector& values_expected) { vector values_step{step}; auto constant_start = make_shared(element::from(), Shape{}, values_start); + constant_start->set_friendly_name("constant_start"); auto constant_stop = make_shared(element::from(), Shape{}, values_stop); + constant_stop->set_friendly_name("constant_stop"); auto constant_step = make_shared(element::from(), Shape{}, values_step); + constant_step->set_friendly_name("constant_step"); auto range = make_shared(constant_start, constant_stop, constant_step); range->set_friendly_name("test"); auto f = make_shared(range, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant_start", "constant_stop", "constant_step", "test"}); auto values_out = new_const->template get_vector(); @@ -2322,22 +2426,23 @@ TEST(constant_folding, constant_v1_select) { vector values_f{11, 12, 13, 14, 15, 16, 17, 18}; auto constant_selection = make_shared(element::boolean, Shape{4}, values_selection); + constant_selection->set_friendly_name("constant_selection"); auto constant_t = make_shared(element::i64, Shape{4}, values_t); + constant_t->set_friendly_name("constant_t"); auto constant_f = make_shared(element::i64, Shape{2, 4}, values_f); + constant_f->set_friendly_name("constant_f"); auto select = make_shared(constant_selection, constant_t, constant_f); select->set_friendly_name("test"); auto f = make_shared(select, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + 
run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant_selection", "constant_t", "constant_f", "test"}); auto values_out = new_const->get_vector(); vector values_expected{11, 2, 3, 14, 15, 2, 3, 18}; @@ -2353,16 +2458,14 @@ TEST(constant_folding, constant_v1_split) { auto split_v1 = make_shared(const_data, const_axis, num_splits); auto f = make_shared(split_v1->outputs(), ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), num_splits); - auto res1 = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); - auto res2 = ov::as_type_ptr(f->get_results().at(1)->input_value(0).get_node_shared_ptr()); - auto res3 = ov::as_type_ptr(f->get_results().at(2)->input_value(0).get_node_shared_ptr()); + auto res1 = get_result_constant(f); + auto res2 = get_result_constant(f, 1); + auto res3 = get_result_constant(f, 2); ASSERT_TRUE(res1); ASSERT_TRUE(res2); ASSERT_TRUE(res3); @@ -2384,16 +2487,14 @@ TEST(constant_folding, constant_v1_split_specialized) { auto split_v1 = make_shared(const_data, const_axis, num_splits); auto f = make_shared(split_v1->outputs(), ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), num_splits); - auto res1 = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); - auto res2 = ov::as_type_ptr(f->get_results().at(1)->input_value(0).get_node_shared_ptr()); - auto res3 = 
ov::as_type_ptr(f->get_results().at(2)->input_value(0).get_node_shared_ptr()); + auto res1 = get_result_constant(f); + auto res2 = get_result_constant(f, 1); + auto res3 = get_result_constant(f, 2); ASSERT_TRUE(res1); ASSERT_TRUE(res2); ASSERT_TRUE(res3); @@ -2416,32 +2517,32 @@ TEST(constant_folding, constant_v1_split_axis_1_4_splits) { 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}; const auto const_data = op::Constant::create(element::i64, Shape{4, 4, 4}, data); + const_data->set_friendly_name("const_data"); const auto const_axis = op::Constant::create(element::i64, Shape{}, {1}); + const_axis->set_friendly_name("const_axis"); const auto num_splits = 4; auto split_v1 = make_shared(const_data, const_axis, num_splits); split_v1->set_friendly_name("test"); auto f = make_shared(split_v1->outputs(), ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), num_splits); - auto res1 = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); - auto res2 = ov::as_type_ptr(f->get_results().at(1)->input_value(0).get_node_shared_ptr()); - auto res3 = ov::as_type_ptr(f->get_results().at(2)->input_value(0).get_node_shared_ptr()); - auto res4 = ov::as_type_ptr(f->get_results().at(3)->input_value(0).get_node_shared_ptr()); + auto res1 = get_result_constant(f); + auto res2 = get_result_constant(f, 1); + auto res3 = get_result_constant(f, 2); + auto res4 = get_result_constant(f, 3); ASSERT_TRUE(res1); - ASSERT_EQ(res1->get_friendly_name(), "test.0"); + check_names(res1, {"const_data", "const_axis", "test"}, "test.0"); ASSERT_TRUE(res2); - ASSERT_EQ(res2->get_friendly_name(), "test.1"); + check_names(res2, {"const_data", "const_axis", "test"}, "test.1"); ASSERT_TRUE(res3); - ASSERT_EQ(res3->get_friendly_name(), "test.2"); + check_names(res3, {"const_data", "const_axis", "test"}, "test.2"); 
ASSERT_TRUE(res4); - ASSERT_EQ(res4->get_friendly_name(), "test.3"); + check_names(res4, {"const_data", "const_axis", "test"}, "test.3"); auto res1_values = res1->get_vector(); ASSERT_EQ(vector({0, 1, 2, 3, 16, 17, 18, 19, 32, 33, 34, 35, 48, 49, 50, 51}), res1_values); @@ -2469,15 +2570,13 @@ TEST(constant_folding, constant_v1_split_axis_1_2_splits) { auto split_v1 = make_shared(const_data, const_axis, num_splits); auto f = make_shared(split_v1->outputs(), ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), num_splits); - auto res1 = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); - auto res2 = ov::as_type_ptr(f->get_results().at(1)->input_value(0).get_node_shared_ptr()); + auto res1 = get_result_constant(f); + auto res2 = get_result_constant(f, 1); ASSERT_TRUE(res1); ASSERT_TRUE(res2); @@ -2508,15 +2607,13 @@ TEST(constant_folding, constant_v1_variadic_split_axis_1_2_splits) { auto variadic_split_v1 = make_shared(const_data, const_axis, constant_lengths); auto f = make_shared(variadic_split_v1->outputs(), ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), values_lengths.size()); - auto res1 = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); - auto res2 = ov::as_type_ptr(f->get_results().at(1)->input_value(0).get_node_shared_ptr()); + auto res1 = get_result_constant(f); + auto res2 = get_result_constant(f, 1); ASSERT_TRUE(res1); ASSERT_TRUE(res2); @@ -2546,16 +2643,14 @@ TEST(constant_folding, constant_v1_variadic_split_axis_1_3_splits_neg_length) { auto variadic_split_v1 = make_shared(const_data, const_axis, constant_lengths); auto f = make_shared(variadic_split_v1->outputs(), ParameterVector{}); - 
pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), values_lengths.size()); - auto res1 = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); - auto res2 = ov::as_type_ptr(f->get_results().at(1)->input_value(0).get_node_shared_ptr()); - auto res3 = ov::as_type_ptr(f->get_results().at(2)->input_value(0).get_node_shared_ptr()); + auto res1 = get_result_constant(f); + auto res2 = get_result_constant(f, 1); + auto res3 = get_result_constant(f, 2); ASSERT_TRUE(res1); ASSERT_TRUE(res2); ASSERT_TRUE(res3); @@ -2584,14 +2679,12 @@ TEST(constant_folding, constant_v1_one_hot) { auto one_hot_v1 = make_shared(indices_const, depth_const, on_const, off_const, axis); auto f = make_shared(one_hot_v1, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto res = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto res = get_result_constant(f); ASSERT_TRUE(res); ASSERT_EQ((Shape{3, 3}), res->get_output_shape(0)); @@ -2614,14 +2707,12 @@ TEST(constant_folding, constant_v1_one_hot_negative_axes) { auto one_hot_v1 = make_shared(indices_const, depth_const, on_const, off_const, axis); auto f = make_shared(one_hot_v1, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto res = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto res = get_result_constant(f); ASSERT_TRUE(res); ASSERT_EQ((Shape{4, 3}), res->get_output_shape(0)); @@ -2646,25 +2737,27 @@ TEST(constant_folding, constant_v1_one_hot_negative_axes_2) { auto off_value = false; const auto 
indices_const = op::Constant::create(element::i64, Shape{2, 2}, indices); + indices_const->set_friendly_name("indices_const"); const auto depth_const = op::Constant::create(element::i64, Shape{}, {3}); + depth_const->set_friendly_name("depth_const"); const auto on_const = op::Constant::create(element::boolean, Shape{}, {on_value}); + on_const->set_friendly_name("on_const"); const auto off_const = op::Constant::create(element::boolean, Shape{}, {off_value}); + off_const->set_friendly_name("off_const"); int64_t axis = -1; auto one_hot_v1 = make_shared(indices_const, depth_const, on_const, off_const, axis); one_hot_v1->set_friendly_name("test"); auto f = make_shared(one_hot_v1, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto res = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto res = get_result_constant(f); ASSERT_TRUE(res); - ASSERT_EQ(res->get_friendly_name(), "test"); + check_names(res, {"indices_const", "depth_const", "on_const", "off_const", "test"}); ASSERT_EQ((Shape{2, 2, 3}), res->get_output_shape(0)); ASSERT_EQ(vector({on_value, @@ -2689,22 +2782,22 @@ TEST(constant_folding, constant_tile_1d) { vector values_in{0, 1}; auto data = make_shared(element::i32, shape_in, values_in); + data->set_friendly_name("data"); vector values_repeats{2}; auto repeats = make_shared(element::i64, shape_repeats, values_repeats); + repeats->set_friendly_name("repeats"); auto tile = make_shared(data, repeats); tile->set_friendly_name("test"); auto f = make_shared(tile, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + 
auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"data", "repeats", "test"}); auto values_out = new_const->get_vector(); vector values_expected{0, 1, 0, 1}; @@ -2718,22 +2811,22 @@ TEST(constant_folding, constant_tile_3d_small_data_rank) { vector values_in{0, 1}; auto data = make_shared(element::i32, shape_in, values_in); + data->set_friendly_name("data"); vector values_repeats{2, 2, 2}; auto repeats = make_shared(element::i64, shape_repeats, values_repeats); + repeats->set_friendly_name("repeats"); auto tile = make_shared(data, repeats); tile->set_friendly_name("test"); auto f = make_shared(tile, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"data", "repeats", "test"}); auto values_out = new_const->get_vector(); vector values_expected{0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1}; @@ -2747,22 +2840,22 @@ TEST(constant_folding, constant_tile_3d_few_repeats) { vector values_in{1, 2, 3, 4, 5, 6}; auto data = make_shared(element::i32, shape_in, values_in); + data->set_friendly_name("data"); vector values_repeats{2, 1}; auto repeats = make_shared(element::i64, shape_repeats, values_repeats); + repeats->set_friendly_name("repeats"); auto tile = make_shared(data, repeats); tile->set_friendly_name("test"); auto f = make_shared(tile, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = 
ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"data", "repeats", "test"}); auto values_out = new_const->get_vector(); vector values_expected{1, 2, 3, 1, 2, 3, 4, 5, 6, 4, 5, 6}; @@ -2776,22 +2869,22 @@ TEST(constant_folding, constant_tile_1d_0_repeats) { vector values_in{0, 1}; auto data = make_shared(element::i32, shape_in, values_in); + data->set_friendly_name("data"); vector values_repeats{0}; auto repeats = make_shared(element::i64, shape_repeats, values_repeats); + repeats->set_friendly_name("repeats"); auto tile = make_shared(data, repeats); tile->set_friendly_name("test"); auto f = make_shared(tile, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"data", "repeats", "test"}); auto values_out = new_const->get_vector(); vector values_expected{}; @@ -2805,22 +2898,22 @@ TEST(constant_folding, constant_tile_2d_0_repeats) { vector values_in{0, 1, 2, 3}; auto data = make_shared(element::i32, shape_in, values_in); + data->set_friendly_name("data"); vector values_repeats{0, 0}; auto repeats = make_shared(element::i64, shape_repeats, values_repeats); + repeats->set_friendly_name("repeats"); auto tile = make_shared(data, repeats); tile->set_friendly_name("test"); auto f = make_shared(tile, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 
1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"data", "repeats", "test"}); auto values_out = new_const->get_vector(); vector values_expected{}; @@ -2834,22 +2927,22 @@ TEST(constant_folding, constant_tile_0_rank_data) { vector values_in{1}; auto data = make_shared(element::i32, shape_in, values_in); + data->set_friendly_name("data"); vector values_repeats{4}; auto repeats = make_shared(element::i64, shape_repeats, values_repeats); + repeats->set_friendly_name("repeats"); auto tile = make_shared(data, repeats); tile->set_friendly_name("test"); auto f = make_shared(tile, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"data", "repeats", "test"}); auto values_out = new_const->get_vector(); vector values_expected{1, 1, 1, 1}; @@ -2858,22 +2951,21 @@ TEST(constant_folding, constant_tile_0_rank_data) { TEST(constant_folding, constant_non_zero_0D) { auto data = op::Constant::create(element::i32, Shape{}, {1}); + data->set_friendly_name("data"); auto non_zero = make_shared(data); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); // Fold into constant with shape of {1, 1} for scalar input with // non-zero value ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = 
ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"data", "test"}); const auto values_out = new_const->get_vector(); const vector values_expected{0}; @@ -2884,20 +2976,19 @@ TEST(constant_folding, constant_non_zero_0D) { TEST(constant_folding, constant_non_zero_1D) { vector values_in{0, 1, 0, 1}; auto data = make_shared(element::i32, Shape{4}, values_in); + data->set_friendly_name("data"); auto non_zero = make_shared(data); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"data", "test"}); const auto values_out = new_const->get_vector(); const vector values_expected{1, 3}; @@ -2908,20 +2999,19 @@ TEST(constant_folding, constant_non_zero_1D) { TEST(constant_folding, constant_non_zero_int32_output_type) { vector values_in{0, 1, 0, 1}; auto data = make_shared(element::i32, Shape{4}, values_in); + data->set_friendly_name("data"); auto non_zero = make_shared(data, element::i32); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = get_result_constant(f); 
ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"data", "test"}); ASSERT_EQ(element::i32, new_const->get_element_type()); const auto values_out = new_const->get_vector(); @@ -2933,20 +3023,19 @@ TEST(constant_folding, constant_non_zero_int32_output_type) { TEST(constant_folding, constant_non_zero_1D_all_indices) { const vector values_in{1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}; const auto data = make_shared(element::f32, Shape{values_in.size()}, values_in); + data->set_friendly_name("data"); const auto non_zero = make_shared(data); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"data", "test"}); const auto values_out = new_const->get_vector(); const vector values_expected{0, 1, 2, 3, 4, 5, 6, 7}; @@ -2957,20 +3046,19 @@ TEST(constant_folding, constant_non_zero_1D_all_indices) { TEST(constant_folding, constant_non_zero_2D) { vector values_in{1, 0, 0, 0, 1, 0, 1, 1, 0}; auto data = make_shared(element::i32, Shape{3, 3}, values_in); + data->set_friendly_name("data"); auto non_zero = make_shared(data); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = get_result_constant(f); 
ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"data", "test"}); const auto values_out = new_const->get_vector(); const vector values_expected{0, 1, 2, 2, 0, 1, 0, 1}; @@ -2981,20 +3069,19 @@ TEST(constant_folding, constant_non_zero_2D) { TEST(constant_folding, DISABLED_constant_non_zero_2D_all_indices) { const vector values_in{1, 1, 1, 1, 1, 1, 1, 1, 1}; const auto data = make_shared(element::i8, Shape{3, 3}, values_in); + data->set_friendly_name("data"); const auto non_zero = make_shared(data); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"data", "test"}); const auto values_out = new_const->get_vector(); const vector values_expected{0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2}; @@ -3005,41 +3092,39 @@ TEST(constant_folding, DISABLED_constant_non_zero_2D_all_indices) { TEST(constant_folding, DISABLED_constant_non_zero_2D_all_zeros) { const vector values_in{0, 0, 0, 0, 0, 0}; const auto data = make_shared(element::u8, Shape{2, 3}, values_in); + data->set_friendly_name("data"); const auto non_zero = make_shared(data); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); // fold into Constant with shape of {0} ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = 
ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"data", "test"}); ASSERT_EQ(shape_size(new_const->get_shape()), 0); } TEST(constant_folding, constant_non_zero_3D) { vector values_in{1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0}; auto data = make_shared(element::i32, Shape{2, 3, 3}, values_in); + data->set_friendly_name("data"); auto non_zero = make_shared(data); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"data", "test"}); const auto values_out = new_const->get_vector(); const vector values_expected{0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 2, 2, 2, @@ -3054,25 +3139,27 @@ TEST(constant_folding, constant_scatter_elements_update_basic) { const auto data_const = op::Constant::create(element::f32, data_shape, std::vector(shape_size(data_shape), 0.f)); + data_const->set_friendly_name("data_const"); const auto indices_const = op::Constant::create(element::i32, indices_shape, {1, 0, 2, 0, 2, 1}); + indices_const->set_friendly_name("indices_const"); const auto updates_const = op::Constant::create(element::f32, indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}); + updates_const->set_friendly_name("updates_const"); const auto axis_const = op::Constant::create(element::i64, Shape{}, {0}); + axis_const->set_friendly_name("axis_const"); auto scatter_elem_updt = make_shared(data_const, indices_const, 
updates_const, axis_const); scatter_elem_updt->set_friendly_name("test"); auto f = make_shared(scatter_elem_updt, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto result_node = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto result_node = get_result_constant(f); ASSERT_TRUE(result_node); - ASSERT_EQ(result_node->get_friendly_name(), "test"); + check_names(result_node, {"data_const", "indices_const", "updates_const", "axis_const", "test"}); ASSERT_EQ(data_shape, result_node->get_output_shape(0)); std::vector expected{2.f, 1.1f, 0.0f, 1.f, 0.0f, 2.2f, 0.f, 2.1f, 1.2f}; range_test_check(result_node->cast_vector(), expected); @@ -3092,14 +3179,12 @@ TEST(constant_folding, constant_scatter_elements_update_negative_axis) { make_shared(data_const, indices_const, updates_const, axis_const); auto f = make_shared(scatter_elem_updt, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto result_node = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto result_node = get_result_constant(f); ASSERT_TRUE(result_node); ASSERT_EQ(data_shape, result_node->get_output_shape(0)); std::vector expected{1.1f, 1.0f, 1.2f, 2.0f, 2.2f, 2.1f, 0.0f, 0.0f, 0.0f}; @@ -3120,14 +3205,12 @@ TEST(constant_folding, constant_scatter_elements_update_1d_axis) { make_shared(data_const, indices_const, updates_const, axis_const); auto f = make_shared(scatter_elem_updt, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto result_node = 
ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto result_node = get_result_constant(f); ASSERT_TRUE(result_node); ASSERT_EQ(data_shape, result_node->get_output_shape(0)); std::vector expected{2.f, 1.1f, 0.0f, 1.f, 0.0f, 2.2f, 0.f, 2.1f, 1.2f}; @@ -3149,14 +3232,12 @@ TEST(constant_folding, constant_scatter_elements_update_3d_i16) { make_shared(data_const, indices_const, updates_const, axis_const); auto f = make_shared(scatter_elem_updt, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto result_node = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto result_node = get_result_constant(f); ASSERT_TRUE(result_node); ASSERT_EQ(data_shape, result_node->get_output_shape(0)); std::vector expected{4, 2, 0, 1, 0, 6, 0, 5, 3, 10, 0, 12, 0, 11, 0, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0}; @@ -3177,14 +3258,12 @@ TEST(constant_folding, constant_scatter_elements_update_one_elem) { make_shared(data_const, indices_const, updates_const, axis_const); auto f = make_shared(scatter_elem_updt, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto result_node = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto result_node = get_result_constant(f); ASSERT_TRUE(result_node); ASSERT_EQ(data_shape, result_node->get_output_shape(0)); std::vector expected{input_data}; @@ -3199,21 +3278,21 @@ void test_constant_folding_reshape_v1(Shape& shape_in, vector values_shape, bool zero_flag = false) { auto constant_in = make_shared(element::f32, shape_in, values_in); + constant_in->set_friendly_name("constant_in"); auto constant_shape = make_shared(element::i64, shape_shape, 
values_shape); + constant_shape->set_friendly_name("constant_shape"); auto dyn_reshape = make_shared(constant_in, constant_shape, zero_flag); dyn_reshape->set_friendly_name("test"); auto f = make_shared(dyn_reshape, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = get_result_constant(f); ASSERT_TRUE(new_const); - ASSERT_EQ(new_const->get_friendly_name(), "test"); + check_names(new_const, {"constant_in", "constant_shape", "test"}); auto values_out = new_const->get_vector(); ASSERT_TRUE(test::all_close_f(values_in, values_out, MIN_FLOAT_TOLERANCE_BITS)); @@ -3270,16 +3349,13 @@ TEST(constant_folding, disable_constant_folding) { ov::disable_constant_folding(convert); - pass::Manager m; - m.register_pass(); - m.run_passes(f); - + run_constant_folding(f); // Check that sub-graph on second Interpolate input wasn't folded ASSERT_EQ(interpolate->input_value(1), convert_after->output(0)); ov::enable_constant_folding(convert); - m.run_passes(f); + run_constant_folding(f); // After we enabled CF the sub-graph will be folded to Constant ASSERT_TRUE(ov::is_type(interpolate->get_input_node_shared_ptr(1))); @@ -3302,16 +3378,14 @@ TEST(constant_folding, disable_constant_folding_simple) { ov::disable_constant_folding(reshape); - pass::Manager m; - m.register_pass(); - m.run_passes(f); + run_constant_folding(f); // Check that Reshape is not folded ASSERT_EQ(divide->input_value(1), reshape->output(0)); ov::enable_constant_folding(reshape); - m.run_passes(f); + run_constant_folding(f); // After we enabled CF the sub-graph will be folded to Constant ASSERT_TRUE(ov::is_type(divide->get_input_node_shared_ptr(1))); @@ -3379,15 +3453,13 @@ TEST(constant_folding, constant_loop) { auto results = ResultVector{result0, 
result1}; auto f = make_shared(results, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(f); + run_constant_folding(f); ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 2); - auto result_node_0 = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); - auto result_node_1 = ov::as_type_ptr(f->get_results().at(1)->input_value(0).get_node_shared_ptr()); + auto result_node_0 = get_result_constant(f); + auto result_node_1 = get_result_constant(f, 1); ASSERT_TRUE(result_node_0); ASSERT_TRUE(result_node_1); @@ -3410,9 +3482,7 @@ TEST(constant_folding, disable_constant_folding_for_shapeof) { ov::disable_constant_folding(shapeof); - pass::Manager mgr; - mgr.register_pass(); - mgr.run_passes(model); + run_constant_folding(model); ASSERT_EQ(reshape->input_value(1), shapeof->output(0)); } @@ -3430,9 +3500,7 @@ TEST(constant_folding, disable_constant_folding_for_squeeze_unsqueeze) { ov::disable_constant_folding(squeeze); ov::disable_constant_folding(unsqueeze); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(model); + run_constant_folding(model); ASSERT_EQ(count_ops_of_type(model), 1); ASSERT_EQ(count_ops_of_type(model), 1); @@ -3448,9 +3516,7 @@ TEST(constant_folding, disable_constant_folding_for_convert_like) { ov::disable_constant_folding(convert_like); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(model); + run_constant_folding(model); ASSERT_EQ(count_ops_of_type(model), 1); } @@ -3463,9 +3529,7 @@ TEST(constant_folding, fold_convert_like_node) { auto model = std::make_shared(NodeVector{consumer1}, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(model); + run_constant_folding(model); ASSERT_EQ(count_ops_of_type(model), 0); } @@ -3478,9 +3542,7 @@ TEST(constant_folding, fold_convert_like_but_node_is_not_foldable) { auto model = 
std::make_shared(NodeVector{consumer1}, ParameterVector{data}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(model); + run_constant_folding(model); ASSERT_EQ(count_ops_of_type(model), 1); } @@ -3513,11 +3575,11 @@ TEST(constant_folding, evaluate_on_tensor_vector) { EXPECT_CALL(*mock, evaluate).Times(1); auto model = std::make_shared(NodeVector{mock}, ParameterVector{}); - pass::Manager pass_manager; - pass_manager.register_pass(); - pass_manager.run_passes(model); + + run_constant_folding(model); + vector add_expected{2, 4, 6, 8}; - auto result_node = ov::as_type_ptr(model->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto result_node = get_result_constant(model); ASSERT_TRUE(result_node); ASSERT_EQ(data_shape, result_node->get_output_shape(0)); ASSERT_EQ(add_expected, result_node->cast_vector()); diff --git a/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp b/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp index 2b4d4c952a7..e321d94dc8b 100644 --- a/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp +++ b/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp @@ -14,6 +14,7 @@ #include #include #include +#include #include #include "any_copy.hpp" @@ -33,6 +34,7 @@ #include "openvino/core/model.hpp" #include "openvino/core/runtime_attribute.hpp" #include "openvino/op/util/op_types.hpp" +#include "openvino/pass/manager.hpp" #include "threading/ie_executor_manager.hpp" #include "transformations/utils/utils.hpp" @@ -339,6 +341,12 @@ std::unordered_set GetSupportedNodes( } auto transformed_model = model->clone(); + + // Cleanup fused names if there are present in original model + ov::pass::Manager m; + m.register_pass(); + m.run_passes(transformed_model); + transform(transformed_model); auto ops = transformed_model->get_ordered_ops(); @@ -346,68 +354,80 @@ std::unordered_set GetSupportedNodes( std::unordered_set supported = GetRemovedNodes(model, 
transformed_model); std::unordered_set unsupported; + auto get_names_set = [](const std::shared_ptr& op) -> std::unordered_set { + auto fused_names = ngraph::getFusedNamesVector(op); + std::unordered_set names(fused_names.begin(), fused_names.end()); + names.insert(op->get_friendly_name()); + return names; + }; + + // Collect all operation names even there are no such names in original model for (auto&& op : ops) { - bool is_supported = false; - bool is_checked = false; - if (InferenceEngine::details::contains(original_ops, op->get_friendly_name())) { - is_supported = is_node_supported(op); - is_checked = true; - if (is_supported) { - supported.emplace(op->get_friendly_name()); - } else { - unsupported.emplace(op->get_friendly_name()); - } + auto names = get_names_set(op); + if (is_node_supported(op)) { + supported.insert(names.begin(), names.end()); + } else { + unsupported.insert(names.begin(), names.end()); } + } - for (auto&& fusedLayerName : ngraph::getFusedNamesVector(op)) { - if (InferenceEngine::details::contains(original_ops, fusedLayerName)) { - if (!is_checked) { - is_supported = is_node_supported(op); - is_checked = true; - } - if (is_supported) { - supported.emplace(fusedLayerName); - } else { - unsupported.emplace(fusedLayerName); - } - } - } + // If operation was fused into several operations where one is supported + // but another one is not supported remove it from supported + for (auto&& name : unsupported) { + supported.erase(name); } - for (auto&& unsupportedNode : unsupported) { - supported.erase(unsupportedNode); - } - for (auto&& node : model->get_ops()) { - if (InferenceEngine::details::contains(supported, node->get_friendly_name())) { - for (auto&& inputNodeOutput : node->input_values()) { - if (ov::op::util::is_constant(inputNodeOutput.get_node()) || - ov::op::util::is_parameter(inputNodeOutput.get_node())) { - supported.emplace(inputNodeOutput.get_node()->get_friendly_name()); - } - } - for (auto&& outputs : node->outputs()) { - for 
(auto&& outputNodeInput : outputs.get_target_inputs()) { - if (ov::op::util::is_output(outputNodeInput.get_node())) { - supported.emplace(outputNodeInput.get_node()->get_friendly_name()); - } - } - } - } - if (ov::op::util::is_constant(node) || ov::op::util::is_parameter(node)) { - if (node->output(0).get_target_inputs().size() && - !InferenceEngine::details::contains( - supported, - node->output(0).get_target_inputs().begin()->get_node()->get_friendly_name())) { - supported.erase(node->get_friendly_name()); + auto has_all_consumers_unsupported = [&supported](const std::shared_ptr& node) { + for (auto&& input : node->output(0).get_target_inputs()) { + if (details::contains(supported, input.get_node()->get_friendly_name())) { + return false; } - } else if (ov::op::util::is_output(node)) { - if (!InferenceEngine::details::contains(supported, - node->input_values().begin()->get_node()->get_friendly_name())) { - supported.erase(node->get_friendly_name()); + } + return (node->output(0).get_target_inputs().size() != 0); + }; + + auto has_unsupported_source = [&supported](const std::shared_ptr& node) { + return !details::contains(supported, node->input_values().begin()->get_node()->get_friendly_name()); + }; + + // Walk over transformed model for special handing of Parameters/Constants/Results + for (auto&& op : ops) { + // Mark Constants and all fused names as unsupported if they are have no + // supported consumers/sources + if (ov::op::util::is_constant(op)) { + if (has_all_consumers_unsupported(op)) { + auto names = get_names_set(op); + for (auto& name : get_names_set(op)) { + supported.erase(name); + } } } } - return supported; + + // Finally get intersection of all supported operation names + // and operation names from original model + std::unordered_set res; + for (auto&& name : supported) { + if (details::contains(original_ops, name)) { + res.insert(name); + } + } + + // Remove parameters which has no supported consumers + for (auto& param : 
model->get_parameters()) { + if (has_all_consumers_unsupported(param)) { + res.erase(param->get_friendly_name()); + } + } + + // Remove results which has no supported source node + for (auto& result : model->get_results()) { + if (has_unsupported_source(result)) { + res.erase(result->get_friendly_name()); + } + } + + return res; } void SetExeNetworkInfo(const std::shared_ptr& exeNetwork, diff --git a/src/inference/tests/unit/query_model_test.cpp b/src/inference/tests/unit/query_model_test.cpp new file mode 100644 index 00000000000..a68abeaa06a --- /dev/null +++ b/src/inference/tests/unit/query_model_test.cpp @@ -0,0 +1,420 @@ +// Copyright (C) 2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include + +#include +#include + +#include "cpp_interfaces/interface/ie_iplugin_internal.hpp" +#include "ngraph/ops.hpp" +#include "ngraph/pass/constant_folding.hpp" +#include "openvino/opsets/opset9.hpp" +#include "openvino/pass/manager.hpp" +#include "transformations/common_optimizations/common_optimizations.hpp" +#include "transformations/common_optimizations/nop_elimination.hpp" +#include "transformations/convert_precision.hpp" +#include "transformations/init_node_info.hpp" +#include "transformations/op_conversions/log_softmax_decomposition.hpp" +#include "transformations/rt_info/decompression.hpp" +#include "transformations/rt_info/fused_names_attribute.hpp" + +std::ostream& operator<<(std::ostream& os, const std::unordered_set& s) { + for (auto it = s.begin(); it != s.end(); ++it) { + if (it != s.begin()) { + os << ", " << *it; + } else { + os << *it; + } + } + return os; +} + +class GetSupportedNodesTest : public ::testing::Test { +protected: + ov::Shape m_shape{1, 84}; + std::shared_ptr m_function; + +public: + void Run(std::function&)> transform, + std::function)> is_node_supported, + const std::unordered_set& expected) { + auto supported = InferenceEngine::GetSupportedNodes(m_function, transform, is_node_supported); + auto const is_in_expected = 
[&expected](const std::string& x) { + return expected.find(x) != expected.end(); + }; + bool is_equal = + (supported.size() == expected.size()) && std::all_of(supported.begin(), supported.end(), is_in_expected); + std::stringstream ss; + if (!is_equal) { + ss << "Expected list of supported nodes '" << expected << "' but actually received '" << supported << "'"; + } + ASSERT_TRUE(is_equal) << ss.str(); + } +}; + +TEST_F(GetSupportedNodesTest, UnsupportedCompressedConstantCF) { + { + auto param = std::make_shared(ov::element::f32, m_shape); + param->set_friendly_name("input"); + auto constant_compressed = ngraph::op::Constant::create(ov::element::f16, m_shape, {1}); + constant_compressed->set_friendly_name("constant_compressed"); + auto convert = std::make_shared(constant_compressed, ov::element::f32); + convert->set_friendly_name("constant"); + ov::mark_as_decompression(convert); + auto add = std::make_shared(param, convert); + add->set_friendly_name("add"); + auto result = std::make_shared(add); + result->set_friendly_name("result"); + m_function = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{param}); + } + Run( + [&](std::shared_ptr& model) { + ov::pass::Manager m; + m.register_pass(); + m.register_pass(); + m.run_passes(model); + }, + [&](const std::shared_ptr& op) { + return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op); + }, + {}); +} + +TEST_F(GetSupportedNodesTest, ConstantSubgraphCF) { + { + auto constant_compressed1 = ngraph::op::Constant::create(ov::element::f16, m_shape, {1}); + constant_compressed1->set_friendly_name("constant_compressed1"); + auto convert1 = std::make_shared(constant_compressed1, ov::element::f32); + convert1->set_friendly_name("constant1"); + ov::mark_as_decompression(convert1); + auto constant_compressed2 = ngraph::op::Constant::create(ov::element::f16, m_shape, {2}); + constant_compressed2->set_friendly_name("constant_compressed2"); + auto convert2 = 
std::make_shared(constant_compressed2, ov::element::f32); + convert2->set_friendly_name("constant2"); + ov::mark_as_decompression(convert2); + auto add = std::make_shared(convert1, convert2); + add->set_friendly_name("add"); + auto const_reshape = ov::opset9::Constant::create(ngraph::element::i64, ov::Shape{1}, {84}); + const_reshape->set_friendly_name("const_reshape"); + auto reshape = std::make_shared(add, const_reshape, false); + reshape->set_friendly_name("reshape"); + auto result = std::make_shared(reshape); + result->set_friendly_name("result"); + m_function = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{}); + } + Run( + [&](std::shared_ptr& model) { + ov::pass::Manager m; + m.register_pass(); + m.register_pass(); + m.run_passes(model); + }, + [&](const std::shared_ptr& op) { + return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op); + }, + {"constant_compressed1", + "constant1", + "constant_compressed2", + "constant2", + "add", + "const_reshape", + "reshape", + "result"}); +} + +TEST_F(GetSupportedNodesTest, SupportedCompressedConstantNop) { + { + auto param = std::make_shared(ov::element::f32, m_shape); + param->set_friendly_name("input"); + auto constant_compressed = ngraph::op::Constant::create(ov::element::f16, m_shape, {1}); + constant_compressed->set_friendly_name("constant_compressed"); + auto convert = std::make_shared(constant_compressed, ov::element::f32); + convert->set_friendly_name("constant"); + auto add = std::make_shared(param, convert); + add->set_friendly_name("add"); + auto result = std::make_shared(add); + result->set_friendly_name("result"); + m_function = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{param}); + } + Run( + [&](std::shared_ptr& model) { + ov::pass::Manager m; + m.register_pass(); + m.register_pass( + precisions_array{{ngraph::element::f16, ngraph::element::f32}}); + m.register_pass(); + m.run_passes(model); + }, + [&](const 
std::shared_ptr& op) { + return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op) || + (std::dynamic_pointer_cast(op) != nullptr); + }, + {"input", "constant_compressed", "constant", "add", "result"}); +} + +TEST_F(GetSupportedNodesTest, SupportedConstantInsertAdditionalOp) { + { + auto param = std::make_shared(ov::element::f32, m_shape); + param->set_friendly_name("input"); + auto mul_const = ngraph::op::Constant::create(ov::element::f32, m_shape, {1}); + mul_const->set_friendly_name("constant"); + auto mul = std::make_shared(param, mul_const); + mul->set_friendly_name("output_operation"); + auto result = std::make_shared(mul); + result->set_friendly_name("result"); + m_function = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{param}); + } + Run( + [&](std::shared_ptr& model) { + ov::pass::Manager m; + m.register_pass(); + m.run_passes(model); + for (auto& op : model->get_ops()) { + if (std::dynamic_pointer_cast(op) != nullptr) { + // Add one more dummy operation + auto consumers = op->output(0).get_target_inputs(); + auto shape = op->get_shape(); + auto add_const = ngraph::op::Constant::create(ov::element::f32, m_shape, {0}); + auto add = std::make_shared(op, add_const); + add->set_friendly_name(op->get_friendly_name()); + op->set_friendly_name(op->get_friendly_name() + "/previous"); + ov::copy_runtime_info(op, add); + for (auto& consumer : consumers) { + consumer.replace_source_output(add); + } + } + } + }, + [&](const std::shared_ptr& op) { + return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op) || + (std::dynamic_pointer_cast(op) != nullptr) || + (std::dynamic_pointer_cast(op) != nullptr); + }, + {"input", "constant", "output_operation", "result"}); +} + +TEST_F(GetSupportedNodesTest, PartiallySupportedCompressedConstant) { + { + auto param1 = std::make_shared(ov::element::f32, m_shape); + param1->set_friendly_name("input1"); + auto param2 = 
std::make_shared(ov::element::f32, m_shape); + param2->set_friendly_name("input2"); + auto constant_compressed = ngraph::op::Constant::create(ov::element::f16, m_shape, {1}); + constant_compressed->set_friendly_name("constant_compressed"); + auto convert = std::make_shared(constant_compressed, ov::element::f32); + convert->set_friendly_name("constant"); + ov::mark_as_decompression(convert); + auto add = std::make_shared(param1, convert); + add->set_friendly_name("add"); + auto result1 = std::make_shared(add); + result1->set_friendly_name("result1"); + auto mul = std::make_shared(param2, convert); + mul->set_friendly_name("mul"); + auto result2 = std::make_shared(mul); + result2->set_friendly_name("result2"); + + m_function = std::make_shared(ngraph::ResultVector{result1, result2}, + ngraph::ParameterVector{param1, param2}); + } + Run( + [&](std::shared_ptr& model) { + ov::pass::Manager m; + m.register_pass(); + m.register_pass(); + m.run_passes(model); + }, + [&](const std::shared_ptr& op) { + return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op) || + (std::dynamic_pointer_cast(op) != nullptr); + }, + {"input2", "constant_compressed", "constant", "mul", "result2"}); +} + +TEST_F(GetSupportedNodesTest, ConstantSubgraphSupported) { + { + auto param = std::make_shared(ov::element::f32, m_shape); + param->set_friendly_name("input"); + auto weights = ov::opset9::Constant::create(ov::element::Type_t::f32, {10, 84}, {1}); + weights->set_friendly_name("weights"); + auto shapeOf = std::make_shared(weights); + shapeOf->set_friendly_name("shapeof"); + auto const1 = ov::opset9::Constant::create(ov::element::Type_t::i32, {1}, {1}); + const1->set_friendly_name("const1"); + auto const2 = ov::opset9::Constant::create(ov::element::Type_t::i64, {}, {0}); + const2->set_friendly_name("const2"); + auto gather = std::make_shared(shapeOf, const1, const2); + gather->set_friendly_name("gather"); + auto const3 = 
ov::opset9::Constant::create(ov::element::Type_t::i64, {1}, {1}); + const3->set_friendly_name("const3"); + auto concat = std::make_shared(ov::NodeVector{const3, gather}, 0); + concat->set_friendly_name("concat"); + auto reshape = std::make_shared(param, concat, false); + reshape->set_friendly_name("reshape"); + auto matmul = std::make_shared(reshape, weights, false, true); + matmul->set_friendly_name("matmul"); + auto result = std::make_shared(matmul); + result->set_friendly_name("result"); + + m_function = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{param}); + } + Run( + [&](std::shared_ptr& model) { + ov::pass::Manager m; + m.register_pass(); + m.register_pass(); + m.register_pass(); + m.run_passes(model); + }, + [&](const std::shared_ptr& op) { + return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op) || + (std::dynamic_pointer_cast(op) != nullptr); + }, + {"input", + "weights", + "shapeof", + "const1", + "const2", + "gather", + "const3", + "concat", + "reshape", + "matmul", + "result"}); +} + +TEST_F(GetSupportedNodesTest, UnmarkedSupportedInputsOutputs) { + { + auto param = std::make_shared(ov::element::f32, m_shape); + param->set_friendly_name("input"); + auto constant = ngraph::op::Constant::create(ov::element::f32, ov::Shape{m_shape[1]}, {1}); + constant->set_friendly_name("constant"); + auto const_reshape = ov::opset9::Constant::create(ngraph::element::i64, ov::Shape{2}, m_shape); + const_reshape->set_friendly_name("const_reshape"); + auto reshape = std::make_shared(constant, const_reshape, false); + reshape->set_friendly_name("reshape"); + auto add = std::make_shared(param, reshape); + add->set_friendly_name("add"); + auto result = std::make_shared(add); + result->set_friendly_name("result"); + m_function = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{param}); + } + Run( + [&](std::shared_ptr& model) { + ov::pass::Manager m; + m.register_pass(); + 
m.register_pass(); + m.run_passes(model); + }, + [&](const std::shared_ptr& op) { + // Plugin don't mark input, constant and result as supported + return (std::dynamic_pointer_cast(op) != nullptr); + }, + {"add"}); +} + +TEST_F(GetSupportedNodesTest, WrongFusedNamesInOriginalModel) { + { + auto param = std::make_shared(ov::element::f32, m_shape); + param->set_friendly_name("input"); + auto weights = ov::opset9::Constant::create(ov::element::Type_t::f32, {10, 84}, {1}); + weights->set_friendly_name("weights"); + auto matmul = std::make_shared(param, weights, false, true); + matmul->get_rt_info()[ngraph::FusedNames::get_type_info_static()] = ngraph::FusedNames("add"); + matmul->set_friendly_name("matmul"); + auto constant = ngraph::op::Constant::create(ov::element::f32, {1, 10}, {1}); + constant->set_friendly_name("constant"); + auto add = std::make_shared(matmul, constant); + add->get_rt_info()[ngraph::FusedNames::get_type_info_static()] = ngraph::FusedNames("matmul"); + add->set_friendly_name("add"); + auto result = std::make_shared(add); + result->set_friendly_name("result"); + + m_function = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{param}); + } + Run( + [&](std::shared_ptr& model) { + return; + }, + [&](const std::shared_ptr& op) { + return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op) || + (std::dynamic_pointer_cast(op) != nullptr); + }, + {"input", "weights", "matmul"}); +} + +TEST_F(GetSupportedNodesTest, FusedNamesSupportedUnsupportedBoth) { + { + auto param = std::make_shared(ov::element::f32, m_shape); + param->set_friendly_name("input"); + auto dummy_param = std::make_shared(ov::element::f32, m_shape); + dummy_param->set_friendly_name("dummy_param"); + auto logsoftmax = std::make_shared(param, 1); + logsoftmax->set_friendly_name("logsoftmax"); + auto result = std::make_shared(logsoftmax); + result->set_friendly_name("result"); + m_function = + 
std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{param, dummy_param}); + } + Run( + [&](std::shared_ptr& model) { + ov::pass::Manager m; + m.register_pass(); + m.register_pass(); + m.run_passes(model); + }, + [&](const std::shared_ptr& op) { + // Exp is not supported and all constants are missing + return ov::op::util::is_parameter(op) || ov::op::util::is_output(op) || + (std::dynamic_pointer_cast(op) != nullptr) || + (std::dynamic_pointer_cast(op) != nullptr) || + (std::dynamic_pointer_cast(op) != nullptr) || + (std::dynamic_pointer_cast(op) != nullptr); + }, + {"dummy_param"}); // kepp dummy only since it has no unsupported consumers +} + +TEST_F(GetSupportedNodesTest, ShapeOfNonConstantNode) { + { + auto param = std::make_shared(ov::element::f32, m_shape); + param->set_friendly_name("input"); + auto slope_compressed = ov::opset9::Constant::create(ngraph::element::f16, ngraph::Shape{}, {-2.f}); + slope_compressed->set_friendly_name("slope_compressed"); + auto convert_slope = std::make_shared(slope_compressed, ov::element::f32); + convert_slope->set_friendly_name("slope"); + ov::mark_as_decompression(convert_slope); + auto prelu = std::make_shared(param, convert_slope); + prelu->set_friendly_name("prelu"); + auto shapeOf = std::make_shared(prelu); + shapeOf->set_friendly_name("shapeof"); + auto convert_fp32 = std::make_shared(shapeOf, ov::element::f32); + convert_fp32->set_friendly_name("convert_fp32"); + auto scale = ov::opset9::Constant::create(ngraph::element::f32, ngraph::Shape{}, {2.0f}); + scale->set_friendly_name("scale"); + auto mul_scale = std::make_shared(convert_fp32, scale); + mul_scale->set_friendly_name("mul_scale"); + auto convert_i64 = std::make_shared(mul_scale, ov::element::i64); + convert_i64->set_friendly_name("convert_i64"); + auto interpolate = std::make_shared(prelu, + convert_i64, + scale, + ov::opset9::Interpolate::InterpolateAttrs()); + interpolate->set_friendly_name("interpolate"); + auto interpolate_result = 
std::make_shared(interpolate); + interpolate_result->set_friendly_name("interpolate_result"); + m_function = + std::make_shared(ngraph::ResultVector{interpolate_result}, ngraph::ParameterVector{param}); + } + Run( + [&](std::shared_ptr& model) { + ov::pass::Manager m; + m.register_pass(); + m.register_pass(); + m.run_passes(model); + }, + [&](const std::shared_ptr& op) { + return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op) || + (std::dynamic_pointer_cast(op) != nullptr); + }, + {"input", "slope_compressed", "slope", "prelu"}); // keep dummy only since it has no unsupported consumers +} \ No newline at end of file diff --git a/src/plugins/intel_cpu/src/ngraph_transformations/convert_matmul_to_fc.cpp b/src/plugins/intel_cpu/src/ngraph_transformations/convert_matmul_to_fc.cpp index d176dd857ff..2205eea67de 100644 --- a/src/plugins/intel_cpu/src/ngraph_transformations/convert_matmul_to_fc.cpp +++ b/src/plugins/intel_cpu/src/ngraph_transformations/convert_matmul_to_fc.cpp @@ -102,8 +102,9 @@ ov::intel_cpu::ConvertMatMulToFC::ConvertMatMulToFC() { * sequence starting from 0 and replace last two dimension. For example for length = 4 the * order will be [0, 1, 3, 2] that emulates transpose_a or transpose_b attribute. 
*/ + ngraph::NodeVector new_ops; - auto create_transpose = [this](const ngraph::Output& node, const std::string& transpose_name) { + auto create_transpose = [this, &new_ops ](const ngraph::Output& node, const std::string& transpose_name) { auto rank = node.get_partial_shape().rank(); std::vector transpose_order(rank.get_length()); std::iota(transpose_order.begin(), transpose_order.end(), 0); @@ -112,13 +113,14 @@ ov::intel_cpu::ConvertMatMulToFC::ConvertMatMulToFC() { auto transpose_const = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{ transpose_order.size() }, transpose_order); auto transpose = ngraph::op::util::make_try_fold(node, transpose_const); if (!ngraph::is_type(transpose)) { + new_ops.push_back(transpose_const); MatcherPass::register_new_node(transpose); } transpose->set_friendly_name(transpose_name); + new_ops.push_back(transpose); return transpose; }; - ngraph::NodeVector new_ops; bool success = true; ngraph::PartialShape shape_a_aligned, shape_b_aligned; std::tie(success, shape_a_aligned, shape_b_aligned) = get_aligned_shapes(); @@ -137,7 +139,6 @@ ov::intel_cpu::ConvertMatMulToFC::ConvertMatMulToFC() { // Weights normalization if (!matmul->get_transpose_b()) { fc_input_b = create_transpose(fc_input_b, matmul->get_friendly_name() + "/transpose_b"); - new_ops.push_back(fc_input_b.get_node_shared_ptr()); } if (rank_b != 2) { @@ -146,13 +147,15 @@ ov::intel_cpu::ConvertMatMulToFC::ConvertMatMulToFC() { std::vector reshape_shape_values = { -1ll, static_cast(K.get_length()) }; auto reshape_shape = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{ 2 }, reshape_shape_values); fc_input_b = ngraph::op::util::make_try_fold(fc_input_b, reshape_shape, false); + if (!std::dynamic_pointer_cast(fc_input_b.get_node_shared_ptr())) { + new_ops.push_back(reshape_shape); + } new_ops.push_back(fc_input_b.get_node_shared_ptr()); } // Input normalization if (matmul->get_transpose_a() && rank_a != 1) { fc_input_a = 
create_transpose(fc_input_a, matmul->get_friendly_name() + "/transpose_a"); - new_ops.push_back(fc_input_a.get_node_shared_ptr()); } auto output_rank = matmul->get_output_partial_shape(0).rank(); diff --git a/src/plugins/intel_cpu/src/ngraph_transformations/move_eltwise_up_data_movement.cpp b/src/plugins/intel_cpu/src/ngraph_transformations/move_eltwise_up_data_movement.cpp index b5a733c623b..fded6340557 100644 --- a/src/plugins/intel_cpu/src/ngraph_transformations/move_eltwise_up_data_movement.cpp +++ b/src/plugins/intel_cpu/src/ngraph_transformations/move_eltwise_up_data_movement.cpp @@ -89,6 +89,7 @@ ov::intel_cpu::MoveEltwiseUpThroughDataMov::MoveEltwiseUpThroughDataMov() { if (is_binary_op && current->get_output_partial_shape(0).rank().get_length() != eltwise->get_input_partial_shape(1).rank().get_length()) { auto old_eltwise_const = std::dynamic_pointer_cast(eltwise->get_input_node_shared_ptr(1)); auto new_constant = std::make_shared(*old_eltwise_const.get(), ngraph::Shape{}); + ngraph::copy_runtime_info(old_eltwise_const, new_constant); ngraph::replace_node(old_eltwise_const, new_constant); } ngraph::replace_output_update_name(eltwise->output(0), eltwise->input_value(0)); diff --git a/src/plugins/intel_cpu/src/utils/rt_info/memory_formats_attribute.hpp b/src/plugins/intel_cpu/src/utils/rt_info/memory_formats_attribute.hpp index 796a5e1f041..56f240ccfa1 100644 --- a/src/plugins/intel_cpu/src/utils/rt_info/memory_formats_attribute.hpp +++ b/src/plugins/intel_cpu/src/utils/rt_info/memory_formats_attribute.hpp @@ -9,6 +9,7 @@ #include #include +#include "openvino/op/util/op_types.hpp" namespace ov { namespace intel_cpu { @@ -25,6 +26,9 @@ public: MemoryFormats() = default; explicit MemoryFormats(const std::string &_memory_format) : memory_format(_memory_format) {} std::string getMemoryFormats() const { return memory_format; } + bool is_copyable(const std::shared_ptr& to) const override { + return (!ov::op::util::is_constant(to)); + } ov::Any merge(const 
ngraph::NodeVector & nodes) const override { std::set unique_mem_format; diff --git a/src/plugins/intel_gna/src/transformations/handle_transposes_around_matmul.cpp b/src/plugins/intel_gna/src/transformations/handle_transposes_around_matmul.cpp index 45c3231a37e..c1721b23068 100644 --- a/src/plugins/intel_gna/src/transformations/handle_transposes_around_matmul.cpp +++ b/src/plugins/intel_gna/src/transformations/handle_transposes_around_matmul.cpp @@ -25,15 +25,18 @@ void ReplaceTransposeWithReshape(std::shared_ptr transpose_node) { ngraph::Shape{shape.size()}, shape); auto reshape_node = std::make_shared(transpose_node->input_value(0), reshape_const, false); reshape_node->set_friendly_name(transpose_node->get_friendly_name()); - ngraph::copy_runtime_info(transpose_node, reshape_node); + ngraph::copy_runtime_info(transpose_node, {reshape_node, reshape_const }); transpose_node->output(0).replace(reshape_node->output(0)); } void InsertTranspose(std::shared_ptr prev_node, const std::string& base_name, bool before_matmul) { - auto create_reshape = [](const ngraph::Shape& shape, std::shared_ptr input_node, const std::string& name) { + ngraph::NodeVector new_ops; + auto create_reshape = [&new_ops](const ngraph::Shape& shape, std::shared_ptr input_node, const std::string& name) { auto reshape_const = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{shape.size()}, shape); + new_ops.push_back(reshape_const); auto node = std::make_shared(input_node, reshape_const, false); + new_ops.push_back(node); node->set_friendly_name(name); return node; }; @@ -51,23 +54,21 @@ void InsertTranspose(std::shared_ptr prev_node, const std::string& std::iota(std::begin(permute_order), std::end(permute_order), 0); std::swap(permute_order[transpose_ids[0]], permute_order[transpose_ids[1]]); - ngraph::NodeVector new_ops; std::shared_ptr node = prev_node; if (!before_matmul) { auto shape = prev_node->get_output_shape(0); std::swap(shape[0], shape[1]); node = create_reshape(shape, node, 
base_name + "/reshape_before_transpose"); - new_ops.push_back(node); } auto transpose_order = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{permute_order.size()}, permute_order); + new_ops.push_back(transpose_order); node = std::make_shared(node, transpose_order); node->set_friendly_name(base_name + "/in_transpose"); new_ops.push_back(node); if (before_matmul) { node = create_reshape(orig_shape, node, base_name + "/reshape_after_transpose"); - new_ops.push_back(node); } ngraph::copy_runtime_info(prev_node, new_ops); diff --git a/src/plugins/intel_gna/src/transformations/insert_reshape_around_matmul.cpp b/src/plugins/intel_gna/src/transformations/insert_reshape_around_matmul.cpp index da04f0add9d..f847773bc37 100644 --- a/src/plugins/intel_gna/src/transformations/insert_reshape_around_matmul.cpp +++ b/src/plugins/intel_gna/src/transformations/insert_reshape_around_matmul.cpp @@ -80,10 +80,11 @@ static bool InsertReshape( bool need_reshape_before = !reshape_input_node || reshape_input_node->get_output_shape(0).size() != 2; if (need_reshape_before) { std::vector before_shape = {-1, static_cast(first_node->get_output_shape(0).back())}; - auto reshape_before_node = std::make_shared(first_node, - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{before_shape.size()}, before_shape), false); + auto reshape_before_node_const = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{before_shape.size()}, before_shape); + auto reshape_before_node = std::make_shared(first_node, reshape_before_node_const, false); reshape_before_node->set_friendly_name(matmul_node->get_friendly_name() + "/reshape_before_matmul"); - ngraph::copy_runtime_info(first_node, reshape_before_node); + ngraph::copy_runtime_info(first_node, { reshape_before_node, reshape_before_node_const }); matmul_node->input(matmul_input_index).replace_source_output(reshape_before_node->output(0)); if (auto transpose_node = std::dynamic_pointer_cast(nodes.back())) { 
nodes.pop_back(); @@ -103,11 +104,11 @@ static bool InsertReshape( << " For this reason, there is no way to determine permutation shape."; } std::vector permutation_shape = {1, 0}; + auto transpose_node_copy_const = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{permutation_shape.size()}, permutation_shape); auto transpose_node_copy = transpose_node->clone_with_new_inputs( - {transpose_node->input_values()[0], - std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{permutation_shape.size()}, permutation_shape)}); - ngraph::copy_runtime_info(transpose_node, transpose_node_copy); + {transpose_node->input_values()[0], transpose_node_copy_const }); + ngraph::copy_runtime_info(transpose_node, {transpose_node_copy, transpose_node_copy_const}); ngraph::replace_node(transpose_node, transpose_node_copy); nodes.push_back(transpose_node_copy); } @@ -124,11 +125,11 @@ static bool InsertReshape( } if (need_reshape_after) { - auto reshape_after_node = std::make_shared(nodes.back(), - std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{last_node_shape.size()}, last_node_shape), false); + auto reshape_after_node_const = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{last_node_shape.size()}, last_node_shape); + auto reshape_after_node = std::make_shared(nodes.back(), reshape_after_node_const, false); reshape_after_node->set_friendly_name(nodes.back()->get_friendly_name()); - ngraph::copy_runtime_info(nodes.back(), reshape_after_node); + ngraph::copy_runtime_info(nodes.back(), { reshape_after_node, reshape_after_node_const}); for (auto consumer : consumers) { consumer.replace_source_output(reshape_after_node); } diff --git a/src/plugins/intel_gna/src/transformations/insert_transpose_after_convolution_or_pooling.cpp b/src/plugins/intel_gna/src/transformations/insert_transpose_after_convolution_or_pooling.cpp index 3f9af8c3e72..d59291c52e3 100644 --- 
a/src/plugins/intel_gna/src/transformations/insert_transpose_after_convolution_or_pooling.cpp +++ b/src/plugins/intel_gna/src/transformations/insert_transpose_after_convolution_or_pooling.cpp @@ -96,13 +96,13 @@ bool InsertTransposeAfterConvOrPool::run_on_model(const std::shared_ptr(node, reshapeConstBefore, false); reshapeBefore->set_friendly_name(node->get_friendly_name() + "/reshape_out"); - ngraph::copy_runtime_info(node, reshapeBefore); + ngraph::copy_runtime_info(node, {reshapeBefore, reshapeConstBefore}); auto transpose_order = transposeInShape.size() == 3 ? ngraph::Shape{0, 2, 1} : ngraph::Shape{0, 3, 1, 2}; - auto transpose = std::make_shared(reshapeBefore, - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{transpose_order.size()}, transpose_order)); + auto transpose_order_const = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{transpose_order.size()}, transpose_order); + auto transpose = std::make_shared(reshapeBefore, transpose_order_const); transpose->set_friendly_name(node->get_friendly_name() + "/transpose_out"); - ngraph::copy_runtime_info(node, transpose); + ngraph::copy_runtime_info(node, {transpose, transpose_order_const}); for (auto& input : consumers) { input.replace_source_output(transpose); diff --git a/src/plugins/intel_gna/src/transformations/pwl_approximation.cpp b/src/plugins/intel_gna/src/transformations/pwl_approximation.cpp index 36790d63f54..69dd5748298 100644 --- a/src/plugins/intel_gna/src/transformations/pwl_approximation.cpp +++ b/src/plugins/intel_gna/src/transformations/pwl_approximation.cpp @@ -450,7 +450,7 @@ bool transform_to_pwl( m_constant, b_constant, alpha_constant); pwl->set_base_node(node); pwl->set_friendly_name(node->get_friendly_name()); - ngraph::copy_runtime_info(node, pwl); + ngraph::copy_runtime_info(node, {pwl, m_constant, b_constant, alpha_constant}); replace_node(node, pwl); return true; } diff --git a/src/plugins/intel_gna/src/transformations/swap_input_matmul_gna.cpp 
b/src/plugins/intel_gna/src/transformations/swap_input_matmul_gna.cpp index 5de64b2562f..dbd3cc5fae7 100644 --- a/src/plugins/intel_gna/src/transformations/swap_input_matmul_gna.cpp +++ b/src/plugins/intel_gna/src/transformations/swap_input_matmul_gna.cpp @@ -29,22 +29,24 @@ static void SwapAndTransposeInputs( std::shared_ptr fq = nullptr, std::shared_ptr act = nullptr, std::shared_ptr transpose = nullptr) { + ngraph::NodeVector new_ops; + auto create_transpose = - [](ngraph::Output node, const std::string& transpose_name) -> std::shared_ptr { + [&new_ops](ngraph::Output node, const std::string& transpose_name) -> std::shared_ptr { ngraph::Shape output_shape = node.get_node_shared_ptr()->get_shape(); std::vector transpose_order(output_shape.size()); std::iota(transpose_order.begin(), transpose_order.end(), 0); std::swap(*(transpose_order.end() - 1), *(transpose_order.end() - 2)); - auto transpose = std::make_shared( - node, ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape {transpose_order.size()}, transpose_order)); + auto transpose_const = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape {transpose_order.size()}, transpose_order); + new_ops.push_back(transpose_const); + auto transpose = std::make_shared(node, transpose_const); transpose->set_friendly_name(transpose_name); + new_ops.push_back(transpose); return transpose; }; - ngraph::NodeVector new_ops; - auto transpose_matmul_input = [matmul_node, &new_ops, create_transpose](size_t ix) { std::shared_ptr matmul_input = matmul_node->input_value(ix).get_node_shared_ptr(); auto input_transpose = std::dynamic_pointer_cast(matmul_input); @@ -53,7 +55,6 @@ static void SwapAndTransposeInputs( ngraph::replace_output_update_name(input_transpose->output(0), input_transpose->input_value(0)); } else { matmul_input = create_transpose(matmul_node->input_value(ix), matmul_node->get_friendly_name() + "/input_transpose"); - new_ops.push_back(matmul_input); } return matmul_input; }; @@ -90,18 
+91,17 @@ static void SwapAndTransposeInputs( // output of MatMul will be transposed comparing with original one, so the bias should be transposed too if (bias->get_output_shape(0).size() > 1) { bias = create_transpose(bias, bias->get_friendly_name() + "/transpose"); - new_ops.push_back(bias); auto transpose_shape = bias->get_output_shape(0); auto matmul_shape = matmul_node->get_output_shape(0); if (transpose_shape.size() > matmul_shape.size()) { std::vector reshape_shape(matmul_shape.size(), 1); std::copy_if(transpose_shape.begin(), transpose_shape.end(), reshape_shape.begin(), [](size_t e) { return e > 1; }); - bias = std::make_shared(bias, - std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{reshape_shape.size()}, reshape_shape), false); + auto bias_const = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{reshape_shape.size()}, reshape_shape); + bias = std::make_shared(bias, bias_const, false); bias->set_friendly_name(add->get_friendly_name() + "/reshape"); - ngraph::copy_runtime_info(add, bias); + ngraph::copy_runtime_info(add, {bias, bias_const}); new_ops.push_back(bias); } } @@ -126,7 +126,6 @@ static void SwapAndTransposeInputs( if (transpose == nullptr) { new_node = create_transpose(new_node, last_layer_name); - new_ops.push_back(new_node); } else { ngraph::replace_output_update_name(transpose->output(0), transpose->input_value(0)); new_node->set_friendly_name(last_layer_name); diff --git a/src/tests/ie_test_utils/common_test_utils/graph_comparator.cpp b/src/tests/ie_test_utils/common_test_utils/graph_comparator.cpp index 3327150cbe1..bbdb9d72923 100644 --- a/src/tests/ie_test_utils/common_test_utils/graph_comparator.cpp +++ b/src/tests/ie_test_utils/common_test_utils/graph_comparator.cpp @@ -837,10 +837,8 @@ void check_rt_info(const std::shared_ptr& f) { static const std::vector attrs_to_check{"fused_names_0"}; std::ostringstream err_log; - for (auto& op : f->get_ops()) { - if (ov::op::util::is_constant(op)) - continue; + for 
(auto& op : f->get_ops()) { const auto& rt_info = op->get_rt_info(); for (const auto& attr_name : attrs_to_check) { if (!rt_info.count(attr_name)) {