From ba4edc08d9974c426f120aafe86aeff98d2e8fa6 Mon Sep 17 00:00:00 2001 From: Vladislav Golubev Date: Mon, 26 Dec 2022 11:58:13 +0100 Subject: [PATCH] [Snippets] Removed limitation on Subgraph creation after Parameters (#13893) --- src/common/snippets/src/op/subgraph.cpp | 3 - .../snippets/src/pass/collapse_subgraph.cpp | 6 +- .../snippets/src/pass/fq_decomposition.cpp | 2 +- src/plugins/intel_cpu/src/graph.cpp | 2 +- .../snippets_mark_skipped.cpp | 11 +-- .../snippets_mark_skipped.hpp | 2 +- .../rt_info/memory_formats_attribute.cpp | 4 +- .../rt_info/memory_formats_attribute.hpp | 4 +- .../functional/bfloat16/concat_in_place.cpp | 2 +- .../tests/functional/bfloat16/conv_conv.cpp | 2 +- .../functional/bfloat16/conv_dwconv_relu.cpp | 2 +- .../bfloat16/faster_100_5_1_1_conv.cpp | 2 +- .../bfloat16/scaleshift_conv_eltwise_conv.cpp | 2 +- .../scaleshift_conv_eltwise_relu_conv.cpp | 2 +- .../scaleshift_conv_eltwise_scaleshift.cpp | 2 +- .../bfloat16/scaleshift_conv_elu_conv.cpp | 2 +- .../bfloat16/scaleshift_conv_relu.cpp | 2 +- .../scaleshift_conv_x2_concat_relu.cpp | 2 +- .../bfloat16/scaleshift_conv_x2_eltwise.cpp | 2 +- .../scaleshift_conv_x2_mixed1_eltwise.cpp | 2 +- .../scaleshift_conv_x2_mixed2_eltwise.cpp | 2 +- .../bfloat16/scaleshift_conv_x3_eltwise.cpp | 2 +- .../scaleshift_x2_conv_x2_eltwise.cpp | 5 +- .../scaleshift_x3_conv_eltwise_relu.cpp | 4 +- .../bfloat16/tail_fp32_optimization.cpp | 2 +- .../functional/bfloat16/topk_inputs_i32.cpp | 2 +- .../runtime_precision.cpp | 4 +- .../fake_quantize_transformation.cpp | 5 + .../multiply_transformation.cpp | 20 ++-- .../shared_tests_instances/snippets/add.cpp | 23 +---- .../snippets/convert.cpp | 16 ++-- .../snippets/eltwise_two_results.cpp | 2 +- .../snippets/max_num_params_eltwise.cpp | 8 +- .../snippets/three_inputs_eltwise.cpp | 14 +-- .../snippets/two_inputs_and_outputs.cpp | 2 +- .../single_layer_tests/activation.cpp | 22 ++++- .../single_layer_tests/conversion.cpp | 78 ++++++++++++--- .../functional/single_layer_tests/eltwise.cpp | 84 ++++++++--------- .../functional/test_utils/cpu_test_utils.cpp | 23 +++-- .../functional/test_utils/cpu_test_utils.hpp | 4 +- .../test_utils/fusing_test_utils.cpp | 19 ++-- .../test_utils/fusing_test_utils.hpp | 26 ++++- .../snipptes_mark_skipped.cpp | 8 -- .../plugin/shared/include/snippets/add.hpp | 11 +-- .../snippets/max_num_params_eltwise.hpp | 4 +- .../include/snippets/three_inputs_eltwise.hpp | 5 - .../executable_network/exec_graph_info.cpp | 72 +++++++++++++- .../plugin/shared/src/snippets/add.cpp | 24 +---- .../src/snippets/max_num_params_eltwise.cpp | 8 +- .../src/snippets/three_inputs_eltwise.cpp | 15 --- .../include/subgraph_simple.hpp | 42 +-------- .../src/subgraph_convert.cpp | 94 +++++++------------ .../src/subgraph_simple.cpp | 68 +++----------- 53 files changed, 381 insertions(+), 395 deletions(-) diff --git a/src/common/snippets/src/op/subgraph.cpp b/src/common/snippets/src/op/subgraph.cpp index baeca6c02bf..ed623c4e2a8 100644 --- a/src/common/snippets/src/op/subgraph.cpp +++ b/src/common/snippets/src/op/subgraph.cpp @@ -134,9 +134,6 @@ auto snippets::op::Subgraph::wrap_node_as_subgraph(const std::shared_ptrclear_control_dependencies(); ngraph::ResultVector body_results; for (auto output : node->outputs()) { body_results.push_back(std::make_shared(body_node->output(output.get_index()))); diff --git a/src/common/snippets/src/pass/collapse_subgraph.cpp b/src/common/snippets/src/pass/collapse_subgraph.cpp index 0f3dc5e8d80..eb63959a88e 100644 --- 
a/src/common/snippets/src/pass/collapse_subgraph.cpp +++ b/src/common/snippets/src/pass/collapse_subgraph.cpp @@ -174,7 +174,7 @@ auto update_out_tensor_name(std::shared_ptr<ngraph::snippets::op::Subgraph> &sub } // namespace bool AppropriateForSubgraph(const std::shared_ptr<const Node> &node) { - return is_supported_op(node) && has_supported_in_out(node); + return is_supported_op(node) && has_supported_in_out(node) && node->get_control_dependencies().empty(); } void SetSnippetsNodeType(const std::shared_ptr<Node> &node, SnippetsNodeType nodeType) { @@ -273,6 +273,8 @@ TokenizeSnippets::TokenizeSnippets() { OutputVector external_inputs; // inputs to the node before merge to subgraph OutputVector internal_inputs; + // nodes whose rt_info should be copied into the resulting subgraph + NodeVector replaced_nodes{node}; auto input_values = node->input_values(); /* @@ -347,6 +349,7 @@ TokenizeSnippets::TokenizeSnippets() { input_subgraphs.insert(input_node); fusedNames += getFusedNames(subgraph); + replaced_nodes.push_back(subgraph); if (has_result_child(subgraph)) { // we set input subgraph name to the current subgraph @@ -553,6 +556,7 @@ TokenizeSnippets::TokenizeSnippets() { body->get_parameters()[i]->set_friendly_name(body_parameters[i]->get_friendly_name()); } auto subgraph = op::build_subgraph(node, external_inputs, body, subgraph_name); + copy_runtime_info(replaced_nodes, subgraph); const auto & act_body = subgraph->body(); for (size_t i = 0; i < act_body.get_parameters().size(); i++) { act_body.get_parameters()[i]->set_friendly_name(body_parameters[i]->get_friendly_name()); diff --git a/src/common/snippets/src/pass/fq_decomposition.cpp b/src/common/snippets/src/pass/fq_decomposition.cpp index f92c65474bd..6908552b114 100644 --- a/src/common/snippets/src/pass/fq_decomposition.cpp +++ b/src/common/snippets/src/pass/fq_decomposition.cpp @@ -19,7 +19,7 @@ namespace { bool isValidRangesInputs(const std::shared_ptr<ngraph::opset1::FakeQuantize>& fq) { auto il = fq->input_value(1); auto ih = fq->input_value(2); - auto greater_equal = std::make_shared(il, ih); + auto greater_equal = std::make_shared(il, ih); ngraph::OutputVector result(1); if (!greater_equal->constant_fold(result, greater_equal->input_values())) diff --git a/src/plugins/intel_cpu/src/graph.cpp b/src/plugins/intel_cpu/src/graph.cpp index 3549e679609..faed0446379 100644 --- a/src/plugins/intel_cpu/src/graph.cpp +++ b/src/plugins/intel_cpu/src/graph.cpp @@ -1603,7 +1603,7 @@ void Graph::EnforceBF16() { // Concatenation node is exception because it doesn't change an accuracy for BF16 activation node->getType() != Type::Concatenation) && // exclude Eltwise after Input since it supports conversion to BF16 - !(parent->getType() == Type::Input && node->getType() == Type::Eltwise) && + !(parent->getType() == Type::Input && (node->getType() == Type::Eltwise || node->getType() == Type::Subgraph)) && node->getOriginalInputPrecisionAtPort(i) == Precision::FP32) node->setOriginalInputPrecisionAtPort(i, Precision::BF16); } diff --git a/src/plugins/intel_cpu/src/ngraph_transformations/snippets_mark_skipped.cpp b/src/plugins/intel_cpu/src/ngraph_transformations/snippets_mark_skipped.cpp index 5e4fda63190..6b0963a2fcb 100644 --- a/src/plugins/intel_cpu/src/ngraph_transformations/snippets_mark_skipped.cpp +++ b/src/plugins/intel_cpu/src/ngraph_transformations/snippets_mark_skipped.cpp @@ -433,10 +433,7 @@ bool SnippetsMarkSkipped::run_on_model(const std::shared_ptr<ov::Model> &m) { for (auto &node : m->get_ordered_ops()) { if (ngraph::op::is_constant(node)) continue; - - if (ngraph::op::is_parameter(node)) { - SetNodeFusingType(node,
NodeFusingType::IgnoredAfterInputs); - } else if (isSuitableConvolutionParent(node)) { + if (isSuitableConvolutionParent(node)) { // Initiate fusing chain SetNodeFusingType(node, NodeFusingType::FusedWithConvolution); channelAxis = DEFAULT_AXIS; @@ -490,12 +487,6 @@ bool SnippetsMarkSkipped::run_on_model(const std::shared_ptr<ov::Model> &m) { NodeFusingType updatedChainType = fusingChainType; if (isSuitableChildForFusingMatMul(node, isExecutedInINT8, updatedChainType, channelAxis)) PropagateIfHasOnlyChild(node, updatedChainType); - } else if (fusingChainType == NodeFusingType::IgnoredAfterInputs && (snippets::pass::AppropriateForSubgraph(node) || - ov::is_type<ov::op::v0::Convert>(node) || ov::is_type<ov::op::v1::Transpose>(node))) { - // In OV_API 2.0 after Input node with I8/U8 precisions incerts Convert node, moreother on TF models inserts - // Transpose layer. These brakes an idea to leave Eltwise node with I8/U8 inputs and FP32 outputs instead of Subgrath node - // TODO Remove an additional check on Convert/Transpose here after enabling Subgraths with I8/U8 inputs and FP32 outputs - SetNodeFusingType(node, NodeFusingType::IgnoredAfterInputs); } } } diff --git a/src/plugins/intel_cpu/src/ngraph_transformations/snippets_mark_skipped.hpp b/src/plugins/intel_cpu/src/ngraph_transformations/snippets_mark_skipped.hpp index d109e98497a..f7485c6790e 100644 --- a/src/plugins/intel_cpu/src/ngraph_transformations/snippets_mark_skipped.hpp +++ b/src/plugins/intel_cpu/src/ngraph_transformations/snippets_mark_skipped.hpp @@ -37,7 +37,7 @@ enum class NodeFusingType : int64_t { NotSet, FusedTerminator, FusedWithConvolution, FusedWithBinaryConvolution, FusedWithConvolutionSumActivation, - FusedWithMatMul, FusedWithMatMulI8, FusedWithReduce, FusedWithMisc, IgnoredAfterInputs}; + FusedWithMatMul, FusedWithMatMulI8, FusedWithReduce, FusedWithMisc}; } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/utils/rt_info/memory_formats_attribute.cpp b/src/plugins/intel_cpu/src/utils/rt_info/memory_formats_attribute.cpp index ce644bc8968..e2658fcc8aa 100644 --- a/src/plugins/intel_cpu/src/utils/rt_info/memory_formats_attribute.cpp +++ b/src/plugins/intel_cpu/src/utils/rt_info/memory_formats_attribute.cpp @@ -19,7 +19,7 @@ std::string getInputMemoryFormats(const std::shared_ptr<ngraph::Node>& node) { auto it_info = node->get_rt_info().find(InputMemoryFormats::get_type_info_static()); if (it_info != node->get_rt_info().end()) { if (it_info->second.is<InputMemoryFormats>()) { - return it_info->second.as<InputMemoryFormats>().getMemoryFormats(); + return it_info->second.as<InputMemoryFormats>().to_string(); } } return {}; @@ -31,7 +31,7 @@ std::string getOutputMemoryFormats(const std::shared_ptr<ngraph::Node>& node) { auto it_info = node->get_rt_info().find(OutputMemoryFormats::get_type_info_static()); if (it_info != node->get_rt_info().end()) { if (it_info->second.is<OutputMemoryFormats>()) { - return it_info->second.as<OutputMemoryFormats>().getMemoryFormats(); + return it_info->second.as<OutputMemoryFormats>().to_string(); } } return {}; diff --git a/src/plugins/intel_cpu/src/utils/rt_info/memory_formats_attribute.hpp b/src/plugins/intel_cpu/src/utils/rt_info/memory_formats_attribute.hpp index 56f240ccfa1..de0e37bb540 100644 --- a/src/plugins/intel_cpu/src/utils/rt_info/memory_formats_attribute.hpp +++ b/src/plugins/intel_cpu/src/utils/rt_info/memory_formats_attribute.hpp @@ -25,7 +25,7 @@ protected: public: MemoryFormats() = default; explicit MemoryFormats(const std::string &_memory_format) : memory_format(_memory_format) {} - std::string getMemoryFormats() const { return memory_format; } + std::string to_string() const override { return memory_format; }; bool is_copyable(const
std::shared_ptr<ngraph::Node>& to) const override { return (!ov::op::util::is_constant(to)); } @@ -36,7 +36,7 @@ public: for (auto &node : nodes) { auto it_info = node->get_rt_info().find(MemoryFormat::get_type_info_static()); if (it_info != node->get_rt_info().end()) { - std::string mem_format = it_info->second.template as<MemoryFormat>().getMemoryFormats(); + std::string mem_format = it_info->second.template as<MemoryFormat>().to_string(); if (!mem_format.empty()) { unique_mem_format.insert(mem_format); } diff --git a/src/plugins/intel_cpu/tests/functional/bfloat16/concat_in_place.cpp b/src/plugins/intel_cpu/tests/functional/bfloat16/concat_in_place.cpp index 1e3cb6664f0..65b858e0f03 100644 --- a/src/plugins/intel_cpu/tests/functional/bfloat16/concat_in_place.cpp +++ b/src/plugins/intel_cpu/tests/functional/bfloat16/concat_in_place.cpp @@ -128,7 +128,7 @@ protected: // STAGE2: // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in // performance counters - expectedPrecisions["ADD_1"] = "ndef"; + expectedPrecisions["ADD_1"] = netPrecision.name(); expectedPrecisions["CONV_1"] = "BF16"; expectedPrecisions["CONV_2"] = "BF16"; } diff --git a/src/plugins/intel_cpu/tests/functional/bfloat16/conv_conv.cpp b/src/plugins/intel_cpu/tests/functional/bfloat16/conv_conv.cpp index 1d48d1533e1..80c25fcf4ad 100644 --- a/src/plugins/intel_cpu/tests/functional/bfloat16/conv_conv.cpp +++ b/src/plugins/intel_cpu/tests/functional/bfloat16/conv_conv.cpp @@ -98,7 +98,7 @@ protected: // STAGE2: // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in // performance counters - expectedPrecisions["ADD_1"] = "ndef"; + expectedPrecisions["ADD_1"] = netPrecision.name(); expectedPrecisions["CONV_1"] = "BF16"; expectedPrecisions["CONV_2"] = "BF16"; } diff --git a/src/plugins/intel_cpu/tests/functional/bfloat16/conv_dwconv_relu.cpp b/src/plugins/intel_cpu/tests/functional/bfloat16/conv_dwconv_relu.cpp index b835020b501..5cbcade5f4b 100644 --- a/src/plugins/intel_cpu/tests/functional/bfloat16/conv_dwconv_relu.cpp +++ b/src/plugins/intel_cpu/tests/functional/bfloat16/conv_dwconv_relu.cpp @@ -117,7 +117,7 @@ protected: // STAGE2: // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in // performance counters - expectedPrecisions["ADD_1"] = "ndef"; + expectedPrecisions["ADD_1"] = netPrecision.name(); expectedPrecisions["CONV_1"] = "BF16"; expectedPrecisions["RELU"] = "ndef"; } diff --git a/src/plugins/intel_cpu/tests/functional/bfloat16/faster_100_5_1_1_conv.cpp b/src/plugins/intel_cpu/tests/functional/bfloat16/faster_100_5_1_1_conv.cpp index 94de374af34..a627e542287 100644 --- a/src/plugins/intel_cpu/tests/functional/bfloat16/faster_100_5_1_1_conv.cpp +++ b/src/plugins/intel_cpu/tests/functional/bfloat16/faster_100_5_1_1_conv.cpp @@ -105,7 +105,7 @@ protected: // STAGE2: // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in // performance counters - expectedPrecisions["Add_4"] = "ndef"; + expectedPrecisions["Add_4"] = netPrecision.name(); expectedPrecisions["Convolution_6"] = "BF16"; } }; diff --git a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_eltwise_conv.cpp b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_eltwise_conv.cpp index 015477f0f9b..25ef2082476 100644 --- a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_eltwise_conv.cpp +++
b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_eltwise_conv.cpp @@ -123,7 +123,7 @@ protected: // STAGE2: // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in // performance counters - expectedPrecisions["ADD_1"] = "ndef"; + expectedPrecisions["ADD_1"] = netPrecision.name(); expectedPrecisions["CONV_1"] = "BF16"; expectedPrecisions["CONV_2"] = "BF16"; expectedPrecisions["ELT_1"] = "ndef"; diff --git a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_eltwise_relu_conv.cpp b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_eltwise_relu_conv.cpp index aa00d9edb37..91296860bb1 100644 --- a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_eltwise_relu_conv.cpp +++ b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_eltwise_relu_conv.cpp @@ -127,7 +127,7 @@ protected: // STAGE2: // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in // performance counters - expectedPrecisions["ADD_1"] = "ndef"; + expectedPrecisions["ADD_1"] = netPrecision.name(); expectedPrecisions["CONV_1"] = "BF16"; expectedPrecisions["CONV_2"] = "BF16"; expectedPrecisions["RELU_1"] = "ndef"; diff --git a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_eltwise_scaleshift.cpp b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_eltwise_scaleshift.cpp index f00aa69e2fb..0e5b99f6c9c 100644 --- a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_eltwise_scaleshift.cpp +++ b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_eltwise_scaleshift.cpp @@ -121,7 +121,7 @@ protected: // STAGE2: // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in // performance counters - expectedPrecisions["ADD_1"] = "ndef"; + expectedPrecisions["ADD_1"] = netPrecision.name(); expectedPrecisions["CONV_1"] = "BF16"; expectedPrecisions["ELT_1"] = "ndef"; } diff --git a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_elu_conv.cpp b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_elu_conv.cpp index 734c83115d1..ad0036714a4 100644 --- a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_elu_conv.cpp +++ b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_elu_conv.cpp @@ -109,7 +109,7 @@ protected: // STAGE2: // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in // performance counters - expectedPrecisions["ADD_1"] = "ndef"; + expectedPrecisions["ADD_1"] = netPrecision.name(); expectedPrecisions["CONV_1"] = "BF16"; expectedPrecisions["CONV_2"] = "BF16"; } diff --git a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_relu.cpp b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_relu.cpp index 0325a2888cf..462dad1dbe0 100644 --- a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_relu.cpp +++ b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_relu.cpp @@ -97,7 +97,7 @@ protected: // STAGE2: // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in // performance counters - expectedPrecisions["ADD_1"] = "ndef"; + expectedPrecisions["ADD_1"] = netPrecision.name(); expectedPrecisions["CONV_1"] = "BF16"; expectedPrecisions["RELU_1"] = "ndef"; } diff --git 
a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_x2_concat_relu.cpp b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_x2_concat_relu.cpp index b2030cd7e6a..b0c8afb5795 100644 --- a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_x2_concat_relu.cpp +++ b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_x2_concat_relu.cpp @@ -114,7 +114,7 @@ protected: // STAGE2: // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in // performance counters - expectedPrecisions["ADD_1"] = "ndef"; + expectedPrecisions["ADD_1"] = netPrecision.name(); expectedPrecisions["CONV_1"] = "BF16"; expectedPrecisions["CONV_2"] = "BF16"; } diff --git a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_x2_eltwise.cpp b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_x2_eltwise.cpp index 19325209567..78780d7cf1b 100644 --- a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_x2_eltwise.cpp +++ b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_x2_eltwise.cpp @@ -106,7 +106,7 @@ protected: // STAGE2: // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in // performance counters - expectedPrecisions["ADD_1"] = "ndef"; + expectedPrecisions["ADD_1"] = netPrecision.name(); expectedPrecisions["CONV_1"] = "BF16"; expectedPrecisions["CONV_2"] = "BF16"; expectedPrecisions["ELT_1"] = "ndef"; diff --git a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_x2_mixed1_eltwise.cpp b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_x2_mixed1_eltwise.cpp index e534a876195..7af444d64c5 100644 --- a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_x2_mixed1_eltwise.cpp +++ b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_x2_mixed1_eltwise.cpp @@ -106,7 +106,7 @@ protected: // STAGE2: // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in // performance counters - expectedPrecisions["ADD_1"] = "ndef"; + expectedPrecisions["ADD_1"] = netPrecision.name(); expectedPrecisions["CONV_1"] = "BF16"; expectedPrecisions["CONV_2"] = "BF16"; expectedPrecisions["ELT_1"] = "ndef"; diff --git a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_x2_mixed2_eltwise.cpp b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_x2_mixed2_eltwise.cpp index c9c36b325ad..72a9abecfb5 100644 --- a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_x2_mixed2_eltwise.cpp +++ b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_x2_mixed2_eltwise.cpp @@ -110,7 +110,7 @@ protected: // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in // performance counters expectedPrecisions["CONV_1"] = "BF16"; - expectedPrecisions["ADD_2"] = "ndef"; + expectedPrecisions["ADD_2"] = netPrecision.name(); expectedPrecisions["CONV_2"] = "BF16"; expectedPrecisions["ELT_1"] = "ndef"; } diff --git a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_x3_eltwise.cpp b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_x3_eltwise.cpp index 14174c0d320..3cf12e220a3 100644 --- a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_x3_eltwise.cpp +++ b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_conv_x3_eltwise.cpp @@ -141,7 +141,7 @@ protected: // STAGE2: 
// filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in // performance counters - expectedPrecisions["Add_1"] = "ndef"; + expectedPrecisions["Add_1"] = netPrecision.name(); expectedPrecisions["Convolution_1"] = "BF16"; expectedPrecisions["Convolution_2"] = "BF16"; expectedPrecisions["ELT_1"] = "ndef"; diff --git a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_x2_conv_x2_eltwise.cpp b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_x2_conv_x2_eltwise.cpp index 0838ea82448..e9c6e7ea3ce 100644 --- a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_x2_conv_x2_eltwise.cpp +++ b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_x2_conv_x2_eltwise.cpp @@ -128,10 +128,9 @@ protected: // STAGE2: // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in // performance counters - expectedPrecisions["Add_1"] = "ndef"; - expectedPrecisions["Add_2"] = "ndef"; + expectedPrecisions["Add_2"] = netPrecision.name(); expectedPrecisions["Convolution_1"] = "BF16"; - expectedPrecisions["ELT_1"] = "ndef"; + expectedPrecisions["ELT_1"] = netPrecision.name(); } }; diff --git a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_x3_conv_eltwise_relu.cpp b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_x3_conv_eltwise_relu.cpp index a1eb1b14c79..9ec664b41e2 100644 --- a/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_x3_conv_eltwise_relu.cpp +++ b/src/plugins/intel_cpu/tests/functional/bfloat16/scaleshift_x3_conv_eltwise_relu.cpp @@ -147,9 +147,9 @@ protected: // STAGE2: // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in // performance counters - expectedPrecisions["Add_1"] = "ndef"; + expectedPrecisions["Add_1"] = netPrecision.name(); expectedPrecisions["Convolution_1"] = "BF16"; - expectedPrecisions["Add_2"] = "ndef"; + expectedPrecisions["Add_2"] = netPrecision.name(); expectedPrecisions["ELT_1"] = "ndef"; expectedPrecisions["RELU_1"] = "ndef"; } diff --git a/src/plugins/intel_cpu/tests/functional/bfloat16/tail_fp32_optimization.cpp b/src/plugins/intel_cpu/tests/functional/bfloat16/tail_fp32_optimization.cpp index 93a5f83916e..5962a349269 100644 --- a/src/plugins/intel_cpu/tests/functional/bfloat16/tail_fp32_optimization.cpp +++ b/src/plugins/intel_cpu/tests/functional/bfloat16/tail_fp32_optimization.cpp @@ -112,7 +112,7 @@ protected: // STAGE2: // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in // performance counters - expectedPrecisions["Add_4"] = "ndef"; + expectedPrecisions["Add_4"] = netPrecision.name(); expectedPrecisions["Convolution_6"] = "BF16"; } }; diff --git a/src/plugins/intel_cpu/tests/functional/bfloat16/topk_inputs_i32.cpp b/src/plugins/intel_cpu/tests/functional/bfloat16/topk_inputs_i32.cpp index 4a10f0efd28..4294a93829d 100644 --- a/src/plugins/intel_cpu/tests/functional/bfloat16/topk_inputs_i32.cpp +++ b/src/plugins/intel_cpu/tests/functional/bfloat16/topk_inputs_i32.cpp @@ -132,7 +132,7 @@ protected: // STAGE2: // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in // performance counters - expectedPrecisions["Add_4"] = "ndef"; + expectedPrecisions["Add_4"] = netPrecision.name(); expectedPrecisions["Convolution_1"] = "BF16"; expectedPrecisions["Convolution_2"] = "BF16"; 
expectedPrecisions["TopK_1"] = netPrecision.name(); // tail kept in FP32 precision diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/runtime_precision.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/runtime_precision.cpp index 6a31804ab62..cb945b34502 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/runtime_precision.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/runtime_precision.cpp @@ -19,8 +19,8 @@ const std::vector params = { {makeEltwiseFunction, {Precision::BF16, Precision::BF16}, {{"Eltwise", Precision::BF16}}}, {makeEltwiseFunction, {Precision::U8, Precision::U8}, {{"Eltwise", Precision::U8}}}, {makeEltwiseFunction, {Precision::I8, Precision::I8}, {{"Eltwise", Precision::I8}}}, - {makeFakeQuantizeReluFunction, {Precision::FP32}, {{"FakeQuantize", Precision::FP32}, {"Relu_original", Precision::U8}}}, - {makeFakeQuantizeReluFunction, {Precision::U8}, {{"FakeQuantize", Precision::U8}, {"Relu", Precision::U8}}}, + {makeFakeQuantizeReluFunction, {Precision::FP32}, {{"Relu", Precision::FP32}}}, + {makeFakeQuantizeReluFunction, {Precision::U8}, {{"Relu", Precision::U8}}}, {makeFakeQuantizeBinaryConvolutionFunction, {Precision::FP32}, {{"FakeQuantize", Precision::FP32}, {"BinaryConvolution", Precision::BIN}}}, }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fake_quantize_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fake_quantize_transformation.cpp index e923aabbdcb..c91b024497a 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fake_quantize_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fake_quantize_transformation.cpp @@ -51,6 +51,11 @@ const std::vector fakeQuantizeOnDataValues = { { 256ul, {}, { -127.5f }, { 0.f }, { -127.5f }, { 0.f } }, "Pooling", "U8" }, + // corner case: FQ with equal constant values + { + { 256ul, {}, { 0.f }, { 0.f }, { 0.f }, { 0.f } }, + "Pooling", "U8" + }, { { 16ul, {}, { 0.f }, { 1.5f }, { 0.f }, { 1.5f } }, "Pooling", "U8" diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp index 7bc53410657..1a6f52daa02 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp @@ -15,6 +15,10 @@ const std::vector netPrecisions = { //ngraph::element::f16 }; +// If snippets fuse all operations into one subgraph node, +// it's impossible to extract exec precision for the specific layer +const auto precision_for_fused_cases = ov::element::undefined; + const std::vector params = { { false, @@ -22,7 +26,7 @@ const std::vector params = { false, { 256ul, ngraph::Shape {}, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, - ngraph::element::f32, + precision_for_fused_cases, true }, { @@ -31,7 +35,7 @@ const std::vector params = { false, { 256ul, ngraph::Shape { 1, 1, 1, 1 
}, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, - ngraph::element::i8, + precision_for_fused_cases, false }, { @@ -40,7 +44,7 @@ const std::vector params = { false, { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - ngraph::element::u8, + precision_for_fused_cases, false }, { @@ -49,7 +53,7 @@ const std::vector params = { false, { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - ngraph::element::u8, + precision_for_fused_cases, false }, { @@ -58,7 +62,7 @@ const std::vector params = { false, { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, - ngraph::element::i8, + precision_for_fused_cases, false }, { @@ -67,7 +71,7 @@ const std::vector params = { true, { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, - ngraph::element::i8, + precision_for_fused_cases, false }, { @@ -76,7 +80,7 @@ const std::vector params = { false, { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - ngraph::element::u8, + precision_for_fused_cases, false }, { @@ -85,7 +89,7 @@ const std::vector params = { true, { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.27f }, { 1.28f }, { -1.27f }, { 1.28f } }, { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - ngraph::element::u8, + precision_for_fused_cases, false }, { false, {}, false, {}, {}, ngraph::element::f32, false }, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/add.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/add.cpp index ebc3685c80a..acebcb77d6e 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/add.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/add.cpp @@ -17,30 +17,11 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, Add, ::testing::Values(ov::Shape {1, 42, 16, 64}), ::testing::Values(ov::Shape {1, 42, 16, 1}), ::testing::Values(ov::element::f32), + ::testing::Values(1), ::testing::Values(1), // one node - Add - ::testing::Values(0), // SnippetsMarkSkipped disables tokenization for eltwise chains after inputs ::testing::Values(CommonTestUtils::DEVICE_CPU)), Add::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, AddSinh, - ::testing::Combine( - ::testing::Values(ov::Shape {1, 42, 16, 64}), - ::testing::Values(ov::Shape {1, 42, 16, 1}), - ::testing::Values(ov::element::f32), - ::testing::Values(3), // Add + 2 sinh after inputs - ::testing::Values(1), // Subgraph is created, since the inputs are followed by converts - ::testing::Values(CommonTestUtils::DEVICE_CPU)), - AddSinh::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, AddSinhConst, - ::testing::Combine( - ::testing::Values(ov::Shape {1, 42, 16, 64}), - ::testing::Values(ov::element::f32), - ::testing::Values(2), // Add + sinh after inputs - ::testing::Values(1), // Subgraph is created, since the inputs are followed by converts - 
::testing::Values(CommonTestUtils::DEVICE_CPU)), - AddSinhConst::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, AddRollConst, ::testing::Combine( ::testing::Values(ov::Shape {1, 42, 16, 64}), @@ -58,8 +39,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise_BF16, AddRollConst, ::testing::Values(1), // Subgraph is created, since the inputs are followed by converts ::testing::Values(CommonTestUtils::DEVICE_CPU)), AddRollConst::getTestCaseName); - - } // namespace } // namespace snippets } // namespace test diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/convert.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/convert.cpp index 323e069ebc0..e2890469356 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/convert.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/convert.cpp @@ -40,7 +40,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Convert, Convert, ::testing::Combine( ::testing::ValuesIn(inputShapes_Convert), ::testing::ValuesIn(types_Convert), - ::testing::Values(2), + ::testing::Values(1), ::testing::Values(1), ::testing::Values(CommonTestUtils::DEVICE_CPU)), Convert::getTestCaseName); @@ -67,7 +67,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_ConvertInput, ConvertInput, ::testing::Combine( ::testing::ValuesIn(inputShapes_ConvertInput), ::testing::ValuesIn(types_ConvertInput), - ::testing::Values(3), + ::testing::Values(1), ::testing::Values(1), ::testing::Values(CommonTestUtils::DEVICE_CPU)), Convert::getTestCaseName); @@ -76,7 +76,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_ConvertOutput, ConvertOutput, ::testing::Combine( ::testing::ValuesIn(inputShapes_ConvertInput), ::testing::ValuesIn(types_ConvertInput), - ::testing::Values(3), + ::testing::Values(1), ::testing::Values(1), ::testing::Values(CommonTestUtils::DEVICE_CPU)), Convert::getTestCaseName); @@ -85,7 +85,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_ConvertStub, ConvertStub, ::testing::Combine( ::testing::ValuesIn(inputShapes_ConvertInput), ::testing::ValuesIn(types_ConvertInput), - ::testing::Values(4), + ::testing::Values(2), ::testing::Values(2), ::testing::Values(CommonTestUtils::DEVICE_CPU)), Convert::getTestCaseName); @@ -104,7 +104,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_ConvertPartialInputsAndResults, ConvertP ::testing::Combine( ::testing::ValuesIn(inputShapes_ConvertPartialInputsAndResults), ::testing::ValuesIn(types_ConvertPartialInputsAndResults), - ::testing::Values(6), + ::testing::Values(2), // subgraph & roll after subgraph ::testing::Values(1), ::testing::Values(CommonTestUtils::DEVICE_CPU)), Convert::getTestCaseName); @@ -119,7 +119,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_ConvertManyOnInputs, ConvertManyOnInputs ::testing::Combine( ::testing::Values(std::vector{ov::Shape{5, 5, 5, 5}}), ::testing::ValuesIn(types_ConvertMany), - ::testing::Values(2), + ::testing::Values(1), ::testing::Values(1), ::testing::Values(CommonTestUtils::DEVICE_CPU)), Convert::getTestCaseName); @@ -128,7 +128,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_ConvertManyOnOutputs, ConvertManyOnOutpu ::testing::Combine( ::testing::Values(std::vector{ov::Shape{5, 5, 5, 5}}), ::testing::ValuesIn(types_ConvertMany), - ::testing::Values(2), // sinh + subgraph + ::testing::Values(1), ::testing::Values(1), ::testing::Values(CommonTestUtils::DEVICE_CPU)), Convert::getTestCaseName); @@ -142,7 +142,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_ConvertManyOnInputOutput, ConvertManyOnI 
::testing::Combine( ::testing::Values(std::vector{ov::Shape{5, 5, 5, 5}}), ::testing::ValuesIn(types_ConvertManyIO), - ::testing::Values(2), // sinh + subgraph + ::testing::Values(1), ::testing::Values(1), ::testing::Values(CommonTestUtils::DEVICE_CPU)), Convert::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/eltwise_two_results.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/eltwise_two_results.cpp index 934a243773a..ac533cbb6c0 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/eltwise_two_results.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/eltwise_two_results.cpp @@ -14,7 +14,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, EltwiseTwoResults, ::testing::Combine( ::testing::Values(ov::Shape {1, 64, 10, 10}), ::testing::Values(ov::Shape {1, 64, 10, 1}), - ::testing::Values(4), + ::testing::Values(2), ::testing::Values(2), ::testing::Values(CommonTestUtils::DEVICE_CPU)), EltwiseTwoResults::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/max_num_params_eltwise.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/max_num_params_eltwise.cpp index 20c01c02be8..61fee2122cf 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/max_num_params_eltwise.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/max_num_params_eltwise.cpp @@ -12,13 +12,13 @@ namespace { // Note that we need these shapes to cover all cases of code emission (none/one/multiple of scalar/vector tiles) std::vector input_shapes {{1, 64, 10, 10}, {1, 1, 17, 37}, {1, 1, 1, 1}, {1, 1, 1, 7}, {1, 1, 1, 128}, {1, 1, 1, 14}, {1, 1, 1, 16}, {1, 1, 1, 30}}; -INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, MaxNumParamsEltwiseSinh, - ::testing::Combine( +INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, MaxNumParamsEltwise, + ::testing::Combine( ::testing::ValuesIn(input_shapes), - ::testing::Values(12), // 10 Sinh after inputs + Subgraph + Concat + ::testing::Values(2), // Subgraph + Concat ::testing::Values(1), ::testing::Values(CommonTestUtils::DEVICE_CPU)), - MaxNumParamsEltwiseSinh::getTestCaseName); + MaxNumParamsEltwise::getTestCaseName); } // namespace } // namespace snippets diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/three_inputs_eltwise.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/three_inputs_eltwise.cpp index 779db741cd2..7a3fc16386e 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/three_inputs_eltwise.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/three_inputs_eltwise.cpp @@ -15,21 +15,11 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, ThreeInputsEltwise, ::testing::Values(ov::Shape {1, 64, 10, 10}), ::testing::Values(ov::Shape {1, 64, 10, 1}), ::testing::Values(ov::Shape {1, 1, 1, 10}), - ::testing::Values(2), // eltwises fuse only for non-broadcasted shapes - ::testing::Values(0), // SnippetsMarkSkipped disables tokenization for eltwise chains after inputs + ::testing::Values(1), // eltwises fuse only for non-broadcasted shapes + ::testing::Values(1), ::testing::Values(CommonTestUtils::DEVICE_CPU)), ThreeInputsEltwise::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, ThreeInputsEltwiseSinh, - ::testing::Combine( - ::testing::Values(ov::Shape {1, 64, 10, 10}), - 
::testing::Values(ov::Shape {1, 64, 10, 1}), - ::testing::Values(ov::Shape {1, 1, 1, 10}), - ::testing::Values(4), // Subgraph + 3 converts after inputs - ::testing::Values(1), // Subgraph is created, since the inputs are followed by converts - ::testing::Values(CommonTestUtils::DEVICE_CPU)), - ThreeInputsEltwiseSinh::getTestCaseName); - } // namespace } // namespace snippets } // namespace test diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/two_inputs_and_outputs.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/two_inputs_and_outputs.cpp index fa182cf548a..64042a3b01a 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/two_inputs_and_outputs.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/two_inputs_and_outputs.cpp @@ -34,7 +34,7 @@ const std::vector> input_shapes = { INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, TwoInputsAndOutputs, ::testing::Combine( ::testing::ValuesIn(input_shapes), - ::testing::Values(4), + ::testing::Values(2), ::testing::Values(1), ::testing::Values(CommonTestUtils::DEVICE_CPU)), TwoInputsAndOutputs::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/activation.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/activation.cpp index 66be6b53013..4189351b983 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/activation.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/activation.cpp @@ -156,7 +156,6 @@ std::vector netPrc = { /* ============= Activation (1D) ============= */ std::vector cpuParams_3D = { - CPUSpecificParams({nCw16c}, {nCw16c}, {}, {}), CPUSpecificParams({nwc}, {nwc}, {}, {}), CPUSpecificParams({ncw}, {ncw}, {}, {}) }; @@ -178,6 +177,27 @@ const auto basicCases3D = ::testing::Combine( INSTANTIATE_TEST_SUITE_P(smoke_Activation3D_Eltwise_CPU_BF16, ActivationLayerCPUTest, basicCases3D, ActivationLayerCPUTest::getTestCaseName); +const std::map>> activationTypes_blocked = { + {Mish, {{}}}, + {SoftSign, {{}}} +}; + +std::vector cpuParams_3D_blocked = { + CPUSpecificParams({nCw16c}, {nCw16c}, {}, {}), +}; + +const auto blockedCases3D = ::testing::Combine( + ::testing::ValuesIn(static_shapes_to_test_representation(basic3D)), + ::testing::Values(activationShapes), + ::testing::ValuesIn(CommonTestUtils::combineParams(activationTypes_blocked)), + ::testing::ValuesIn(netPrc), + ::testing::Values(Precision::FP32), + ::testing::Values(Precision::FP32), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_3D_blocked)) +); + +INSTANTIATE_TEST_SUITE_P(smoke_Activation3D_Eltwise_CPU_BF16_Blocked, ActivationLayerCPUTest, blockedCases3D, ActivationLayerCPUTest::getTestCaseName); + /* ============= Activation (2D) ============= */ std::vector cpuParams_4D = { CPUSpecificParams({nChw16c}, {nChw16c}, {}, {}), diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/conversion.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/conversion.cpp index 76590873e85..80fffcf6113 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/conversion.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/conversion.cpp @@ -52,8 +52,18 @@ protected: std::tie(shapes, inPrc, outPrc, cpuParams) = GetParam(); std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; + auto primitive = selectedType; + if (primitive.empty()) + primitive = getPrimitiveType(); + // WA: I32 precision support disabled in snippets => 
primitive has to be changed + // TODO: remove the WA after I32 is supported in snippets (ticket: 99803) + if (inPrc == InferenceEngine::Precision::I32 || outPrc == InferenceEngine::Precision::I32) + primitive = "unknown"; - selectedType = std::string("unknown_") + (inPrc == InferenceEngine::Precision::U8 ? "I8" : inPrc.name()); + auto exec_type_precision = inPrc != InferenceEngine::Precision::U8 + ? inPrc + : InferenceEngine::Precision(InferenceEngine::Precision::I8); + selectedType = makeSelectedTypeStr(primitive, InferenceEngine::details::convertPrecision(exec_type_precision)); for (size_t i = 0; i < shapes.second.size(); i++) { targetStaticShapes.push_back(std::vector{shapes.second[i]}); @@ -112,12 +122,10 @@ private: TEST_P(ConvertCPULayerTest, CompareWithRefs) { run(); - CheckPluginRelatedResults(compiledModel, "Convert"); + CheckPluginRelatedResults(compiledModel, std::set{"Convert", "Subgraph"}); } -std::vector inShapes_4D = { - {{1, 2, 3, 4}, {{1, 2, 3, 4}}}, - {{1, 1, 1080, 1920}, {{1, 1, 1080, 1920}}}, +std::vector inShapes_4D_dynamic = { { // dynamic {{-1, -1, -1, -1}}, @@ -154,27 +162,69 @@ const std::vector precisions_floating_point = { Precision::BF16 }; -std::vector memForm4D = { - CPUSpecificParams({nchw}, {nchw}, {}, {}), - CPUSpecificParams({nhwc}, {nhwc}, {}, {}), - CPUSpecificParams({nChw8c}, {nChw8c}, {}, {}), - CPUSpecificParams({nChw16c}, {nChw16c}, {}, {}) +std::vector memForm4D_dynamic = { + CPUSpecificParams({nchw}, {nchw}, {}, "unknown"), + CPUSpecificParams({nhwc}, {nhwc}, {}, "unknown"), + CPUSpecificParams({nChw8c}, {nChw8c}, {}, "unknown"), + CPUSpecificParams({nChw16c}, {nChw16c}, {}, "unknown") +}; + +INSTANTIATE_TEST_SUITE_P(smoke_ConvertCPULayerTest_Dynamic, ConvertCPULayerTest, + ::testing::Combine( + ::testing::ValuesIn(inShapes_4D_dynamic), + ::testing::ValuesIn(precisions), + ::testing::ValuesIn(precisions), + ::testing::ValuesIn(memForm4D_dynamic)), + ConvertCPULayerTest::getTestCaseName); + +std::vector inShapes_4D_static = { + {{1, 2, 3, 4}, {{1, 2, 3, 4}}}, + {{1, 1, 1080, 1920}, {{1, 1, 1080, 1920}}}, +}; + +std::vector memForm4D_static_common = { + CPUSpecificParams({nchw}, {nchw}, {}, {}), + CPUSpecificParams({nhwc}, {nhwc}, {}, {}), }; INSTANTIATE_TEST_SUITE_P(smoke_ConvertCPULayerTest, ConvertCPULayerTest, ::testing::Combine( - ::testing::ValuesIn(inShapes_4D), + ::testing::ValuesIn(inShapes_4D_static), ::testing::ValuesIn(precisions), ::testing::ValuesIn(precisions), - ::testing::ValuesIn(memForm4D)), + ::testing::ValuesIn(memForm4D_static_common)), ConvertCPULayerTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_ConvertCPULayerTest_BOOL, ConvertCPULayerTest, +std::vector inShapes_4D_blocked = { + {{1, 16, 5, 5}, {{1, 16, 5, 5}}}, +}; + +std::vector memForm4D_static_blocked = { + CPUSpecificParams({nChw16c}, {nChw16c}, {}, {}) +}; + +INSTANTIATE_TEST_SUITE_P(smoke_ConvertCPULayerTest_Blocked, ConvertCPULayerTest, ::testing::Combine( - ::testing::ValuesIn(inShapes_4D), + ::testing::ValuesIn(inShapes_4D_blocked), + ::testing::ValuesIn(precisions), + ::testing::ValuesIn(precisions), + ::testing::ValuesIn(filterCPUSpecificParams(memForm4D_static_blocked))), + ConvertCPULayerTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_ConvertCPULayerTest_BOOL_Static, ConvertCPULayerTest, + ::testing::Combine( + ::testing::ValuesIn(inShapes_4D_static), ::testing::ValuesIn(precisions_floating_point), ::testing::Values(Precision::BOOL), ::testing::Values(CPUSpecificParams({nchw}, {nchw}, {}, {}))), ConvertCPULayerTest::getTestCaseName); 
+INSTANTIATE_TEST_SUITE_P(smoke_ConvertCPULayerTest_BOOL_Dynamic, ConvertCPULayerTest, + ::testing::Combine( + ::testing::ValuesIn(inShapes_4D_dynamic), + ::testing::ValuesIn(precisions_floating_point), + ::testing::Values(Precision::BOOL), + ::testing::Values(CPUSpecificParams({nchw}, {nchw}, {}, "unknown"))), + ConvertCPULayerTest::getTestCaseName); + } // namespace CPULayerTestsDefinitions diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/eltwise.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/eltwise.cpp index 616530dfac2..809a32abf00 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/eltwise.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/eltwise.cpp @@ -169,7 +169,7 @@ private: TEST_P(EltwiseLayerCPUTest, CompareWithRefs) { run(); - CheckPluginRelatedResults(compiledModel, "Eltwise"); + CheckPluginRelatedResults(compiledModel, std::set{"Eltwise", "Subgraph"}); } namespace { @@ -223,7 +223,7 @@ const std::vector fusingParamsSet{ // fake quantize fusingFakeQuantizePerTensorRelu, fusingFakeQuantizePerChannelRelu, - fusingFQPerChannelSigmoidFQPerChannel + fusingFQPerChannelSigmoidFQPerTensor }; std::vector> inShapes_4D = { @@ -240,8 +240,8 @@ const auto params_4D = ::testing::Combine( ::testing::ValuesIn(secondaryInputTypes), ::testing::ValuesIn(opTypes), ::testing::ValuesIn(netType), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D)), @@ -262,8 +262,8 @@ const auto params_4D_fusing = ::testing::Combine( ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), ::testing::ValuesIn(opTypes), ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), ::testing::ValuesIn(cpuParams_4D), @@ -278,8 +278,8 @@ const auto params_4D_emptyCPUSpec = ::testing::Combine( ::testing::ValuesIn(secondaryInputTypes), ::testing::ValuesIn(opTypes), ::testing::ValuesIn(netType), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), ::testing::Values(emptyCPUSpec), @@ -301,8 +301,8 @@ const auto params_5D = ::testing::Combine( ::testing::ValuesIn(secondaryInputTypes), ::testing::ValuesIn(opTypes), ::testing::ValuesIn(netType), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D)), @@ -317,8 +317,8 @@ const auto params_5D_emptyCPUSpec = ::testing::Combine( ::testing::ValuesIn(secondaryInputTypes), ::testing::ValuesIn(opTypes), ::testing::ValuesIn(netType), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), ::testing::Values(CommonTestUtils::DEVICE_CPU), 
::testing::Values(additional_config)), ::testing::Values(emptyCPUSpec), @@ -346,8 +346,8 @@ const auto params_5D_emptyCPUSpec_I32 = ::testing::Combine( ::testing::ValuesIn(secondaryInputTypes), ::testing::ValuesIn(opTypes), ::testing::Values(ElementType::i32), - ::testing::Values(ElementType::i32), - ::testing::Values(ElementType::i32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), ::testing::Values(emptyCPUSpec), @@ -372,8 +372,8 @@ const auto params_4D_Blocked_Planar = ::testing::Combine( ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), ::testing::ValuesIn(opTypes), ::testing::ValuesIn(netType), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Planar)), @@ -398,8 +398,8 @@ const auto params_4D_Planar_Blocked = ::testing::Combine( ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), ::testing::ValuesIn(opTypes), ::testing::ValuesIn(netType), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Planar_Blocked)), @@ -424,8 +424,8 @@ const auto params_5D_Blocked_Planar = ::testing::Combine( ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), ::testing::ValuesIn(opTypes), ::testing::ValuesIn(netType), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_Blocked_Planar)), @@ -450,8 +450,8 @@ const auto params_5D_Planar_Blocked = ::testing::Combine( ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), ::testing::ValuesIn(opTypes), ::testing::ValuesIn(netType), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_Planar_Blocked)), @@ -478,8 +478,8 @@ const auto params_4D_1D_constant_mode = ::testing::Combine( ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), ::testing::ValuesIn(opTypes), ::testing::ValuesIn(netType), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_1D_Constant_mode)), @@ -488,8 +488,6 @@ const auto params_4D_1D_constant_mode = ::testing::Combine( INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_1D_Constant, EltwiseLayerCPUTest, params_4D_1D_constant_mode, EltwiseLayerCPUTest::getTestCaseName); std::vector cpuParams_4D_1D_Parameter_mode = { - CPUSpecificParams({nChw16c, x}, {nChw16c}, {}, {}), - 
CPUSpecificParams({nhwc, x}, {nhwc}, {}, {}), CPUSpecificParams({nchw, x}, {nchw}, {}, {}) }; @@ -500,8 +498,8 @@ const auto params_4D_1D_parameter_mode = ::testing::Combine( ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), ::testing::ValuesIn(opTypes), ::testing::ValuesIn(netType), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_1D_Parameter_mode)), @@ -527,8 +525,8 @@ const auto params_5D_1D_constant = ::testing::Combine( ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), ::testing::ValuesIn(opTypes), ::testing::ValuesIn(netType), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_1D_constant)), @@ -537,8 +535,6 @@ const auto params_5D_1D_constant = ::testing::Combine( INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_1D_Constant, EltwiseLayerCPUTest, params_5D_1D_constant, EltwiseLayerCPUTest::getTestCaseName); std::vector cpuParams_5D_1D_parameter = { - CPUSpecificParams({nCdhw16c, x}, {nCdhw16c}, {}, {}), - CPUSpecificParams({ndhwc, x}, {ndhwc}, {}, {}), CPUSpecificParams({ncdhw, x}, {ncdhw}, {}, {}) }; @@ -549,8 +545,8 @@ const auto params_5D_1D_parameter = ::testing::Combine( ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), ::testing::ValuesIn(opTypes), ::testing::ValuesIn(netType), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_1D_parameter)), @@ -602,8 +598,8 @@ const auto params_4D_dyn_const = ::testing::Combine( ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), ::testing::ValuesIn(opTypes), ::testing::ValuesIn(netType), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D)), @@ -641,8 +637,8 @@ const auto params_4D_dyn_param = ::testing::Combine( ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), ::testing::ValuesIn(opTypes), ::testing::ValuesIn(netType), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D)), @@ -682,8 +678,8 @@ const auto params_4D_dyn_param_fusing = ::testing::Combine( ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), ::testing::ValuesIn(opTypes), ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), ::testing::Values(CommonTestUtils::DEVICE_CPU), 
::testing::Values(additional_config)), ::testing::ValuesIn(cpuParams_4D), @@ -713,8 +709,8 @@ const auto params_5D_dyn_const = ::testing::Combine( ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), ::testing::ValuesIn(opTypes), ::testing::ValuesIn(netType), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D)), @@ -752,8 +748,8 @@ const auto params_5D_dyn_param = ::testing::Combine( ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), ::testing::ValuesIn(opTypes), ::testing::ValuesIn(netType), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D)), diff --git a/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.cpp b/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.cpp index 69469bcdefe..c8bdfc4c003 100644 --- a/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.cpp +++ b/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.cpp @@ -115,9 +115,8 @@ std::string CPUTestsBase::impls2str(const std::vector<std::string> &priority) { return str; } -void CPUTestsBase::CheckPluginRelatedResults(InferenceEngine::ExecutableNetwork &execNet, const std::string& nodeType) const { - if (!execNet) return; - if (nodeType.empty()) return; +void CPUTestsBase::CheckPluginRelatedResults(InferenceEngine::ExecutableNetwork &execNet, const std::set<std::string>& nodeType) const { + if (!execNet || nodeType.empty()) return; ASSERT_TRUE(!selectedType.empty()) << "Node type is not defined."; InferenceEngine::CNNNetwork execGraphInfo = execNet.GetExecGraphInfo(); @@ -125,16 +124,23 @@ void CPUTestsBase::CheckPluginRelatedResults(InferenceEngine::ExecutableNetwork CheckPluginRelatedResultsImpl(function, nodeType); } -void CPUTestsBase::CheckPluginRelatedResults(const ov::CompiledModel &execNet, const std::string& nodeType) const { - if (!execNet) return; - if (nodeType.empty()) return; +void CPUTestsBase::CheckPluginRelatedResults(const ov::CompiledModel &execNet, const std::set<std::string>& nodeType) const { + if (!execNet || nodeType.empty()) return; ASSERT_TRUE(!selectedType.empty()) << "Node type is not defined."; auto function = execNet.get_runtime_model(); CheckPluginRelatedResultsImpl(function, nodeType); } -void CPUTestsBase::CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov::Model>& function, const std::string& nodeType) const { +void CPUTestsBase::CheckPluginRelatedResults(InferenceEngine::ExecutableNetwork &execNet, const std::string& nodeType) const { + CheckPluginRelatedResults(execNet, std::set<std::string>{nodeType}); +} + +void CPUTestsBase::CheckPluginRelatedResults(const ov::CompiledModel &execNet, const std::string& nodeType) const { + CheckPluginRelatedResults(execNet, std::set<std::string>{nodeType}); +} + +void CPUTestsBase::CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov::Model>& function, const std::set<std::string>& nodeType) const { ASSERT_NE(nullptr, function); for (const auto &node : function->get_ops()) { const auto & rtInfo = node->get_rt_info(); @@ -161,7 +167,7 @@ void CPUTestsBase::CheckPluginRelatedResultsImpl(const std::shared_ptrget_input_size()); ASSERT_LE(outFmts.size(),
+ +void CPUTestsBase::CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov::Model>& function, const std::set<std::string>& nodeType) const { ASSERT_NE(nullptr, function); for (const auto &node : function->get_ops()) { const auto & rtInfo = node->get_rt_info(); @@ -161,7 +167,7 @@ void CPUTestsBase::CheckPluginRelatedResultsImpl(const std::shared_ptrget_input_size()); ASSERT_LE(outFmts.size(), node->get_output_size()); for (int i = 0; i < inFmts.size(); i++) { @@ -212,7 +218,6 @@ void CPUTestsBase::CheckPluginRelatedResultsImpl(const std::shared_ptr &lastNode, std::string name); + void CheckPluginRelatedResults(InferenceEngine::ExecutableNetwork &execNet, const std::set<std::string>& nodeType) const; + void CheckPluginRelatedResults(const ov::CompiledModel &execNet, const std::set<std::string>& nodeType) const; void CheckPluginRelatedResults(InferenceEngine::ExecutableNetwork &execNet, const std::string& nodeType) const; void CheckPluginRelatedResults(const ov::CompiledModel &execNet, const std::string& nodeType) const; static const char* any_type; protected: - virtual void CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov::Model>& function, const std::string& nodeType) const; + virtual void CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov::Model>& function, const std::set<std::string>& nodeType) const; /** * @brief This function modifies the initial single layer test graph to add any necessary modifications that are specific to the cpu test scope. * @param ngPrc Graph precision. diff --git a/src/plugins/intel_cpu/tests/functional/test_utils/fusing_test_utils.cpp b/src/plugins/intel_cpu/tests/functional/test_utils/fusing_test_utils.cpp index 2b0e2106974..8bee46ef920 100644 --- a/src/plugins/intel_cpu/tests/functional/test_utils/fusing_test_utils.cpp +++ b/src/plugins/intel_cpu/tests/functional/test_utils/fusing_test_utils.cpp @@ -36,7 +36,7 @@ CpuTestWithFusing::modifyGraph(const ngraph::element::Type &ngPrc, ngraph::Param return retNode; } -void CpuTestWithFusing::CheckFusingResults(const std::shared_ptr<const ov::Model>& function, const std::string& nodeType) const { +void CpuTestWithFusing::CheckFusingResults(const std::shared_ptr<const ov::Model>& function, const std::set<std::string>& nodeType) const { ASSERT_NE(nullptr, function); bool isNodeFound = false; for (const auto & op : function->get_ops()) { @@ -49,22 +49,29 @@ void CpuTestWithFusing::CheckFusingResults(const std::shared_ptrget_friendly_name(); - auto pos = originalLayersNames.find(opFriendlyName); - ASSERT_TRUE(pos != std::string::npos) << "Operation name " << op->get_friendly_name() << " has not been found in originalLayersNames!"; + ASSERT_TRUE(originalLayersNames.find(opFriendlyName) != std::string::npos) + << "Operation name " << opFriendlyName << " has not been found in originalLayersNames!"; + + size_t pos = 0; for (const auto& fusedOp : fusedOps) { pos = originalLayersNames.find(fusedOp, checkFusingPosition ? pos : 0); ASSERT_TRUE(pos != std::string::npos) << "Fused op " << fusedOp << " has not been found!"; } } } - ASSERT_TRUE(isNodeFound) << "Node type name: \"" << nodeType << "\" has not been found."; + std::stringstream error_message; + error_message << "Node with types \""; + for (const auto& elem : nodeType) + error_message << elem << ", "; + error_message << "\" wasn't found"; + ASSERT_TRUE(isNodeFound) << error_message.str(); }
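The loop ending just above encodes the fusing contract: every name in fusedOps must occur in the node's originalLayers rt_info string, and when checkFusingPosition is set each search resumes at the previous hit, so the names must also appear in order. The same scan in isolation (a sketch with hypothetical names, not the test helper itself):

    #include <cassert>
    #include <string>
    #include <vector>

    // Returns true if all names occur in 'haystack'; if 'ordered' is set,
    // each name must be found at or after the previous match.
    bool containsFused(const std::string& haystack,
                       const std::vector<std::string>& names, bool ordered) {
        size_t pos = 0;
        for (const auto& name : names) {
            pos = haystack.find(name, ordered ? pos : 0);
            if (pos == std::string::npos)
                return false;
        }
        return true;
    }

    int main() {
        const std::string originalLayers = "Conv,FakeQuantize,Sigmoid,FakeQuantize";
        assert(containsFused(originalLayers, {"FakeQuantize", "Sigmoid"}, true));
        assert(!containsFused(originalLayers, {"Sigmoid", "Conv"}, true)); // wrong order
    }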
-void CpuTestWithFusing::CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov::Model>& function, const std::string& nodeType) const { +void CpuTestWithFusing::CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov::Model>& function, const std::set<std::string>& nodeType) const { CPUTestsBase::CheckPluginRelatedResultsImpl(function, nodeType); CheckFusingResults(function, nodeType); } diff --git a/src/plugins/intel_cpu/tests/functional/test_utils/fusing_test_utils.hpp b/src/plugins/intel_cpu/tests/functional/test_utils/fusing_test_utils.hpp index ad331e48c4e..db354f82eac 100644 --- a/src/plugins/intel_cpu/tests/functional/test_utils/fusing_test_utils.hpp +++ b/src/plugins/intel_cpu/tests/functional/test_utils/fusing_test_utils.hpp @@ -72,10 +72,10 @@ protected: ngraph::ParameterVector &params, const std::shared_ptr<ngraph::Node> &lastNode) override; - void CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov::Model>& function, const std::string& nodeType) const override; + void CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov::Model>& function, const std::set<std::string>& nodeType) const override; private: - void CheckFusingResults(const std::shared_ptr<const ov::Model>& function, const std::string& nodeType) const; + void CheckFusingResults(const std::shared_ptr<const ov::Model>& function, const std::set<std::string>& nodeType) const; protected: std::shared_ptr<postOpMgr> postOpMgrPtr; @@ -325,6 +325,28 @@ const auto fusingFQPerChannelSigmoidFQPerChannel = fusingSpecificParams{std::mak return ngraph::builder::makeFakeQuantize(cfg.input, localPrc, 256, newShape); }, "FakeQuantize(PerChannel)"}}), {"FakeQuantize", "Sigmoid", "FakeQuantize"}}; +const auto fusingFQPerChannelSigmoidFQPerTensor = fusingSpecificParams{std::make_shared(std::vector{ + {[](postNodeConfig& cfg){ + auto localPrc = cfg.input->get_element_type(); + auto shape = cfg.input->get_output_partial_shape(0); + if (shape.size() == 1) + IE_THROW() << "If shape.size() == 1 then Granularity can be PerTensor only"; + ngraph::Shape newShape(shape.size(), 1); + newShape[1] = shape[1].get_length(); + return ngraph::builder::makeFakeQuantize(cfg.input, localPrc, 256, newShape); + }, "FakeQuantize(PerChannel)"}, + {[](postNodeConfig& cfg){ + return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Sigmoid); + }, "Sigmoid"}, + {[](postNodeConfig& cfg){ + auto localPrc = cfg.input->get_element_type(); + auto shape = cfg.input->get_output_partial_shape(0); + if (shape.size() == 1) + IE_THROW() << "If shape.size() == 1 then Granularity can be PerTensor only"; + ngraph::Shape newShape(shape.size(), 1); + return ngraph::builder::makeFakeQuantize(cfg.input, localPrc, 256, newShape); + }, "FakeQuantize(PerTensor)"}}), {"FakeQuantize", "Sigmoid", "FakeQuantize"}}; + const auto fusingFakeQuantizePerTensorRelu = fusingSpecificParams{std::make_shared(std::vector{ {[](postNodeConfig& cfg) { auto localPrc = cfg.input->get_element_type(); diff --git a/src/plugins/intel_cpu/tests/unit/ngraph_transformations/snipptes_mark_skipped.cpp index c02eb1a2a45..aee3aff68bf 100644 --- a/src/plugins/intel_cpu/tests/unit/ngraph_transformations/snipptes_mark_skipped.cpp
+++ b/src/plugins/intel_cpu/tests/unit/ngraph_transformations/snipptes_mark_skipped.cpp @@ -22,14 +22,6 @@ public: } }; -TEST_F(SnippetsMarkSkippedTests, smoke_Snippets_SkipAfterInputsEltwise) { - const auto &f = EltwiseFunction({{2, 3}, {1, 3}}); - function = f.getOriginal(); - // None subgraphs are expected, since the whole graph is an eltwise chain after input - function_ref = f.getOriginal(); - run(); -} - TEST_F(SnippetsMarkSkippedTests, smoke_Snippets_SkipAfterInputsMatMulEltwise) { const auto &f = MatMulEltwiseBranchesFunction(std::vector {{1, 3, 4, 4}, {1, 3, 4, 4}}); function = f.getOriginal(); diff --git a/src/tests/functional/plugin/shared/include/snippets/add.hpp b/src/tests/functional/plugin/shared/include/snippets/add.hpp index 84338e53215..7499d8ade45 100644 --- a/src/tests/functional/plugin/shared/include/snippets/add.hpp +++ b/src/tests/functional/plugin/shared/include/snippets/add.hpp @@ -36,20 +36,15 @@ protected: void SetUp() override; }; -class AddSinh : public Add { -protected: - void SetUp() override; -}; - -class AddSinhConst : public testing::WithParamInterface, - virtual public ov::test::SnippetsTestsCommon { +class AddConst : public testing::WithParamInterface, + virtual public ov::test::SnippetsTestsCommon { public: static std::string getTestCaseName(testing::TestParamInfo obj); protected: void SetUp() override; }; -class AddRollConst : public AddSinhConst { +class AddRollConst : public AddConst { protected: void SetUp() override; }; diff --git a/src/tests/functional/plugin/shared/include/snippets/max_num_params_eltwise.hpp b/src/tests/functional/plugin/shared/include/snippets/max_num_params_eltwise.hpp index 26640e58910..70803a346fa 100644 --- a/src/tests/functional/plugin/shared/include/snippets/max_num_params_eltwise.hpp +++ b/src/tests/functional/plugin/shared/include/snippets/max_num_params_eltwise.hpp @@ -17,8 +17,8 @@ typedef std::tuple< std::string // Target Device > MaxNumParamsEltwiseParams; -class MaxNumParamsEltwiseSinh : public testing::WithParamInterface, - virtual public ov::test::SnippetsTestsCommon { +class MaxNumParamsEltwise : public testing::WithParamInterface, + virtual public ov::test::SnippetsTestsCommon { public: static std::string getTestCaseName(testing::TestParamInfo obj); diff --git a/src/tests/functional/plugin/shared/include/snippets/three_inputs_eltwise.hpp b/src/tests/functional/plugin/shared/include/snippets/three_inputs_eltwise.hpp index bb39b7ded31..2bb61b3b2b7 100644 --- a/src/tests/functional/plugin/shared/include/snippets/three_inputs_eltwise.hpp +++ b/src/tests/functional/plugin/shared/include/snippets/three_inputs_eltwise.hpp @@ -28,11 +28,6 @@ protected: void SetUp() override; }; -class ThreeInputsEltwiseSinh : public ThreeInputsEltwise { -protected: - void SetUp() override; -}; - } // namespace snippets } // namespace test diff --git a/src/tests/functional/plugin/shared/src/behavior/executable_network/exec_graph_info.cpp b/src/tests/functional/plugin/shared/src/behavior/executable_network/exec_graph_info.cpp index 055ac49504f..f867835919d 100644 --- a/src/tests/functional/plugin/shared/src/behavior/executable_network/exec_graph_info.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/executable_network/exec_graph_info.cpp @@ -241,6 +241,76 @@ const char expected_serialized_model[] = R"V0G0N( )V0G0N"; +const char expected_serialized_model_cpu[] = R"V0G0N( + + + + + + + + 1 + + + + + + + + 1 + + + + + + + + 1 + + + + + + + + 1 + + + 1 + + + 1 + + + 1 + + + + + 1 + + + + + + + + 1 + + + + + + + + + + + + + +)V0G0N"; + 
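For context on the block above: expected_serialized_model_cpu is an XML snapshot of the expected execution graph, held in a raw string literal (the R"V0G0N( ... )V0G0N" delimiters exist so quotes and angle brackets need no escaping), and the test below parses both the snapshot and the freshly serialized file with pugixml before diffing them. A minimal sketch of that load step, assuming pugixml is available (the path and the textual diff at the end are illustrative; the suite compares the documents structurally):

    #include <iostream>
    #include <sstream>
    #include <pugixml.hpp>

    int main() {
        // Expected-graph snapshot; the raw string keeps the XML unescaped.
        const char expected[] = R"V0G0N(<net name="model"><layers/></net>)V0G0N";

        pugi::xml_document expectedDoc, resultDoc;
        if (!expectedDoc.load_string(expected))
            return 1;                                   // malformed snapshot
        if (!resultDoc.load_file("serialized_ir.xml"))  // illustrative path
            return 1;

        // Crude comparison: re-serialize both DOMs and diff the text.
        std::ostringstream a, b;
        expectedDoc.save(a);
        resultDoc.save(b);
        std::cout << (a.str() == b.str() ? "match" : "differ") << '\n';
        return 0;
    }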
std::string ExecGraphSerializationTest::getTestCaseName(testing::TestParamInfo obj) { std::ostringstream result; @@ -354,7 +424,7 @@ TEST_P(ExecGraphSerializationTest, ExecutionGraph) { pugi::xml_document expected; pugi::xml_document result; - ASSERT_TRUE(expected.load_string(expected_serialized_model)); + ASSERT_TRUE(expected.load_string(target_device == "CPU" ? expected_serialized_model_cpu : expected_serialized_model)); ASSERT_TRUE(result.load_file(m_out_xml_path.c_str())); bool status; diff --git a/src/tests/functional/plugin/shared/src/snippets/add.cpp b/src/tests/functional/plugin/shared/src/snippets/add.cpp index c524a54539f..beb85401f52 100644 --- a/src/tests/functional/plugin/shared/src/snippets/add.cpp +++ b/src/tests/functional/plugin/shared/src/snippets/add.cpp @@ -38,18 +38,7 @@ void Add::SetUp() { setInferenceType(type); } -void AddSinh::SetUp() { - ov::Shape inputShape0, inputShape1; - ov::element::Type type; - std::tie(inputShape0, inputShape1, type, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam(); - init_input_shapes({{{}, {inputShape0, }}, {{}, {inputShape1, }}}); - - auto f = ov::test::snippets::AddSinhFunction({inputShape0, inputShape1}); - function = f.getOriginal(); - setInferenceType(type); -} - -std::string AddSinhConst::getTestCaseName(testing::TestParamInfo obj) { +std::string AddConst::getTestCaseName(testing::TestParamInfo obj) { ov::Shape inputShapes, newInputShapes; ov::element::Type type; std::string targetDevice; @@ -65,13 +54,13 @@ std::string AddSinhConst::getTestCaseName(testing::TestParamInfoGetParam(); init_input_shapes({{{}, {inputShape, }}}); - auto f = ov::test::snippets::AddSinhConstFunction({inputShape}); + auto f = ov::test::snippets::AddConstFunction({inputShape}); function = f.getOriginal(); setInferenceType(type); } @@ -92,12 +81,7 @@ TEST_P(Add, CompareWithRefImpl) { validateNumSubgraphs(); } -TEST_P(AddSinh, CompareWithRefImpl) { - run(); - validateNumSubgraphs(); -} - -TEST_P(AddSinhConst, CompareWithRefImpl) { +TEST_P(AddConst, CompareWithRefImpl) { run(); validateNumSubgraphs(); } diff --git a/src/tests/functional/plugin/shared/src/snippets/max_num_params_eltwise.cpp b/src/tests/functional/plugin/shared/src/snippets/max_num_params_eltwise.cpp index 1140937be63..1061a2a4f1b 100644 --- a/src/tests/functional/plugin/shared/src/snippets/max_num_params_eltwise.cpp +++ b/src/tests/functional/plugin/shared/src/snippets/max_num_params_eltwise.cpp @@ -10,7 +10,7 @@ namespace ov { namespace test { namespace snippets { -std::string MaxNumParamsEltwiseSinh::getTestCaseName(testing::TestParamInfo<ov::test::snippets::MaxNumParamsEltwiseParams> obj) { +std::string MaxNumParamsEltwise::getTestCaseName(testing::TestParamInfo<ov::test::snippets::MaxNumParamsEltwiseParams> obj) { ov::Shape inputShapes; std::string targetDevice; size_t num_nodes, num_subgraphs; @@ -24,7 +24,7 @@ std::string MaxNumParamsEltwiseSinh::getTestCaseName(testing::TestParamInfoGetParam(); std::vector<ov::Shape> expandedShapes(10, inputShape); @@ -35,11 +35,11 @@ void MaxNumParamsEltwiseSinh::SetUp() { init_input_shapes(input_shapes); - auto f = ov::test::snippets::EltwiseMaxNumParamsSinhFunction(expandedShapes); + auto f = ov::test::snippets::EltwiseMaxNumParamsFunction(expandedShapes); function = f.getOriginal(); } -TEST_P(MaxNumParamsEltwiseSinh, CompareWithRefImpl) { +TEST_P(MaxNumParamsEltwise, CompareWithRefImpl) { run(); validateNumSubgraphs(); }
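MaxNumParamsEltwise::SetUp above clones one shape ten times because the fixture deliberately feeds tokenization more inputs than a single snippets Subgraph may take, exercising the limit on a Subgraph's combined inputs and outputs. A rough sketch of building such a many-input eltwise graph with the OpenVINO graph API (op choice, reduction pattern, and the helper name are illustrative, not the EltwiseMaxNumParamsFunction internals):

    #include <memory>
    #include <vector>
    #include <openvino/core/model.hpp>
    #include <openvino/op/add.hpp>
    #include <openvino/op/multiply.hpp>
    #include <openvino/op/parameter.hpp>

    // Builds numInputs Parameters, adds them pairwise, then folds the partial
    // sums with Multiply into one Result. Assumes an even numInputs >= 2.
    std::shared_ptr<ov::Model> makeManyInputEltwise(size_t numInputs, const ov::Shape& shape) {
        ov::ParameterVector params;
        for (size_t i = 0; i < numInputs; ++i)
            params.push_back(std::make_shared<ov::op::v0::Parameter>(ov::element::f32, shape));

        ov::OutputVector partial;
        for (size_t i = 0; i + 1 < numInputs; i += 2)
            partial.push_back(std::make_shared<ov::op::v1::Add>(params[i], params[i + 1]));

        ov::Output<ov::Node> out = partial[0];
        for (size_t i = 1; i < partial.size(); ++i)
            out = std::make_shared<ov::op::v1::Multiply>(out, partial[i]);

        return std::make_shared<ov::Model>(ov::OutputVector{out}, params);
    }

    // e.g. auto model = makeManyInputEltwise(10, ov::Shape{1, 3, 8, 8});

The fixture's actual graph also keeps a second Result (per its "10 inputs and 2 outputs" description further down), since outputs count against the same limit.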
diff --git a/src/tests/functional/plugin/shared/src/snippets/three_inputs_eltwise.cpp b/src/tests/functional/plugin/shared/src/snippets/three_inputs_eltwise.cpp index 276218e6150..0c601cc8ebe 100644 --- a/src/tests/functional/plugin/shared/src/snippets/three_inputs_eltwise.cpp +++ b/src/tests/functional/plugin/shared/src/snippets/three_inputs_eltwise.cpp @@ -37,26 +37,11 @@ void ThreeInputsEltwise::SetUp() { function = f.getOriginal(); } -void ThreeInputsEltwiseSinh::SetUp() { - ov::Shape inputShape0, inputShape1, inputShape2; - std::tie(inputShape0, inputShape1, inputShape2, - ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam(); - init_input_shapes({{{}, {inputShape0, }}, {{}, {inputShape1, }}, {{}, {inputShape2, }}}); - - auto f = ov::test::snippets::EltwiseThreeInputsSinhFunction({inputShape0, inputShape1, inputShape2}); - function = f.getOriginal(); -} - TEST_P(ThreeInputsEltwise, CompareWithRefImpl) { run(); validateNumSubgraphs(); } -TEST_P(ThreeInputsEltwiseSinh, CompareWithRefImpl) { - run(); - validateNumSubgraphs(); -} - } // namespace snippets } // namespace test } // namespace ov diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_simple.hpp b/src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_simple.hpp index dd9f342d7b3..6ebc6acd7d7 100644 --- a/src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_simple.hpp +++ b/src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_simple.hpp @@ -29,32 +29,14 @@ protected: std::shared_ptr<ov::Model> initOriginal() const override; std::shared_ptr<ov::Model> initReference() const override; }; -/// Add separated from inputs by Sinh to WA CPU-specific disabling after inputs. -/// Works because Sinh is not supported by tokenization yet. -/// Tokenized simply by starting subgraph. -// in1 in2 -// Sinh Sinh -// Add -// Result -// todo: remove Sinh once "no subgraph after input" limitation is relaxed -class AddSinhFunction : public SnippetsFunctionBase { -public: - explicit AddSinhFunction(const std::vector<ov::Shape>& inputShapes) : SnippetsFunctionBase(inputShapes) { - NGRAPH_CHECK(input_shapes.size() == 2, "Got invalid number of input shapes"); - } -protected: - std::shared_ptr<ov::Model> initOriginal() const override; - std::shared_ptr<ov::Model> initReference() const override; -}; -/// Like AddSinh but with a constant second input (and no sinh on in) +/// Add with a constant second input // in1 in2 -// Sin Sinh // Add // Result -// todo: remove Sinh once "no subgraph after input" limitation is relaxed -class AddSinhConstFunction : public SnippetsFunctionBase { +class AddConstFunction : public SnippetsFunctionBase { public: - explicit AddSinhConstFunction(const std::vector<ov::Shape>& inputShapes) : SnippetsFunctionBase(inputShapes) { + explicit AddConstFunction(const std::vector<ov::Shape>& inputShapes) : SnippetsFunctionBase(inputShapes) { NGRAPH_CHECK(input_shapes.size() == 1, "Got invalid number of input shapes"); } protected: @@ -108,30 +90,16 @@ public: protected: std::shared_ptr<ov::Model> initOriginal() const override; }; -/// EltwiseFunctionThreeInputs with Sinh after inputs to WA CPU-specific disabling after inputs -/// See AddSinh for details. -// todo: remove Sinh once "no subgraph after input" limitation is relaxed -class EltwiseThreeInputsSinhFunction : public SnippetsFunctionBase { -public: - explicit EltwiseThreeInputsSinhFunction(const std::vector<ov::Shape>& inputShapes) : - SnippetsFunctionBase(inputShapes) { - NGRAPH_CHECK(input_shapes.size() == 3, "Got invalid number of input shapes"); - } -protected: - std::shared_ptr<ov::Model> initOriginal() const override; -}; /// Eltwise graph with 10 inputs and 2 outputs. /// Needed to test for a max number of inputs+outputs allowed. // in1 in2 in3 ... in10 -// Sinh Sinh Sinh ...Sinh // ........................
// Subtract Power // \ Sinh // Result -// todo: remove Sinh once "no subgraph after input" limitation is relaxed -class EltwiseMaxNumParamsSinhFunction : public SnippetsFunctionBase { +class EltwiseMaxNumParamsFunction : public SnippetsFunctionBase { public: - explicit EltwiseMaxNumParamsSinhFunction(const std::vector& inputShapes) : + explicit EltwiseMaxNumParamsFunction(const std::vector& inputShapes) : SnippetsFunctionBase(inputShapes) { NGRAPH_CHECK(input_shapes.size() == 10, "Got invalid number of input shapes"); } @@ -181,7 +149,6 @@ protected: /// So we have 2 subgraphs - Snippets don't support subgraphs with many results /// Also Output tensors have names to check correct copying output names // in1 in2 -// Sinh Sinh // Add // HSwish Result // Relu @@ -198,7 +165,6 @@ protected: /// Two different Input and Outputs. /// This function is to check correct Broadcasting // in1 in2 -// Sin Sin // HSwish / // Result Add // Relu diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_convert.cpp b/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_convert.cpp index 5c743cf2006..17f419757a0 100644 --- a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_convert.cpp +++ b/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_convert.cpp @@ -19,15 +19,13 @@ std::shared_ptr createRollAsStub(const std::shared_ptr& pare std::shared_ptr ConvertFunction::initOriginal() const { auto data0 = std::make_shared(inType, input_shapes[0]); - auto stub = createRollAsStub(data0); - auto convert = std::make_shared(stub, outType); + auto convert = std::make_shared(data0, outType); return std::make_shared(NodeVector{convert}, ParameterVector{data0}); } std::shared_ptr ConvertFunction::initReference() const { auto data0 = std::make_shared(inType, input_shapes[0]); - auto stub = createRollAsStub(data0); - auto indata0 = std::make_shared(inType, stub->get_shape()); - auto subgraph = std::make_shared(NodeVector{stub}, + auto indata0 = std::make_shared(inType, data0->get_shape()); + auto subgraph = std::make_shared(NodeVector{data0}, std::make_shared(NodeVector{std::make_shared(indata0, outType)}, ParameterVector{indata0})); return std::make_shared(NodeVector{subgraph}, ParameterVector{data0}); @@ -36,21 +34,17 @@ std::shared_ptr ConvertFunction::initReference() const { std::shared_ptr ConvertInputFunction::initOriginal() const { auto data0 = std::make_shared(inType, input_shapes[0]); auto data1 = std::make_shared(outType, input_shapes[1]); - auto stub0 = createRollAsStub(data0); - auto stub1 = createRollAsStub(data1); - auto convert = std::make_shared(stub0, outType); - auto add = std::make_shared(convert, stub1); + auto convert = std::make_shared(data0, outType); + auto add = std::make_shared(convert, data1); return std::make_shared(NodeVector{add}, ParameterVector{data0, data1}); } std::shared_ptr ConvertInputFunction::initReference() const { auto data0 = std::make_shared(inType, input_shapes[0]); auto data1 = std::make_shared(outType, input_shapes[1]); - auto stub0 = createRollAsStub(data0); - auto stub1 = createRollAsStub(data1); - auto indata0 = std::make_shared(inType, stub0->get_shape()); - auto indata1 = std::make_shared(outType, stub1->get_shape()); + auto indata0 = std::make_shared(inType, data0->get_shape()); + auto indata1 = std::make_shared(outType, data1->get_shape()); auto convert = std::make_shared(indata0, outType); - auto subgraph = std::make_shared(NodeVector{stub0, stub1}, + auto subgraph = std::make_shared(NodeVector{data0, data1}, 
std::make_shared( NodeVector{std::make_shared(convert, indata1)}, ParameterVector{indata0, indata1})); @@ -60,22 +54,18 @@ std::shared_ptr ConvertInputFunction::initReference() const { std::shared_ptr ConvertOutputFunction::initOriginal() const { auto data0 = std::make_shared(inType, input_shapes[0]); auto data1 = std::make_shared(inType, input_shapes[1]); - auto stub0 = createRollAsStub(data0); - auto stub1 = createRollAsStub(data1); - auto add = std::make_shared(stub0, stub1); + auto add = std::make_shared(data0, data1); auto convert = std::make_shared(add, outType); return std::make_shared(NodeVector{convert}, ParameterVector{data0, data1}); } std::shared_ptr ConvertOutputFunction::initReference() const { auto data0 = std::make_shared(inType, input_shapes[0]); auto data1 = std::make_shared(inType, input_shapes[1]); - auto stub0 = createRollAsStub(data0); - auto stub1 = createRollAsStub(data1); - auto indata0 = std::make_shared(inType, stub0->get_shape()); - auto indata1 = std::make_shared(inType, stub1->get_shape()); + auto indata0 = std::make_shared(inType, data0->get_shape()); + auto indata1 = std::make_shared(inType, data1->get_shape()); auto add = std::make_shared(indata0, indata1); auto convert = std::make_shared(add, outType); - auto subgraph = std::make_shared(NodeVector{stub0, stub1}, + auto subgraph = std::make_shared(NodeVector{data0, data1}, std::make_shared( NodeVector{convert}, ParameterVector{indata0, indata1})); @@ -85,9 +75,7 @@ std::shared_ptr ConvertOutputFunction::initReference() const { std::shared_ptr ConvertStubFunction::initOriginal() const { auto data0 = std::make_shared(inType, input_shapes[0]); auto data1 = std::make_shared(inType, input_shapes[1]); - auto stub0 = createRollAsStub(data0); - auto stub1 = createRollAsStub(data1); - auto add = std::make_shared(stub0, stub1); + auto add = std::make_shared(data0, data1); auto convert = std::make_shared(add, outType); auto relu = std::make_shared(convert); return std::make_shared(NodeVector{relu}, ParameterVector{data0, data1}); @@ -95,14 +83,12 @@ std::shared_ptr ConvertStubFunction::initOriginal() const { std::shared_ptr ConvertStubFunction::initReference() const { auto data0 = std::make_shared(inType, input_shapes[0]); auto data1 = std::make_shared(inType, input_shapes[1]); - auto stub0 = createRollAsStub(data0); - auto stub1 = createRollAsStub(data1); - auto indata0 = std::make_shared(inType, stub0->get_shape()); - auto indata1 = std::make_shared(inType, stub1->get_shape()); + auto indata0 = std::make_shared(inType, data0->get_shape()); + auto indata1 = std::make_shared(inType, data1->get_shape()); auto add = std::make_shared(indata0, indata1); auto convert = std::make_shared(add, outType); auto subgraph0 = std::make_shared( - NodeVector{stub0, stub1}, std::make_shared(NodeVector{convert}, ParameterVector{indata0, indata1})); + NodeVector{data0, data1}, std::make_shared(NodeVector{convert}, ParameterVector{indata0, indata1})); auto indata2 = std::make_shared(convert->get_destination_type(), convert->get_shape()); auto relu = std::make_shared(indata2); auto subgraph1 = std::make_shared( @@ -114,14 +100,11 @@ std::shared_ptr ConvertPartialInputsAndResultsFunction::initOriginal( auto data0 = std::make_shared(inTypes[0], input_shapes[0]); auto data1 = std::make_shared(inTypes[1], input_shapes[1]); auto data2 = std::make_shared(inTypes[2], input_shapes[2]); - auto stub0 = createRollAsStub(data0); - auto stub1 = createRollAsStub(data1); - auto stub2 = createRollAsStub(data2); - auto convert0 = std::make_shared(stub0, 
outTypes[0]); - auto convert1 = std::make_shared(stub1, outTypes[0]); + auto convert0 = std::make_shared(data0, outTypes[0]); + auto convert1 = std::make_shared(data1, outTypes[0]); auto add = std::make_shared(convert0, convert1); auto relu = std::make_shared(add); - auto sub = std::make_shared(relu, stub2); + auto sub = std::make_shared(relu, data2); auto stub3 = createRollAsStub(sub); auto convert2 = std::make_shared(relu, outTypes[1]); return std::make_shared(NodeVector{convert2, stub3}, ParameterVector{data0, data1, data2}); @@ -130,12 +113,9 @@ std::shared_ptr ConvertPartialInputsAndResultsFunction::initReference auto data0 = std::make_shared(inTypes[0], input_shapes[0]); auto data1 = std::make_shared(inTypes[1], input_shapes[1]); auto data2 = std::make_shared(inTypes[2], input_shapes[2]); - auto stub0 = createRollAsStub(data0); - auto stub1 = createRollAsStub(data1); - auto stub2 = createRollAsStub(data2); - auto indata0 = std::make_shared(inTypes[0], stub0->get_shape()); - auto indata1 = std::make_shared(inTypes[1], stub1->get_shape()); - auto indata2 = std::make_shared(inTypes[2], stub2->get_shape()); + auto indata0 = std::make_shared(inTypes[0], data0->get_shape()); + auto indata1 = std::make_shared(inTypes[1], data1->get_shape()); + auto indata2 = std::make_shared(inTypes[2], data2->get_shape()); auto convert0 = std::make_shared(indata0, outTypes[0]); auto convert1 = std::make_shared(indata1, outTypes[0]); auto add = std::make_shared(convert0, convert1); @@ -143,7 +123,7 @@ std::shared_ptr ConvertPartialInputsAndResultsFunction::initReference auto sub = std::make_shared(relu, indata2); auto convert2 = std::make_shared(relu, outTypes[1]); auto subgraph = std::make_shared( - NodeVector{stub0, stub1, stub2}, std::make_shared(NodeVector{sub, convert2}, ParameterVector{indata0, indata1, indata2})); + NodeVector{data0, data1, data2}, std::make_shared(NodeVector{sub, convert2}, ParameterVector{indata0, indata1, indata2})); auto stub3 = createRollAsStub(subgraph); return std::make_shared(OutputVector{subgraph->output(1), stub3->output(0)}, ParameterVector{data0, data1, data2}); @@ -151,8 +131,7 @@ std::shared_ptr ConvertPartialInputsAndResultsFunction::initReference std::shared_ptr ConvertManyOnInputsFunction::initOriginal() const { auto data0 = std::make_shared(types[0], input_shapes[0]); - auto stub0 = createRollAsStub(data0); - std::shared_ptr out = stub0; + std::shared_ptr out = data0; for (auto i = 1; i < types.size(); i++) { auto convert = std::make_shared(out, types[i]); out = convert; @@ -162,23 +141,21 @@ std::shared_ptr ConvertManyOnInputsFunction::initOriginal() const { } std::shared_ptr ConvertManyOnInputsFunction::initReference() const { auto data0 = std::make_shared(types[0], input_shapes[0]); - auto stub0 = createRollAsStub(data0); - auto indata0 = std::make_shared(types[0], stub0->get_shape()); + auto indata0 = std::make_shared(types[0], data0->get_shape()); std::shared_ptr out = indata0; for (auto i = 1; i < types.size(); i++) { auto convert = std::make_shared(out, types[i]); out = convert; } auto relu = std::make_shared(out); - auto subgraph = std::make_shared(NodeVector{stub0}, + auto subgraph = std::make_shared(NodeVector{data0}, std::make_shared(NodeVector{relu}, ParameterVector{indata0})); return std::make_shared(NodeVector{subgraph}, ParameterVector{data0}); } std::shared_ptr ConvertManyOnOutputsFunction::initOriginal() const { auto data0 = std::make_shared(types[0], input_shapes[0]); - auto stub0 = std::make_shared(data0); - auto relu = std::make_shared(stub0); + 
auto relu = std::make_shared(data0); std::shared_ptr out = relu; for (auto i = 1; i < types.size(); i++) { auto convert = std::make_shared(out, types[i]); @@ -188,28 +165,26 @@ std::shared_ptr ConvertManyOnOutputsFunction::initOriginal() const { } std::shared_ptr ConvertManyOnOutputsFunction::initReference() const { auto data0 = std::make_shared(types[0], input_shapes[0]); - auto stub0 = std::make_shared(data0); - auto indata0 = std::make_shared(types[0], stub0->get_shape()); + auto indata0 = std::make_shared(types[0], data0->get_shape()); auto relu = std::make_shared(indata0); std::shared_ptr out = relu; for (auto i = 1; i < types.size(); i++) { auto convert = std::make_shared(out, types[i]); out = convert; } - auto subgraph = std::make_shared(NodeVector{stub0}, + auto subgraph = std::make_shared(NodeVector{data0}, std::make_shared(NodeVector{out}, ParameterVector{indata0})); return std::make_shared(NodeVector{subgraph}, ParameterVector{data0}); } std::shared_ptr ConvertManyOnInputOutputFunction::initOriginal() const { auto data0 = std::make_shared(inTypes[0], input_shapes[0]); - auto stub0 = std::make_shared(data0); - std::shared_ptr out = stub0; + std::shared_ptr out = data0; for (auto i = 1; i < inTypes.size(); i++) { auto convert = std::make_shared(out, inTypes[i]); out = convert; } - auto relu = std::make_shared(stub0); + auto relu = std::make_shared(data0); out = relu; for (auto i = 0; i < outTypes.size(); i++) { auto convert = std::make_shared(out, outTypes[i]); @@ -219,20 +194,19 @@ std::shared_ptr ConvertManyOnInputOutputFunction::initOriginal() cons } std::shared_ptr ConvertManyOnInputOutputFunction::initReference() const { auto data0 = std::make_shared(inTypes[0], input_shapes[0]); - auto stub0 = std::make_shared(data0); - auto indata0 = std::make_shared(inTypes[0], stub0->get_shape()); + auto indata0 = std::make_shared(inTypes[0], data0->get_shape()); std::shared_ptr out = indata0; for (auto i = 1; i < inTypes.size(); i++) { auto convert = std::make_shared(out, inTypes[i]); out = convert; } - auto relu = std::make_shared(stub0); + auto relu = std::make_shared(data0); out = relu; for (auto i = 0; i < outTypes.size(); i++) { auto convert = std::make_shared(out, outTypes[i]); out = convert; } - auto subgraph = std::make_shared(NodeVector{stub0}, + auto subgraph = std::make_shared(NodeVector{data0}, std::make_shared(NodeVector{out}, ParameterVector{indata0})); return std::make_shared(NodeVector{subgraph}, ParameterVector{data0}); } diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_simple.cpp b/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_simple.cpp index 080fb25d7a1..a8e7aa6aa76 100644 --- a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_simple.cpp +++ b/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_simple.cpp @@ -26,32 +26,11 @@ std::shared_ptr AddFunction::initReference() const { ParameterVector{indata0, indata1})); return std::make_shared(NodeVector{add}, ParameterVector{data0, data1}); } -std::shared_ptr AddSinhFunction::initOriginal() const { - auto data0 = std::make_shared(precision, input_shapes[0]); - auto data1 = std::make_shared(precision, input_shapes[1]); - auto sin0 = std::make_shared(data0); - auto sin1 = std::make_shared(data1); - auto add = std::make_shared(sin0, sin1); - return std::make_shared(NodeVector{add}, ParameterVector{data0, data1}); -} -std::shared_ptr AddSinhFunction::initReference() const { - auto data0 = std::make_shared(precision, input_shapes[0]); - auto data1 = 
std::make_shared(precision, input_shapes[1]); - auto sin0 = std::make_shared(data0); - auto sin1 = std::make_shared(data1); - auto indata0 = std::make_shared(precision, sin0->get_shape()); - auto indata1 = std::make_shared(precision, sin1->get_shape()); - auto add = std::make_shared(NodeVector{data0, data1}, - std::make_shared(NodeVector{std::make_shared(sin0, sin1)}, - ParameterVector{indata0, indata1})); - return std::make_shared(NodeVector{add}, ParameterVector{data0, data1}); -} -std::shared_ptr AddSinhConstFunction::initOriginal() const { +std::shared_ptr AddConstFunction::initOriginal() const { auto data0 = std::make_shared(precision, input_shapes[0]); const std::vector const_values = CommonTestUtils::generate_float_numbers(shape_size(input_shapes[0]), -10., 10.); auto const_data1 = std::make_shared(precision, input_shapes[0], const_values); - auto sin0 = std::make_shared(data0); - auto add = std::make_shared(sin0, const_data1); + auto add = std::make_shared(data0, const_data1); return std::make_shared(NodeVector{add}, ParameterVector{data0}); } std::shared_ptr AddRollConstFunction::initOriginal() const { @@ -105,31 +84,15 @@ std::shared_ptr EltwiseThreeInputsFunction::initOriginal() const { return std::make_shared(NodeVector{mul}, ParameterVector{data0, data1, data2}); } -std::shared_ptr EltwiseThreeInputsSinhFunction::initOriginal() const { - auto data0 = std::make_shared(precision, input_shapes[0]); - auto data1 = std::make_shared(precision, input_shapes[1]); - auto data2 = std::make_shared(precision, input_shapes[2]); - auto sinh0 = std::make_shared(data0); - auto sinh1 = std::make_shared(data1); - auto sinh2 = std::make_shared(data2); - const std::vector const_values = CommonTestUtils::generate_float_numbers(1, -10., 10.); - auto const_data = std::make_shared(ov::element::f32, Shape{1}, const_values); - auto add = std::make_shared(sinh0, sinh1); - auto sub = std::make_shared(sinh2, const_data); - auto mul = std::make_shared(add, sub); - return std::make_shared(NodeVector{mul}, ParameterVector{data0, data1, data2}); -} -std::shared_ptr EltwiseMaxNumParamsSinhFunction::initOriginal() const { +std::shared_ptr EltwiseMaxNumParamsFunction::initOriginal() const { ParameterVector params; - std::vector> sinh; // 10 for (const auto& shape : input_shapes) { auto param = std::make_shared(precision, shape); params.push_back(param); - sinh.push_back(std::make_shared(param)); } std::vector> add; // 5 for (size_t i = 0; i < input_shapes.size() / 2; i++) { - add.push_back(std::make_shared(sinh[i * 2], sinh[i * 2 + 1])); + add.push_back(std::make_shared(params[i * 2], params[i * 2 + 1])); } std::vector> mul; // 2 for (size_t i = 0; i < add.size() / 2; i++) { @@ -235,11 +198,7 @@ std::shared_ptr EltwiseTwoResultsFunction::initOriginal() const { data0->set_friendly_name("data0"); auto data1 = std::make_shared(precision, input_shapes[1]); data1->set_friendly_name("data1"); - auto sinh0 = std::make_shared(data0); - sinh0->set_friendly_name("sinh0"); - auto sinh1 = std::make_shared(data1); - sinh1->set_friendly_name("sinh1"); - auto add = std::make_shared(sinh0, sinh1); + auto add = std::make_shared(data0, data1); add->set_friendly_name("add"); auto hswish = std::make_shared(add); hswish->set_friendly_name("hswish"); @@ -267,17 +226,14 @@ std::shared_ptr EltwiseTwoResultsFunction::initReference() const { data0->set_friendly_name("data0"); auto data1 = std::make_shared(precision, input_shapes[1]); data1->set_friendly_name("data1"); - auto sinh0 = std::make_shared(data0); - 
sinh0->set_friendly_name("sinh0"); - auto sinh1 = std::make_shared(data1); - sinh1->set_friendly_name("sinh1"); - auto indata0 = std::make_shared(precision, sinh0->get_shape()); - auto indata1 = std::make_shared(precision, sinh1->get_shape()); + + auto indata0 = std::make_shared(precision, data0->get_shape()); + auto indata1 = std::make_shared(precision, data1->get_shape()); auto add = std::make_shared(indata0, indata1); add->set_friendly_name("add"); auto hswish = std::make_shared(add); hswish->set_friendly_name("hswish"); - auto subgraph0 = std::make_shared(NodeVector{sinh0, sinh1}, + auto subgraph0 = std::make_shared(NodeVector{data0, data1}, std::make_shared(NodeVector{add, hswish}, ParameterVector{indata0, indata1})); subgraph0->set_friendly_name("add"); @@ -308,10 +264,8 @@ std::shared_ptr EltwiseTwoResultsFunction::initReference() const { std::shared_ptr TwoInputsAndOutputsFunction::initOriginal() const { auto data0 = std::make_shared(precision, input_shapes[0]); auto data1 = std::make_shared(precision, input_shapes[1]); - auto sin0 = std::make_shared(data0); - auto sin1 = std::make_shared(data1); - auto hswish = std::make_shared(sin0); - auto add = std::make_shared(hswish, sin1); + auto hswish = std::make_shared(data0); + auto add = std::make_shared(hswish, data1); auto relu = std::make_shared(add); auto sin3 = std::make_shared(relu);