diff --git a/docs/snippets/lpt_intel_cpu_plugin.cpp b/docs/snippets/lpt_intel_cpu_plugin.cpp
index 402f652f394..14dcfad486f 100644
--- a/docs/snippets/lpt_intel_cpu_plugin.cpp
+++ b/docs/snippets/lpt_intel_cpu_plugin.cpp
@@ -17,9 +17,9 @@ namespace ngraph {
 namespace pass {
 namespace device {
 
-class ConvertOpSet1ToDeviceSpecific: public ngraph::pass::FunctionPass {
+class ConvertOpSet1ToDeviceSpecific: public ov::pass::ModelPass {
 public:
-    bool run_on_function(std::shared_ptr<ngraph::Function> f) override {
+    bool run_on_model(const std::shared_ptr<ov::Model>& f) override {
         return true;
     }
 };
diff --git a/src/common/conditional_compilation/include/openvino/cc/pass/itt.hpp b/src/common/conditional_compilation/include/openvino/cc/pass/itt.hpp
index c0a0092ba30..32de9caa492 100644
--- a/src/common/conditional_compilation/include/openvino/cc/pass/itt.hpp
+++ b/src/common/conditional_compilation/include/openvino/cc/pass/itt.hpp
@@ -24,6 +24,11 @@ OV_CC_DOMAINS(ov_pass);
 #    define ADD_MATCHER(obj, region, ...) obj->add_matcher<region>(__VA_ARGS__);
 #    define REGISTER_PASS(obj, region, ...) obj.register_pass<region>(__VA_ARGS__);
 #    define REGISTER_DISABLED_PASS(obj, region, ...) obj.register_pass<region, false>(__VA_ARGS__);
+
+#    define OV_PASS_CALLBACK(matcher)                                       \
+        openvino::itt::handle_t m_callback_handle;                          \
+        m_callback_handle = openvino::itt::handle(matcher->get_name());     \
+        OV_ITT_SCOPED_TASK(SIMPLE_ov_pass, m_callback_handle)
 
 #elif defined(SELECTIVE_BUILD)
 
 #    define MATCHER_SCOPE_(scope, region) \
@@ -70,6 +75,7 @@ OV_CC_DOMAINS(ov_pass);
 #    define REGISTER_DISABLED_PASS(obj, region, ...)                                                 \
         OV_PP_CAT(REGISTER_PASS_WITH_FALSE_, OV_CC_SCOPE_IS_ENABLED(OV_PP_CAT3(ov_pass, _, region))) \
         (obj, region, __VA_ARGS__)
+#    define OV_PASS_CALLBACK(matcher)
 
 #else
 
 #    define MATCHER_SCOPE(region) const std::string matcher_name(OV_PP_TOSTRING(region))
@@ -79,6 +85,7 @@ OV_CC_DOMAINS(ov_pass);
 #    define ADD_MATCHER(obj, region, ...) obj->add_matcher<region>(__VA_ARGS__);
 #    define REGISTER_PASS(obj, region, ...) obj.register_pass<region>(__VA_ARGS__);
 #    define REGISTER_DISABLED_PASS(obj, region, ...) obj.register_pass<region, false>(__VA_ARGS__);
+#    define OV_PASS_CALLBACK(matcher)
 #endif
 
 #define ADD_MATCHER_FOR_THIS(region, ...) ADD_MATCHER(this, region, __VA_ARGS__)
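With `run_on_function()` removed, every pass derives from `ov::pass::ModelPass` and overrides `run_on_model()` only (the base method becomes pure virtual in `openvino/pass/pass.hpp` further below). A minimal sketch of a migrated pass, assuming only the public `ov::pass` API; the class name is illustrative:

```cpp
#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/pass/pass.hpp"

// Sketch of a new-style model pass; MyDevicePass is a placeholder name.
class MyDevicePass : public ov::pass::ModelPass {
public:
    OPENVINO_RTTI("MyDevicePass");
    bool run_on_model(const std::shared_ptr<ov::Model>& model) override {
        bool changed = false;
        // ... inspect/rewrite `model` here, set `changed` accordingly ...
        return changed;  // true if the model was modified
    }
};
```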
diff --git a/src/common/low_precision_transformations/src/layer_transformation.cpp b/src/common/low_precision_transformations/src/layer_transformation.cpp
index c1995f39ab9..6a496f15657 100644
--- a/src/common/low_precision_transformations/src/layer_transformation.cpp
+++ b/src/common/low_precision_transformations/src/layer_transformation.cpp
@@ -5,7 +5,6 @@
 #include
 #include
-#include
 #include
 #include
@@ -448,9 +447,24 @@ void LayerTransformation::addPattern(ngraph::pass::GraphRewrite& pass, Transform
     };
 
     // TODO: better name for matcher? required?
     auto m = std::make_shared<ngraph::pattern::Matcher>(patternRoot, matcher_name);
-    NGRAPH_SUPPRESS_DEPRECATED_START
-    pass.add_matcher(m, internal_callback, ngraph::pass::PassProperty::CHANGE_DYNAMIC_STATE);
-    NGRAPH_SUPPRESS_DEPRECATED_END
+    auto match_pass = std::make_shared<ov::pass::MatcherPass>(
+        m->get_name(),
+        m,
+        [m, internal_callback](const std::shared_ptr<Node>& node) -> bool {
+            NGRAPH_DEBUG << "Running matcher " << m->get_name() << " on " << node;
+            OV_PASS_CALLBACK(m);
+            if (std::dynamic_pointer_cast<ov::pass::pattern::Matcher>(m)->match(node->output(0))) {
+                NGRAPH_DEBUG << "Matcher " << m->get_name() << " matched " << node;
+                bool status = internal_callback(*m.get());
+                // explicitly clear Matcher state because it holds pointers to matched nodes
+                m->clear_state();
+                return status;
+            }
+            m->clear_state();
+            return false;
+        },
+        ov::pass::PassProperty::CHANGE_DYNAMIC_STATE);
+    pass.add_matcher(match_pass);
 }
 
 } // namespace low_precision
diff --git a/src/common/low_precision_transformations/src/low_precision.cpp b/src/common/low_precision_transformations/src/low_precision.cpp
index 61904cbab37..0a476b6f436 100644
--- a/src/common/low_precision_transformations/src/low_precision.cpp
+++ b/src/common/low_precision_transformations/src/low_precision.cpp
@@ -5,7 +5,6 @@
 #include "low_precision/low_precision.hpp"
 
 #include
-#include
 #include
 #include
@@ -134,9 +133,24 @@ void make_matcher_type_relaxed(ngraph::pass::GraphRewrite* transformation) {
     };
 
     auto m = std::make_shared<ngraph::pattern::Matcher>(p_node, matcher_name);
-    NGRAPH_SUPPRESS_DEPRECATED_START
-    transformation->add_matcher(m, callback, ngraph::pass::PassProperty::CHANGE_DYNAMIC_STATE);
-    NGRAPH_SUPPRESS_DEPRECATED_END
+    auto match_pass = std::make_shared<ov::pass::MatcherPass>(
+        m->get_name(),
+        m,
+        [m, callback](const std::shared_ptr<Node>& node) -> bool {
+            NGRAPH_DEBUG << "Running matcher " << m->get_name() << " on " << node;
+            if (std::dynamic_pointer_cast<ov::pass::pattern::Matcher>(m)->match(node->output(0))) {
+                NGRAPH_DEBUG << "Matcher " << m->get_name() << " matched " << node;
+                OV_PASS_CALLBACK(m);
+                bool status = callback(*m.get());
+                // explicitly clear Matcher state because it holds pointers to matched nodes
+                m->clear_state();
+                return status;
+            }
+            m->clear_state();
+            return false;
+        },
+        ov::pass::PassProperty::CHANGE_DYNAMIC_STATE);
+    transformation->add_matcher(match_pass);
 }
 
 ngraph::pass::low_precision::TypeRelaxedReplacer::TypeRelaxedReplacer() {
diff --git a/src/common/low_precision_transformations/tests/markup_avg_pool_precisions_transformation.cpp b/src/common/low_precision_transformations/tests/markup_avg_pool_precisions_transformation.cpp
index 3432b5ee4a9..2306cd81565 100644
--- a/src/common/low_precision_transformations/tests/markup_avg_pool_precisions_transformation.cpp
+++ b/src/common/low_precision_transformations/tests/markup_avg_pool_precisions_transformation.cpp
@@ -80,7 +80,7 @@ public:
             0);
 
         ngraph::pass::low_precision::TypeRelaxedReplacer pass;
-        pass.run_on_function(actualFunction);
+        pass.run_on_model(actualFunction);
 
         auto supportedPrecisionsOnActivation = std::vector(
             {ngraph::pass::low_precision::PrecisionsRestriction::create(
@@ -129,7 +129,7 @@
 };
 
 TEST_P(MarkupAvgPoolPrecisionsTransformation, CompareFunctions) {
-    ov::pass::InitNodeInfo().run_on_function(actualFunction);
+    ov::pass::InitNodeInfo().run_on_model(actualFunction);
     actualFunction->validate_nodes_and_infer_types();
 
     const auto avgPoolOperations = LayerTransformation::get(actualFunction);
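Both LPT call sites above replace the deprecated `GraphRewrite::add_matcher(matcher, callback, property)` overload with an explicitly constructed `ov::pass::MatcherPass`. A sketch of the same idiom in isolation, assuming a `GraphRewrite` instance, a pattern root node, and an old-style `graph_rewrite_callback`; the function and variable names are illustrative:

```cpp
#include <memory>

#include "openvino/pass/graph_rewrite.hpp"

// Sketch: wrap a pattern::Matcher plus an old-style callback into a MatcherPass
// and register it, mirroring the replacement code in this patch.
void register_wrapped_matcher(ov::pass::GraphRewrite& rewrite,
                              const std::shared_ptr<ov::Node>& pattern_root,
                              const ov::graph_rewrite_callback& old_callback) {
    auto m = std::make_shared<ov::pass::pattern::Matcher>(pattern_root, "MyMatcher");
    auto match_pass = std::make_shared<ov::pass::MatcherPass>(
        m->get_name(),
        m,
        [m, old_callback](const std::shared_ptr<ov::Node>& node) -> bool {
            if (m->match(node->output(0))) {
                const bool status = old_callback(*m);
                m->clear_state();  // the Matcher holds pointers to matched nodes
                return status;
            }
            m->clear_state();
            return false;
        },
        ov::pass::PassProperty::CHANGE_DYNAMIC_STATE);
    rewrite.add_matcher(match_pass);
}
```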
diff --git a/src/common/transformations/tests/offline_transformations/pruning_test.cpp b/src/common/transformations/tests/offline_transformations/pruning_test.cpp
index ec47d1c8eda..e3df714c0bb 100644
--- a/src/common/transformations/tests/offline_transformations/pruning_test.cpp
+++ b/src/common/transformations/tests/offline_transformations/pruning_test.cpp
@@ -267,7 +267,7 @@ TEST_F(TransformationTestsF, PropagateMasksBasic) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksBasic.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -352,7 +352,7 @@ TEST_F(TransformationTestsF, PropagateMasksDynamicConvolution) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksDynamicConvolution.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
@@ -403,7 +403,7 @@ TEST(TransformationTests, PropagateMasksDynamicReshape) {
     auto function = std::make_shared<Function>(NodeVector{conv2}, ParameterVector{input});
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksDynamicReshape.svg")
-            .run_on_function(function);
+            .run_on_model(function);
 
     pass::Manager m;
     m.register_pass();
@@ -448,7 +448,7 @@ TEST(TransformationTests, PropagateMasksDynamicGroupConvolution) {
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksDynamicGroupConvolution.svg")
-            .run_on_function(f);
+            .run_on_model(f);
 
     pass::Manager m;
     m.register_pass();
@@ -486,7 +486,7 @@ TEST(TransformationTests, PropagateMasksEmpty) {
     auto f = std::make_shared<Function>(NodeVector{conv2}, ParameterVector{input});
 
     if (VISUALIZE_TESTS_TREE)
-        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksEmpty.svg").run_on_function(f);
+        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksEmpty.svg").run_on_model(f);
 
     pass::Manager m;
     m.register_pass();
@@ -583,7 +583,7 @@ TEST_F(TransformationTestsF, PropagateMaskPassThrough) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMaskPassThrough.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -735,7 +735,7 @@ TEST_F(TransformationTestsF, PropagateMasksHardDependencies) {
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksHardDependencies.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -886,7 +886,7 @@ TEST_F(TransformationTestsF, PropagateMasksQuantizedGroupConvolution) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksQuantizedGroupConvolution.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -1053,7 +1053,7 @@ TEST_F(TransformationTestsF, PropagateMasksQuantizedGroupConvolutionWithShapeOf)
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) +
                                     "PropagateMasksQuantizedGroupConvolutionWithShapeOf.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -1185,7 +1185,7 @@ TEST_F(TransformationTestsF, PropagateMasksFakeQuantizePerTensor) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksFakeQuantizePerTensor.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
@@ -1269,7 +1269,7 @@ TEST(TransformationTests, PropagateMasksFakeQuantizePerTensor1DScale) {
     auto function = std::make_shared<Function>(NodeVector{conv2}, ParameterVector{input});
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksFakeQuantizePerTensor1DScale.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
@@ -1387,7 +1387,7 @@ TEST_F(TransformationTestsF, PropagateMasksFakeQuantizePerChannel) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksFakeQuantizePerChannel.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         // Masks for fq input parammeters didn't saved after
@@ -1530,7 +1530,7 @@ TEST_F(TransformationTestsF, TestConcatMaskPropagation) {
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "TestConcatMaskPropagation.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -1673,7 +1673,7 @@ TEST_F(TransformationTestsF, TestConcatMaskPropagationUp) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "TestConcatMaskPropagationUp.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -1744,7 +1744,7 @@ TEST(TransformationTests, TestConcatMaskPropagationUpEmpty) {
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "TestConcatMaskPropagationUpEmpty.svg")
-            .run_on_function(f);
+            .run_on_model(f);
 
     pass::Manager m;
     m.register_pass();
@@ -1806,7 +1806,7 @@ TEST_F(TransformationTestsF, PruneConvIsClosingAndInGroup) {
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneConvIsClosingAndInGroup.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         auto input = std::make_shared(element::f32, inputShapes);
         auto weights = create_constant_with_zeros(
@@ -1922,7 +1922,7 @@ TEST(TransformationTests, PruneBranchingStopOp) {
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneBranchingStopOp.svg")
-            .run_on_function(function);
+            .run_on_model(function);
 
     pass::Manager m;
     m.register_pass();
@@ -1977,7 +1977,7 @@ TEST(TransformationTests, PruneStopOpUp) {
     auto function = std::make_shared<Function>(OutputVector{end_conv}, ParameterVector{input}, "StopOpUp");
 
     if (VISUALIZE_TESTS_TREE)
-        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneStopOpUp.svg").run_on_function(function);
+        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneStopOpUp.svg").run_on_model(function);
 
     pass::Manager m;
     m.register_pass();
@@ -2044,8 +2044,7 @@ TEST_F(TransformationTestsF, PruneReducelayerUp) {
         function_ref = std::make_shared<Function>(OutputVector{conv_1}, ParameterVector{input});
     }
     if (VISUALIZE_TESTS_TREE)
-        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneReducelayerUp.svg")
-            .run_on_function(function);
+        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneReducelayerUp.svg").run_on_model(function);
 
     pass::Manager m;
     m.register_pass();
@@ -2142,7 +2141,7 @@ TEST_F(TransformationTestsF, PruneReduceLayerDown) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneReduceLayerDown.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -2194,7 +2193,7 @@ TEST(TransformationTests, PruneStopReducelayerUp) {
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneStopReducelayerUp.svg")
-            .run_on_function(function);
+            .run_on_model(function);
 
     pass::Manager m;
     m.register_pass();
@@ -2252,7 +2251,7 @@ TEST(TransformationTests, PruneStopReduceLayerDown) {
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneStopReduceLayerDown.svg")
-            .run_on_function(function);
+            .run_on_model(function);
 
     pass::Manager m;
     m.register_pass();
@@ -2327,7 +2326,7 @@ TEST_F(TransformationTestsF, MaskPropagationReshapeUp) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationReshapeUp.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -2438,7 +2437,7 @@ TEST_P(TransformationTestsBoolParamF, MaskPropagationReshapeUpWithShapeOf) {
         const auto postfix = use_shape_of ? "True" : "False";
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationReshapeUpWithShapeOf" +
                                     postfix + ".svg")
-            .run_on_function(function);
+            .run_on_model(function);
     }
     {
         pass::Manager m;
@@ -2550,7 +2549,7 @@ TEST_F(TransformationTestsF, MaskPropagationReshapeUpShapeSubGraph) {
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationReshapeUpShapeSubGraph.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -2642,7 +2641,7 @@ TEST_F(TransformationTestsF, MaskPropagationReshapeExtend) {
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationReshapeExtend.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -2749,7 +2748,7 @@ TEST_F(DISABLED_TransformationTestsF, MaskPropagationReshapeDownMul) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationReshapeDownMul.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -2853,7 +2852,7 @@ TEST_F(TransformationTestsF, MaskPropagationReshapeDownAdd) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationReshapeDownAdd.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -2902,7 +2901,7 @@ TEST(TransformationTests, MaskPropagationStopReshapeUp) {
     auto function = std::make_shared<Function>(OutputVector{conv_1}, ParameterVector{input});
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationStopReshapeUp.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -2959,7 +2958,7 @@ TEST(TransformationTests, MaskPropagationStopReshapeDown) {
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationStopReshapeDown.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -3017,7 +3016,7 @@ TEST_F(TransformationTestsF, MaskPropagationReshapeUnsqueezeUp) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationReshapeUnsqueezeUp.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -3079,7 +3078,7 @@ TEST_F(TransformationTestsF, MaskPropagationReshapeUnsqueezeDown) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationReshapeUnsqueezeDown.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -3140,7 +3139,7 @@ TEST(TransformationTests, MaskPropagationWrongDimsElementwise) {
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationWrongDimsElementwise.svg")
-            .run_on_function(function);
+            .run_on_model(function);
 
     pass::Manager m;
     m.register_pass();
@@ -3251,7 +3250,7 @@ TEST_F(TransformationTestsF, PruneSEBlock) {
         function_ref = std::make_shared<Function>(OutputVector{end_conv}, ParameterVector{input}, "SEBlock");
     }
     if (VISUALIZE_TESTS_TREE)
-        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneSEBlock.svg").run_on_function(function);
+        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneSEBlock.svg").run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -3343,7 +3342,7 @@ TEST_F(TransformationTestsF, PropagateMasksLinear) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksLinear.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -3395,7 +3394,7 @@ TEST(TransformationTests, MaskPropagationMatMulStopEmptyABranch) {
     auto function = std::make_shared<Function>(OutputVector{mul_left}, ParameterVector{input});
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationMatMulStopEmptyABranch.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -3462,7 +3461,7 @@ TEST(TransformationTests, PruneLinearUp) {
     auto function = std::make_shared<Function>(OutputVector{last_linear}, ParameterVector{input});
 
     if (VISUALIZE_TESTS_TREE)
-        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneLinearUp.svg").run_on_function(function);
+        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneLinearUp.svg").run_on_model(function);
 
     pass::Manager m;
     m.register_pass();
@@ -3519,8 +3518,7 @@ TEST(TransformationTests, PruneConvUpShort) {
     auto function = std::make_shared<Function>(OutputVector{last_conv}, ParameterVector{input});
 
     if (VISUALIZE_TESTS_TREE)
-        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneConvUpShort.svg")
-            .run_on_function(function);
+        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneConvUpShort.svg").run_on_model(function);
 
     pass::Manager m;
     m.register_pass();
@@ -3595,7 +3593,7 @@ TEST_F(TransformationTestsF, MaskPropagationLinearOuterDims) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationLinearOuterDims.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -3674,7 +3672,7 @@ TEST(TransformationTests, MaskPropagationStopLinearOuterDims) {
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationStopLinearOuterDims.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -3765,7 +3763,7 @@ TEST_F(TransformationTestsF, PruneMasksMatMulColsStopRowsUp) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneMasksMatMulColsStopRowsUp.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -3854,7 +3852,7 @@ TEST_F(TransformationTestsF, PruneMasksMatMulRowsStopColsUp) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PruneMasksMatMulRowsStopColsUp.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -3949,8 +3947,7 @@ TEST_F(TransformationTestsF, PropagateFlattenUp) {
         function_ref = std::make_shared<Function>(NodeVector{linear}, ParameterVector{input});
     }
     if (VISUALIZE_TESTS_TREE)
-        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateFlattenUp.svg")
-            .run_on_function(function);
+        ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateFlattenUp.svg").run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -4025,7 +4022,7 @@ TEST_F(TransformationTestsF, PropagateFlattenDown) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateFlattenDown.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -4084,7 +4081,7 @@ TEST_F(TransformationTestsF, PropagateMasksTranspose) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksTranspose.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -4157,7 +4154,7 @@ TEST_F(TransformationTestsF, PropagateMasksTransposeComplex) {
     }
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksTransposeComplex.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -4197,7 +4194,7 @@ TEST(TransformationTests, PropagateMasksTransposeStop) {
     auto function = std::make_shared<Function>(NodeVector{last_mul}, ParameterVector{input});
     if (VISUALIZE_TESTS_TREE)
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksTransposeStop.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     {
         pass::Manager m;
         m.register_pass();
@@ -4323,7 +4320,7 @@ TEST_F(DISABLED_TransformationTestsF, PropagateMasksBroadcastedEltwiseWithInputs
     }
     if (VISUALIZE_TESTS_TREE) {
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksBroadcastedEltwiseWithInputs.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     }
     {
         pass::Manager m;
@@ -4500,7 +4497,7 @@ TEST_F(TransformationTestsF, PropagateMasksBroadcastedEltwise) {
     }
     if (VISUALIZE_TESTS_TREE) {
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "PropagateMasksBroadcastedEltwise.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     }
     {
         pass::Manager m;
@@ -4663,7 +4660,7 @@ TEST_F(TransformationTestsF, MaskPropagationComplexReshape) {
     }
     if (VISUALIZE_TESTS_TREE) {
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationComplexReshape.svg")
-            .run_on_function(function);
+            .run_on_model(function);
     }
     {
         pass::Manager m;
@@ -4856,7 +4853,7 @@ TEST_P(TransformationTestsBoolParamF, MaskPropagationReshapedPassThroughP) {
         auto postfix = (add_shape_of) ? "True" : "False";
         ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationReshapedPassThroughP" +
                                     postfix + ".svg")
-            .run_on_function(function);
+            .run_on_model(function);
     }
     {
         pass::Manager m;
"True" : "False"; ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationBroadcastedSameRankEltwiseSwappedLayoutP" + postfix + ".svg") - .run_on_function(function); + .run_on_model(function); } { pass::Manager m; @@ -5028,7 +5025,7 @@ TEST(TransformationTests, MaskPropagationBroadcastedEltwiseInputAndWeightsBroadc if (VISUALIZE_TESTS_TREE) { ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationBroadcastedEltwiseInputAndWeightsBroadcasted.svg") - .run_on_function(function); + .run_on_model(function); } { pass::Manager m; @@ -5078,7 +5075,7 @@ TEST(TransformationTests, MaskPropagationBroadcastedEltwiseWrongBroadcastingMode if (VISUALIZE_TESTS_TREE) { ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationBroadcastedEltwiseWrongBroadcastingMode.svg") - .run_on_function(function); + .run_on_model(function); } { pass::Manager m; @@ -5143,7 +5140,7 @@ TEST_F(TransformationTestsF, MaskPropagationMatMulWithSeveralOutputs) { } if (VISUALIZE_TESTS_TREE) ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationMatMulWithSeveralOutputs.svg") - .run_on_function(function); + .run_on_model(function); { pass::Manager m; m.register_pass(); @@ -5174,7 +5171,7 @@ TEST(TransformationTests, CheckReshapeWithNoConstInShape) { if (VISUALIZE_TESTS_TREE) ngraph::pass::VisualizeTree(std::string(VISUALIZE_TREE_ROOT) + "CheckReshapeWithNoConstInShape.svg") - .run_on_function(function); + .run_on_model(function); pass::Manager m; m.register_pass(); diff --git a/src/core/include/openvino/pass/graph_rewrite.hpp b/src/core/include/openvino/pass/graph_rewrite.hpp index 42b5247ddee..9ccca3d35ab 100644 --- a/src/core/include/openvino/pass/graph_rewrite.hpp +++ b/src/core/include/openvino/pass/graph_rewrite.hpp @@ -257,14 +257,6 @@ public: return pass; } - OPENVINO_DEPRECATED("Use MatcherPass instead") - void add_matcher(const std::shared_ptr& m, - const graph_rewrite_callback& callback, - const PassPropertyMask& property); - - OPENVINO_DEPRECATED("Use MatcherPass instead") - void add_matcher(const std::shared_ptr& m, const ov::graph_rewrite_callback& callback); - bool run_on_model(const std::shared_ptr& m) override; void set_pass_config(const std::shared_ptr& pass_config) override; diff --git a/src/core/include/openvino/pass/manager.hpp b/src/core/include/openvino/pass/manager.hpp index 40f14d83890..d2d8d06ee8e 100644 --- a/src/core/include/openvino/pass/manager.hpp +++ b/src/core/include/openvino/pass/manager.hpp @@ -76,36 +76,6 @@ public: /// \param new_state Value "true" enables Validate pass run; "false", otherwise void set_per_pass_validation(bool new_state); - /// \brief Callback is a lambda function that can be used by registered transformations. - /// The main purpose of this callback is to provide a way for plugins to disable/enable - /// transformations based on some conditions. In some cases plugins may want not to - /// execute some - /// transformations. - /// For example plugin can disable unpleasant decompositions because of performance - /// reasons for - /// some cases. - /// Callback example: - /// auto callback = [](const std::shared_ptr & node) -> bool { - /// return std::dynamic_pointer_cast(node) != - /// nullptr; - /// }; - /// This callback returns true in case of DepthToSpace operation. So when execution - /// DepthToSpace - /// decomposition pass will check is this decomposition needed or plugin can execute - /// this - /// operation directly. 
diff --git a/src/core/include/openvino/pass/manager.hpp b/src/core/include/openvino/pass/manager.hpp
index 40f14d83890..d2d8d06ee8e 100644
--- a/src/core/include/openvino/pass/manager.hpp
+++ b/src/core/include/openvino/pass/manager.hpp
@@ -76,36 +76,6 @@ public:
     /// \param new_state Value "true" enables Validate pass run; "false", otherwise
     void set_per_pass_validation(bool new_state);
 
-    /// \brief Callback is a lambda function that can be used by registered transformations.
-    /// The main purpose of this callback is to provide a way for plugins to disable/enable
-    /// transformations based on some conditions. In some cases plugins may want not to
-    /// execute some transformations.
-    /// For example plugin can disable unpleasant decompositions because of performance
-    /// reasons for some cases.
-    /// Callback example:
-    ///     auto callback = [](const std::shared_ptr<const Node>& node) -> bool {
-    ///         return std::dynamic_pointer_cast<const ngraph::opset3::DepthToSpace>(node) != nullptr;
-    ///     };
-    /// This callback returns true in case of DepthToSpace operation. So when execution
-    /// DepthToSpace decomposition pass will check is this decomposition needed or plugin
-    /// can execute this operation directly. And of course on transformation side we need
-    /// to have a response for this callback.
-    ///     if (transformation_callback(batch_to_space)) {
-    ///         return false;
-    ///     }
-    /// \param callback lamda function that returns true in case if node is supported by
-    /// plugin and transformation is not needed
-    OPENVINO_DEPRECATED("Please use get_pass_config() to configure transformation pipeline")
-    void set_callback(const param_callback& callback) {
-        m_pass_config->set_callback(callback);
-    }
     /// \return PassConfig shared object. This object is used for transformations pipeline
     /// configuration.
     /// This object allows to disable/enable transformations execution, set callback to
diff --git a/src/core/include/openvino/pass/pass.hpp b/src/core/include/openvino/pass/pass.hpp
index 44d6339a31b..9cbbce52ba8 100644
--- a/src/core/include/openvino/pass/pass.hpp
+++ b/src/core/include/openvino/pass/pass.hpp
@@ -61,14 +61,6 @@ public:
     std::shared_ptr<PassConfig> get_pass_config() {
         return m_pass_config;
     }
-    /// \brief Applies callback for given node. By default callback returns false.
-    /// This method remains here only for backward compatibility and will be removed
-    /// after all transformations are moved to transformation_callback() method.
-    /// \return result of callback execution for given node
-    OPENVINO_DEPRECATED("Please use transformation_callback method instead")
-    bool m_transformation_callback(const std::shared_ptr<const Node>& node) {
-        return m_pass_config->get_callback(get_type_info())(node);
-    }
 
     /// \brief Applies callback for given node. By default callback returns false.
     /// \param node which will be used inside callback
@@ -99,13 +91,7 @@ class OPENVINO_API ModelPass : public PassBase {
 public:
     OPENVINO_RTTI("ov::pass::ModelPass");
     ~ModelPass() override;
-    OPENVINO_DEPRECATED("run_on_function() method is deprecated. Please use run_on_model() instead.")
-    virtual bool run_on_function(std::shared_ptr<ov::Model> m);
-
-    virtual bool run_on_model(const std::shared_ptr<ov::Model>& m);
-
-private:
-    bool call_on_function{false};
-    bool call_on_model{false};
+    virtual bool run_on_model(const std::shared_ptr<ov::Model>& m) = 0;
 };
 
 }  // namespace pass
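The removed `Manager::set_callback()` was already only a shim over `PassConfig`; the supported spelling is now explicit. A short usage sketch matching the deleted doc comment's DepthToSpace example; the function name and pipeline contents are illustrative:

```cpp
#include <memory>

#include "openvino/op/depth_to_space.hpp"
#include "openvino/pass/manager.hpp"

// Sketch: install the plugin callback through PassConfig. Returning true tells
// a transformation that the plugin executes the matched node natively, so the
// corresponding decomposition should skip it via transformation_callback(node).
void configure_pipeline(ov::pass::Manager& manager) {
    manager.get_pass_config()->set_callback(
        [](const std::shared_ptr<const ov::Node>& node) -> bool {
            return std::dynamic_pointer_cast<const ov::op::v0::DepthToSpace>(node) != nullptr;
        });
}
```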
Got: ", - data_shape.size()); + data_rank_size); if (inputs_same_ps.is_static()) { NODE_VALIDATION_CHECK(op, data_rank.get_length() == inputs_same_ps[0].get_length(), @@ -60,38 +64,52 @@ std::vector shape_infer(const BatchToSpace* op, data_rank); } - auto out_shape = data_shape; - std::vector block_val, crops_begin_val, crops_end_val; + TShape out_shape; + out_shape.reserve(data_rank_size); - if (get_data_as_int64(1, op, block_val, constant_data) && - get_data_as_int64(2, op, crops_begin_val, constant_data) && - get_data_as_int64(3, op, crops_end_val, constant_data)) { + const auto blocks = get_input_const_data_as(op, 1, constant_data); + if (blocks) { NODE_VALIDATION_CHECK(op, - std::none_of(begin(block_val), end(block_val), cmp::Less(1)), + std::none_of(begin(*blocks), end(*blocks), cmp::Less(1)), "Elements of block_shape input must be greater or equal to one."); + const auto divisor = static_cast( + std::accumulate(begin(*blocks), end(*blocks), int64_t(1), std::multiplies())); + out_shape.push_back(data_shape[0] / divisor); + check_divided_result(op, out_shape[0], data_shape[0], divisor); + } else { + out_shape.emplace_back(dim::inf_bound); + } + std::vector crops_begin_val, crops_end_val; + if (get_data_as_int64(2, op, crops_begin_val, constant_data) && + get_data_as_int64(3, op, crops_end_val, constant_data)) { constexpr auto is_invalid_crop = cmp::Less(0); NODE_VALIDATION_CHECK(op, std::none_of(begin(crops_begin_val), end(crops_begin_val), is_invalid_crop) && std::none_of(begin(crops_end_val), end(crops_end_val), is_invalid_crop), "Elements of crops_begin and crops_end inputs must be greater or equal to zero."); - const auto divisor = static_cast( - std::accumulate(begin(block_val), end(block_val), int64_t(1), std::multiplies())); + if (blocks) { + for (auto idx = spatial_dim_offset; idx < data_rank_size; ++idx) { + auto d = data_shape[idx] * static_cast((*blocks)[idx]); + auto crop = static_cast(crops_begin_val[idx] + crops_end_val[idx]); + NODE_VALIDATION_CHECK( + op, + d.is_dynamic() || crop <= d.get_length(), + "crops_begin[i] + crops_end[i] must be less or equal to block_shape[i] * input_shape[i]"); - out_shape[0] /= divisor; - check_divided_result(op, out_shape[0], data_shape[0], divisor); - - for (auto idx = spatial_dim_offset; idx < out_shape.size(); ++idx) { - out_shape[idx] *= static_cast(block_val[idx]); - auto crop = static_cast(crops_begin_val[idx] + crops_end_val[idx]); - NODE_VALIDATION_CHECK( - op, - out_shape[idx].is_dynamic() || crop <= out_shape[idx].get_length(), - "crops_begin[i] + crops_end[i] must be less or equal to block_shape[i] * input_shape[i]"); - - out_shape[idx] = out_shape[idx] - crop; + out_shape.push_back(d - crop); + } + } else { + const auto block = Dimension(1, dim::inf_bound); + for (auto idx = spatial_dim_offset; idx < data_rank_size; ++idx) { + auto d = data_shape[idx] * block; + auto crop = static_cast(crops_begin_val[idx] + crops_end_val[idx]); + out_shape.push_back(d - crop); + } } + } else { + out_shape.insert(out_shape.end(), data_rank_size - spatial_dim_offset, Dimension::dynamic()); } return {out_shape}; } else { diff --git a/src/core/shape_inference/include/space_to_batch_shape_inference.hpp b/src/core/shape_inference/include/space_to_batch_shape_inference.hpp index 792c7ddc776..7cc04ec0ac3 100644 --- a/src/core/shape_inference/include/space_to_batch_shape_inference.hpp +++ b/src/core/shape_inference/include/space_to_batch_shape_inference.hpp @@ -5,10 +5,11 @@ #pragma once #include -#include -#include -#include +#include 
"dimension_util.hpp" +#include "openvino/core/validation_util.hpp" +#include "openvino/op/space_to_batch.hpp" +#include "openvino/opsets/opset2.hpp" #include "utils.hpp" namespace ov { @@ -19,6 +20,7 @@ template std::vector shape_infer(const SpaceToBatch* op, const std::vector& input_shapes, const std::map& constant_data = {}) { + using namespace ov::util; using TVal = typename TShape::value_type::value_type; NODE_VALIDATION_CHECK(op, input_shapes.size() == 4); @@ -45,30 +47,45 @@ std::vector shape_infer(const SpaceToBatch* op, if (data_shape.rank().is_static()) { constexpr size_t spatial_dim_offset = 1; + const auto data_rank_size = data_shape.size(); NODE_VALIDATION_CHECK(op, - (data_shape.size() > spatial_dim_offset), + (data_rank_size > spatial_dim_offset), "The data tensor with rank lower than 2 is not supported (data rank: ", - data_shape.size(), + data_rank_size, ")"); - auto out_shape = data_shape; - std::vector block, pads_begin, pads_end; - if (get_data_as_int64(1, op, block, constant_data) && - get_data_as_int64(2, op, pads_begin, constant_data) && - get_data_as_int64(3, op, pads_end, constant_data)) { - TVal block_prod = std::accumulate(begin(block), end(block), 1, std::multiplies()); + TShape out_shape; + out_shape.reserve(data_rank_size); - out_shape[0] *= block_prod; - for (auto idx = spatial_dim_offset; idx < out_shape.size(); ++idx) { - NODE_VALIDATION_CHECK(op, block[idx] > 0, "block_shape values must be greater than 0"); - if (out_shape[idx].is_static() || out_shape[idx] != Dimension::dynamic()) { - const auto padded_dim = out_shape[idx] + static_cast(pads_begin[idx] + pads_end[idx]); - const auto divisor = static_cast(block[idx]); - out_shape[idx] = padded_dim / divisor; - check_divided_result(op, out_shape[idx], padded_dim, divisor); - } - } + auto blocks = get_input_const_data_as(op, 1, constant_data); + if (blocks) { + TVal block_prod = std::accumulate(begin(*blocks), end(*blocks), 1, std::multiplies()); + out_shape.push_back(data_shape[0] * block_prod); + } else { + out_shape.emplace_back(dim::inf_bound); } + + std::vector pads_begin, pads_end; + if (blocks && get_data_as_int64(2, op, pads_begin, constant_data) && + get_data_as_int64(3, op, pads_end, constant_data)) { + for (auto idx = spatial_dim_offset; idx < data_rank_size; ++idx) { + NODE_VALIDATION_CHECK(op, (*blocks)[idx] > 0, "block_shape values must be greater than 0"); + + const auto padded_dim = data_shape[idx] + static_cast(pads_begin[idx] + pads_end[idx]); + const auto divisor = static_cast((*blocks)[idx]); + + if (padded_dim.get_max_length() == dim::inf_bound) { + out_shape.emplace_back(ceil_div(padded_dim.get_min_length(), divisor), dim::inf_bound); + } else { + out_shape.push_back(padded_dim / divisor); + } + + check_divided_result(op, out_shape[idx], padded_dim, divisor); + } + } else { + out_shape.insert(out_shape.end(), data_rank_size - spatial_dim_offset, dim::inf_bound); + } + return {out_shape}; } else { return {PartialShape::dynamic()}; diff --git a/src/core/src/itt.hpp b/src/core/src/itt.hpp index 8501692d89f..5c527ecee2d 100644 --- a/src/core/src/itt.hpp +++ b/src/core/src/itt.hpp @@ -33,10 +33,6 @@ OV_CC_DOMAINS(ov_opset); */ #if defined(SELECTIVE_BUILD_ANALYZER) # define OV_OP_SCOPE(region) OV_SCOPE(ov_op, region) -# define OV_PASS_CALLBACK(matcher) \ - openvino::itt::handle_t m_callback_handle; \ - m_callback_handle = openvino::itt::handle(matcher->get_name()); \ - OV_ITT_SCOPED_TASK(SIMPLE_ov_pass, m_callback_handle) # define REGISTER_OP(opset_name, op_name) \ 
diff --git a/src/core/src/itt.hpp b/src/core/src/itt.hpp
index 8501692d89f..5c527ecee2d 100644
--- a/src/core/src/itt.hpp
+++ b/src/core/src/itt.hpp
@@ -33,10 +33,6 @@ OV_CC_DOMAINS(ov_opset);
 */
 #if defined(SELECTIVE_BUILD_ANALYZER)
 #    define OV_OP_SCOPE(region) OV_SCOPE(ov_op, region)
-#    define OV_PASS_CALLBACK(matcher)                                       \
-        openvino::itt::handle_t m_callback_handle;                          \
-        m_callback_handle = openvino::itt::handle(matcher->get_name());     \
-        OV_ITT_SCOPED_TASK(SIMPLE_ov_pass, m_callback_handle)
 #    define REGISTER_OP(opset_name, op_name) \
         OV_ITT_SCOPED_TASK(SIMPLE_ov_opset, openvino::itt::handle(opset_name + "_" + op_name))
 #    define INSERT_OP(opset_name, op_name, op_namespace) opset.insert<op_namespace::op_name>()
@@ -44,14 +40,12 @@ OV_CC_DOMAINS(ov_opset);
 #    define OV_OP_SCOPE(region)                                          \
         if (OV_CC_SCOPE_IS_ENABLED(OV_PP_CAT3(ov_op, _, region)) == 0)   \
         throw ngraph::ngraph_error(std::string(OV_PP_TOSTRING(OV_PP_CAT3(ov_op, _, region))) + " is disabled!")
-#    define OV_PASS_CALLBACK(matcher)
 #    define REGISTER_OP(opset_name, op_name)
 #    define INSERT_OP(opset_name, op_name, op_namespace)                                   \
         if (OV_CC_SCOPE_IS_ENABLED(OV_PP_CAT4(ov_opset_, opset_name, _, op_name)) == 1)    \
         opset.insert<op_namespace::op_name>()
 #else
 #    define OV_OP_SCOPE(region) OV_ITT_SCOPED_TASK(ov::itt::domains::ov_op, OV_PP_TOSTRING(region))
-#    define OV_PASS_CALLBACK(matcher)
 #    define REGISTER_OP(opset_name, op_name)
 #    define INSERT_OP(opset_name, op_name, op_namespace) opset.insert<op_namespace::op_name>()
 #endif
diff --git a/src/core/src/op/batch_to_space.cpp b/src/core/src/op/batch_to_space.cpp
index 6541a90765b..dfac266a0d0 100644
--- a/src/core/src/op/batch_to_space.cpp
+++ b/src/core/src/op/batch_to_space.cpp
@@ -78,55 +78,16 @@ bool ngraph::op::v1::BatchToSpace::visit_attributes(ngraph::AttributeVisitor& vi
 namespace {
 bool batch_to_space_evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) {
     auto data = inputs[0];
-    size_t elem_size = data->get_element_type().size();
+    const auto elem_size = data->get_element_type().size();
 
-    if (data->get_partial_shape().is_dynamic()) {
-        return false;
-    }
     auto data_shape = data->get_shape();
-    auto data_rank = data_shape.size();
-    if (data_rank < 2) {
-        return false;
-    }
 
-    size_t block_values_size = shape_size(inputs[1]->get_shape());
-    size_t crops_begin_size = shape_size(inputs[2]->get_shape());
-    size_t crops_end_size = shape_size(inputs[3]->get_shape());
-    NGRAPH_CHECK(block_values_size == data_rank && crops_begin_size == data_rank && crops_end_size == data_rank,
-                 "Invalid block_shape/crops_begin/crops_end shape with respect to rank of data input");
+    auto const block_values_size = shape_size(inputs[1]->get_shape());
 
     const auto* block_values = inputs[1]->get_data_ptr<int64_t>();
     const auto* crops_begin_values = inputs[2]->get_data_ptr<int64_t>();
     const auto* crops_end_values = inputs[3]->get_data_ptr<int64_t>();
 
-    const bool block_vals_valid = std::all_of(block_values, block_values + block_values_size, [](int64_t elem) {
-        return elem >= 1;
-    });
-    NGRAPH_CHECK(block_vals_valid, "Invalid element values of block_shape input");
-
-    const bool crops_begin_vals_valid =
-        std::all_of(crops_begin_values, crops_begin_values + crops_begin_size, [](int64_t elem) {
-            return elem >= 0;
-        });
-    const bool crops_end_vals_valid =
-        std::all_of(crops_end_values, crops_end_values + crops_end_size, [](int64_t elem) {
-            return elem >= 0;
-        });
-    NGRAPH_CHECK(crops_begin_vals_valid && crops_end_vals_valid,
-                 "Invalid element values of crops_begin/crops_end input/s");
-
-    const std::size_t block_prod =
-        std::accumulate(block_values, block_values + block_values_size, int64_t(1), std::multiplies<int64_t>());
-    NGRAPH_CHECK(data_shape[0] % block_prod == 0,
-                 "Invalid batch axis of data input with respect to block_shape values");
-
-    for (size_t i = 0; i < data_rank; i++) {
-        const bool is_valid_crops_and_shape =
-            crops_begin_values[i] + crops_end_values[i] <= block_values[i] * static_cast<int64_t>(data_shape[i]);
-        NGRAPH_CHECK(is_valid_crops_and_shape,
-                     "Invalid crops values (out of bounds) with respect to the shape of data input");
-    }
-
     ov::Shape dispersed_shape(1);
     dispersed_shape.insert(dispersed_shape.end(), data_shape.begin(), data_shape.end());
     std::vector<size_t> axes_order(block_values_size + 1);
@@ -214,6 +175,26 @@ bool ngraph::op::v1::BatchToSpace::evaluate(const HostTensorVector& outputs, con
     OV_OP_SCOPE(v1_BatchToSpace_evaluate);
     NGRAPH_CHECK(validate_host_tensor_vector(inputs, 4));
     NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1));
+
+    if (outputs[0]->get_partial_shape().is_dynamic()) {
+        std::map<size_t, HostTensorPtr> constant_data;
+        std::vector<PartialShape> input_shapes;
+        input_shapes.reserve(inputs.size());
+
+        for (size_t i = 0; i < inputs.size(); ++i) {
+            input_shapes.push_back(inputs[i]->get_partial_shape());
+            if (input_shapes.back().is_dynamic()) {
+                return false;
+            }
+            constant_data.emplace(i, inputs[i]);
+        }
+
+        const auto output_shape = shape_infer(this, input_shapes, constant_data).front().to_shape();
+
+        outputs[0]->set_element_type(inputs[0]->get_element_type());
+        outputs[0]->set_shape(output_shape);
+    }
+
     return batch_to_space_evaluate(outputs, inputs);
 }
diff --git a/src/core/src/op/space_to_batch.cpp b/src/core/src/op/space_to_batch.cpp
index a36cf37c752..92c9d95f20a 100644
--- a/src/core/src/op/space_to_batch.cpp
+++ b/src/core/src/op/space_to_batch.cpp
@@ -75,13 +75,29 @@ bool ngraph::op::v1::SpaceToBatch::visit_attributes(ngraph::AttributeVisitor& vi
 
 bool ngraph::op::v1::SpaceToBatch::evaluate_space_to_batch(const HostTensorVector& outputs,
                                                            const HostTensorVector& inputs) const {
+    if (outputs[0]->get_partial_shape().is_dynamic()) {
+        std::map<size_t, HostTensorPtr> constant_data;
+        std::vector<PartialShape> input_shapes;
+        input_shapes.reserve(inputs.size());
+
+        for (size_t i = 0; i < inputs.size(); ++i) {
+            input_shapes.push_back(inputs[i]->get_partial_shape());
+            if (input_shapes.back().is_dynamic()) {
+                return false;
+            }
+            constant_data.emplace(i, inputs[i]);
+        }
+
+        const auto output_shape = shape_infer(this, input_shapes, constant_data).front().to_shape();
+
+        outputs[0]->set_element_type(inputs[0]->get_element_type());
+        outputs[0]->set_shape(output_shape);
+    }
+
     const auto& data = inputs[0];
     const auto& out = outputs[0];
     size_t elem_size = data->get_element_type().size();
 
-    if (data->get_partial_shape().is_dynamic()) {
-        return false;
-    }
     auto data_shape = data->get_shape();
 
     if (!(data->get_shape().size() == 4 || data->get_shape().size() == 5)) {
@@ -188,6 +204,7 @@ bool ngraph::op::v1::SpaceToBatch::evaluate_space_to_batch(const HostTensorVecto
 
 bool ngraph::op::v1::SpaceToBatch::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
     OV_OP_SCOPE(v1_SpaceToBatch_evaluate);
+
     return evaluate_space_to_batch(outputs, inputs);
 }
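Both evaluate() implementations gain the same preamble: if output 0 still has a dynamic shape, feed the concrete input shapes and the input buffers into shape_infer() and allocate the output before computing. A generalized sketch of that preamble; the helper name is illustrative and `HostTensor` is the legacy ngraph tensor type used here:

```cpp
#include <map>
#include <vector>

#include "ngraph/runtime/host_tensor.hpp"

// Sketch: resolve a dynamic output shape from concrete inputs, as done in the
// BatchToSpace/SpaceToBatch evaluate() bodies above. Returns false when the
// shape cannot be resolved, matching the early-return in the patch.
template <class TOp>
bool resolve_dynamic_output(const TOp* op,
                            const ngraph::HostTensorVector& outputs,
                            const ngraph::HostTensorVector& inputs) {
    if (!outputs[0]->get_partial_shape().is_dynamic())
        return true;  // output already has a static shape

    std::map<size_t, ngraph::HostTensorPtr> constant_data;
    std::vector<ov::PartialShape> input_shapes;
    input_shapes.reserve(inputs.size());
    for (size_t i = 0; i < inputs.size(); ++i) {
        input_shapes.push_back(inputs[i]->get_partial_shape());
        if (input_shapes.back().is_dynamic())
            return false;  // cannot evaluate without fully defined inputs
        constant_data.emplace(i, inputs[i]);
    }
    // shape_infer() consumes the actual input buffers as "constant data".
    const auto out_shape = shape_infer(op, input_shapes, constant_data).front().to_shape();
    outputs[0]->set_element_type(inputs[0]->get_element_type());
    outputs[0]->set_shape(out_shape);
    return true;
}
```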
property)); -} - -void ov::pass::GraphRewrite::add_matcher(const std::shared_ptr& m, - const graph_rewrite_callback& callback) { - NGRAPH_SUPPRESS_DEPRECATED_START - // TODO: before deprecate this function, by default expect the - // callback require static shape. - add_matcher(m, callback, {PassProperty::REQUIRE_STATIC_SHAPE}); - NGRAPH_SUPPRESS_DEPRECATED_END -} - void ov::pass::GraphRewrite::set_pass_config(const std::shared_ptr& rhs) { auto pass_config = get_pass_config(); // We have to preserve disabled passes because in case when we register matchers inside diff --git a/src/core/src/pass/pass.cpp b/src/core/src/pass/pass.cpp index df487deb314..adde933e3c5 100644 --- a/src/core/src/pass/pass.cpp +++ b/src/core/src/pass/pass.cpp @@ -74,23 +74,6 @@ ov::pass::ModelPass::~ModelPass() = default; OPENVINO_SUPPRESS_DEPRECATED_START -bool ov::pass::ModelPass::run_on_model(const std::shared_ptr& m) { - RUN_ON_MODEL_SCOPE(ModelPass); - RunLocker locked(call_on_model); - OPENVINO_ASSERT(!call_on_function, - "Cycle detected. run_on_model() or run_on_function() method should be overridden."); - bool sts = run_on_function(m); - return sts; -} - -bool ov::pass::ModelPass::run_on_function(std::shared_ptr m) { - RUN_ON_FUNCTION_SCOPE(ModelPass); - RunLocker locked(call_on_function); - OPENVINO_ASSERT(!call_on_model, "Cycle detected. run_on_model() or run_on_function() method should be overridden."); - bool sts = run_on_model(m); - return sts; -} - NGRAPH_RTTI_DEFINITION(ngraph::pass::NodePass, "ngraph::pass::NodePass", 0); ngraph::pass::NodePass::~NodePass() = default; diff --git a/src/core/tests/graph_rewrite.cpp b/src/core/tests/graph_rewrite.cpp index 060c9229e98..c7fb67d243c 100644 --- a/src/core/tests/graph_rewrite.cpp +++ b/src/core/tests/graph_rewrite.cpp @@ -107,7 +107,7 @@ TEST(GraphRewriteTest, MatcherPassCallback) { Anchor anchor; anchor.add_matcher()->set_callback(get_callback()); - anchor.run_on_function(f); + anchor.run_on_model(f); ASSERT_EQ(count_ops_of_type(f), 1); } @@ -118,7 +118,7 @@ TEST(GraphRewriteTest, GraphRewriteCallback) { Anchor anchor; anchor.add_matcher(); anchor.set_callback(get_callback()); - anchor.run_on_function(f); + anchor.run_on_model(f); ASSERT_EQ(count_ops_of_type(f), 1); } @@ -129,7 +129,7 @@ TEST(GraphRewriteTest, ManagerCallbackDeprecated) { pass::Manager manager; auto anchor = manager.register_pass(); anchor->add_matcher(); - manager.set_callback(get_callback()); + manager.get_pass_config()->set_callback(get_callback()); manager.run_passes(f); ASSERT_EQ(count_ops_of_type(f), 1); @@ -153,7 +153,7 @@ TEST(GraphRewriteTest, ManagerCallback2) { pass::Manager manager; auto anchor = manager.register_pass(); - manager.set_callback(get_callback()); + manager.get_pass_config()->set_callback(get_callback()); manager.run_passes(f); ASSERT_EQ(count_ops_of_type(f), 1); @@ -179,7 +179,7 @@ TEST(GraphRewriteTest, MatcherPassCallbackDerived) { Anchor anchor; anchor.add_matcher()->set_callback(get_callback()); - anchor.run_on_function(f); + anchor.run_on_model(f); ASSERT_EQ(count_ops_of_type(f), 1); } @@ -228,7 +228,7 @@ TEST(GraphRewriteTest, TypeBasedMatcherPassCallback) { Anchor anchor; anchor.add_matcher()->set_callback(get_callback()); - anchor.run_on_function(f); + anchor.run_on_model(f); ASSERT_EQ(count_ops_of_type(f), 1); } @@ -238,7 +238,7 @@ TEST(GraphRewriteTest, TypeBasedMatcherPassCallbackDerived) { Anchor anchor; anchor.add_matcher()->set_callback(get_callback()); - anchor.run_on_function(f); + anchor.run_on_model(f); ASSERT_EQ(count_ops_of_type(f), 1); } @@ 
@@ -249,7 +249,7 @@ TEST(GraphRewriteTest, TypeBasedMatcherPassOrder1) {
     Anchor anchor;
     anchor.add_matcher()->set_callback(get_callback());
     anchor.add_matcher()->set_callback(get_callback());
-    anchor.run_on_function(f);
+    anchor.run_on_model(f);
 
     ASSERT_EQ(count_ops_of_type(f), 1);
 }
@@ -260,7 +260,7 @@ TEST(GraphRewriteTest, TypeBasedMatcherPassOrder2) {
     Anchor anchor;
     anchor.add_matcher()->set_callback(get_callback());
     anchor.add_matcher()->set_callback(get_callback());
-    anchor.run_on_function(f);
+    anchor.run_on_model(f);
 
     ASSERT_EQ(count_ops_of_type(f), 1);
 }
diff --git a/src/core/tests/matcher_pass.cpp b/src/core/tests/matcher_pass.cpp
index b16b233a766..79be164bae2 100644
--- a/src/core/tests/matcher_pass.cpp
+++ b/src/core/tests/matcher_pass.cpp
@@ -98,7 +98,7 @@ TEST(pattern, matcher_pass) {
 
     pass::GraphRewrite pass;
     pass.add_matcher();
-    pass.run_on_function(f);
+    pass.run_on_model(f);
 
     // Parameter->Relu->Result
     ASSERT_TRUE(f->get_ops().size() == 3);
diff --git a/src/core/tests/pass_config.cpp b/src/core/tests/pass_config.cpp
index ff011de1a94..3c9395aedc2 100644
--- a/src/core/tests/pass_config.cpp
+++ b/src/core/tests/pass_config.cpp
@@ -56,7 +56,7 @@ class TestFunctionPass : public ngraph::pass::FunctionPass {
 public:
     NGRAPH_RTTI_DECLARATION;
 
-    bool run_on_function(std::shared_ptr<Function> f) override {
+    bool run_on_model(const std::shared_ptr<ov::Model>& f) override {
         pass::Manager manager(get_pass_config());
 
         manager.register_pass();
diff --git a/src/core/tests/pass_manager.cpp b/src/core/tests/pass_manager.cpp
index 0940f2ba4a9..a99c5d558d1 100644
--- a/src/core/tests/pass_manager.cpp
+++ b/src/core/tests/pass_manager.cpp
@@ -37,7 +37,7 @@ namespace {
 class DummyPass : public pass::FunctionPass {
 public:
     DummyPass() {}
-    bool run_on_function(std::shared_ptr<Function> /* f */) override {
+    bool run_on_model(const std::shared_ptr<ov::Model>& /* f */) override {
         return false;
     }
 };
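`TestFunctionPass` above keeps working after the rename because a pass may hand its own `PassConfig` to a nested `Manager`; disabled passes and callbacks then propagate into the inner pipeline. A sketch of that design, with an illustrative pass name:

```cpp
#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/pass/manager.hpp"
#include "openvino/pass/pass.hpp"

// Sketch: a ModelPass that runs an inner pipeline under the shared PassConfig,
// so enabling/disabling passes on the outer Manager also affects the inner one.
class CompositePass : public ov::pass::ModelPass {
public:
    OPENVINO_RTTI("CompositePass");
    bool run_on_model(const std::shared_ptr<ov::Model>& model) override {
        ov::pass::Manager inner(get_pass_config());  // share, do not copy
        // inner.register_pass<SomeInnerPass>();     // illustrative registration
        inner.run_passes(model);
        return false;
    }
};
```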
m->clear_state(); + return false; + }, + ov::pass::PassProperty::REQUIRE_STATIC_SHAPE); + this->add_matcher(match_pass); } TestGraphRewrite() : GraphRewrite() { diff --git a/src/core/tests/type_prop/batch_to_space.cpp b/src/core/tests/type_prop/batch_to_space.cpp index 97a9fd57b88..f35afb75fc9 100644 --- a/src/core/tests/type_prop/batch_to_space.cpp +++ b/src/core/tests/type_prop/batch_to_space.cpp @@ -388,19 +388,19 @@ TEST(type_prop, batch_to_space_input_interval_shape_block_one) { } TEST(type_prop, batch_to_space_and_space_to_batch) { - auto data = make_shared(element::f32, Shape{4800, 9, 11, 2}); + auto data = make_shared(element::f32, PartialShape{4800, 9, {11, -1}, 2}); auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 12, 100, 2}); auto crops_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 38, 1}); auto crops_end = make_shared(element::i64, Shape{4}, vector{0, 5, 38, 0}); auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end); ASSERT_EQ(batch_to_space->get_element_type(), element::f32); - ASSERT_EQ(batch_to_space->get_shape(), - (Shape{4800 / (12 * 100 * 2), 9 * 12 - 3 - 5, 11 * 100 - 38 - 38, 2 * 2 - 1})); + ASSERT_EQ(batch_to_space->get_output_partial_shape(0), + (PartialShape{4800 / (12 * 100 * 2), 9 * 12 - 3 - 5, {11 * 100 - 38 - 38, -1}, 2 * 2 - 1})); auto space_to_batch = make_shared(batch_to_space, block_shape, crops_begin, crops_end); ASSERT_EQ(space_to_batch->get_element_type(), element::f32); - ASSERT_EQ(space_to_batch->get_shape(), (Shape{4800, 9, 11, 2})); + ASSERT_EQ(space_to_batch->get_output_partial_shape(0), (PartialShape{4800, 9, {11, -1}, 2})); } TEST(type_prop, batch_to_space_dynamic_shape_static_rank) { @@ -441,3 +441,37 @@ TEST(type_prop, batch_to_space_default_ctor) { EXPECT_EQ(batch_to_space->get_element_type(), element::i16); EXPECT_EQ(batch_to_space->get_shape(), (Shape{100 / (10 * 5), 7 * 10 - 3 - 3, 13 * 5 - 1, 3})); } + +TEST(type_prop, batch_to_space_non_const_inputs) { + auto data = make_shared(element::f32, PartialShape{100, 7, 13, 3}); + + auto block_shape = make_shared(element::i64, PartialShape{4}); + auto crops_begin = make_shared(element::i64, PartialShape{4}); + auto crops_end = make_shared(element::i64, PartialShape{4}); + auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end); + + EXPECT_EQ(batch_to_space->get_element_type(), element::f32); + EXPECT_EQ(batch_to_space->get_output_partial_shape(0), PartialShape::dynamic(4)); +} + +TEST(type_prop, batch_to_space_block_non_constant_only) { + auto data = make_shared(element::f32, PartialShape{100, 7, 13, 3}); + auto block_shape = make_shared(element::i64, PartialShape{4}); + auto crops_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 1, 0}); + auto crops_end = make_shared(element::i64, Shape{4}, vector{0, 3, 0, 0}); + auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end); + + EXPECT_EQ(batch_to_space->get_element_type(), element::f32); + EXPECT_EQ(batch_to_space->get_output_partial_shape(0), PartialShape({-1, {1, -1}, {12, -1}, {3, -1}})); +} + +TEST(type_prop, batch_to_space_crops_non_constant_only) { + auto data = make_shared(element::f32, PartialShape{100, 7, 13, 3}); + auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 2, 5, 1}); + auto crops_begin = make_shared(element::i64, PartialShape{4}); + auto crops_end = make_shared(element::i64, PartialShape{4}); + auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end); + + EXPECT_EQ(batch_to_space->get_element_type(), 
diff --git a/src/core/tests/type_prop/batch_to_space.cpp b/src/core/tests/type_prop/batch_to_space.cpp
index 97a9fd57b88..f35afb75fc9 100644
--- a/src/core/tests/type_prop/batch_to_space.cpp
+++ b/src/core/tests/type_prop/batch_to_space.cpp
@@ -388,19 +388,19 @@ TEST(type_prop, batch_to_space_input_interval_shape_block_one) {
 }
 
 TEST(type_prop, batch_to_space_and_space_to_batch) {
-    auto data = make_shared<op::Parameter>(element::f32, Shape{4800, 9, 11, 2});
+    auto data = make_shared<op::Parameter>(element::f32, PartialShape{4800, 9, {11, -1}, 2});
     auto block_shape = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 12, 100, 2});
     auto crops_begin = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 38, 1});
    auto crops_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 5, 38, 0});
     auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
 
     ASSERT_EQ(batch_to_space->get_element_type(), element::f32);
-    ASSERT_EQ(batch_to_space->get_shape(),
-              (Shape{4800 / (12 * 100 * 2), 9 * 12 - 3 - 5, 11 * 100 - 38 - 38, 2 * 2 - 1}));
+    ASSERT_EQ(batch_to_space->get_output_partial_shape(0),
+              (PartialShape{4800 / (12 * 100 * 2), 9 * 12 - 3 - 5, {11 * 100 - 38 - 38, -1}, 2 * 2 - 1}));
 
     auto space_to_batch = make_shared<op::v1::SpaceToBatch>(batch_to_space, block_shape, crops_begin, crops_end);
     ASSERT_EQ(space_to_batch->get_element_type(), element::f32);
-    ASSERT_EQ(space_to_batch->get_shape(), (Shape{4800, 9, 11, 2}));
+    ASSERT_EQ(space_to_batch->get_output_partial_shape(0), (PartialShape{4800, 9, {11, -1}, 2}));
 }
 
 TEST(type_prop, batch_to_space_dynamic_shape_static_rank) {
@@ -441,3 +441,37 @@ TEST(type_prop, batch_to_space_default_ctor) {
     EXPECT_EQ(batch_to_space->get_element_type(), element::i16);
     EXPECT_EQ(batch_to_space->get_shape(), (Shape{100 / (10 * 5), 7 * 10 - 3 - 3, 13 * 5 - 1, 3}));
 }
+
+TEST(type_prop, batch_to_space_non_const_inputs) {
+    auto data = make_shared<op::Parameter>(element::f32, PartialShape{100, 7, 13, 3});
+
+    auto block_shape = make_shared<op::Parameter>(element::i64, PartialShape{4});
+    auto crops_begin = make_shared<op::Parameter>(element::i64, PartialShape{4});
+    auto crops_end = make_shared<op::Parameter>(element::i64, PartialShape{4});
+    auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
+
+    EXPECT_EQ(batch_to_space->get_element_type(), element::f32);
+    EXPECT_EQ(batch_to_space->get_output_partial_shape(0), PartialShape::dynamic(4));
+}
+
+TEST(type_prop, batch_to_space_block_non_constant_only) {
+    auto data = make_shared<op::Parameter>(element::f32, PartialShape{100, 7, 13, 3});
+    auto block_shape = make_shared<op::Parameter>(element::i64, PartialShape{4});
+    auto crops_begin = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 1, 0});
+    auto crops_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 0, 0});
+    auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
+
+    EXPECT_EQ(batch_to_space->get_element_type(), element::f32);
+    EXPECT_EQ(batch_to_space->get_output_partial_shape(0), PartialShape({-1, {1, -1}, {12, -1}, {3, -1}}));
+}
+
+TEST(type_prop, batch_to_space_crops_non_constant_only) {
+    auto data = make_shared<op::Parameter>(element::f32, PartialShape{100, 7, 13, 3});
+    auto block_shape = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 2, 5, 1});
+    auto crops_begin = make_shared<op::Parameter>(element::i64, PartialShape{4});
+    auto crops_end = make_shared<op::Parameter>(element::i64, PartialShape{4});
+    auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
+
+    EXPECT_EQ(batch_to_space->get_element_type(), element::f32);
+    EXPECT_EQ(batch_to_space->get_output_partial_shape(0), PartialShape({10, -1, -1, -1}));
+}
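The reworked assertions above are plain interval arithmetic on dimensions: a spatial bound `{11, inf}` through BatchToSpace with block 100 and total crop 76 becomes `{11 * 100 - 76, inf}` = `{1024, inf}`, printed as `{1024, -1}`. A plain-integer sketch of that bound propagation (illustrative helper; `-1` models the unbounded upper end, as in the PartialShape literals):

```cpp
#include <algorithm>
#include <cstdint>
#include <utility>

// Sketch: propagate {min, max} bounds of one spatial dim through BatchToSpace.
// max == -1 stands for "unbounded", matching -1 in the tests' PartialShape literals.
std::pair<int64_t, int64_t> bts_bounds(std::pair<int64_t, int64_t> dim,
                                       int64_t block,
                                       int64_t crop_total) {
    const int64_t lo = std::max<int64_t>(0, dim.first * block - crop_total);
    const int64_t hi = dim.second < 0 ? -1 : dim.second * block - crop_total;
    return {lo, hi};
}
// bts_bounds({11, -1}, 100, 76) == {1024, -1}, the value asserted above.
```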
+TEST(type_prop, space_to_batch_non_const_inputs) {
+    auto data = make_shared<op::Parameter>(element::f32, PartialShape{100, 7, 13, 3});
+
+    auto block_shape = make_shared<op::Parameter>(element::i64, PartialShape{4});
+    auto pads_begin = make_shared<op::Parameter>(element::i64, PartialShape{4});
+    auto pads_end = make_shared<op::Parameter>(element::i64, PartialShape{4});
+    auto space_to_batch = make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);
+
+    EXPECT_EQ(space_to_batch->get_element_type(), element::f32);
+    EXPECT_EQ(space_to_batch->get_output_partial_shape(0), PartialShape::dynamic(4));
+}
+
+TEST(type_prop, space_to_batch_block_non_constant_only) {
+    auto data = make_shared<op::Parameter>(element::f32, PartialShape{100, 7, 13, 3});
+    auto block_shape = make_shared<op::Parameter>(element::i64, PartialShape{4});
+    auto pads_begin = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 1, 0});
+    auto pads_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 0, 0});
+    auto space_to_batch = make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);
+
+    EXPECT_EQ(space_to_batch->get_element_type(), element::f32);
+    EXPECT_EQ(space_to_batch->get_output_partial_shape(0), PartialShape::dynamic(4));
+}
+
+TEST(type_prop, space_to_batch_crops_non_constant_only) {
+    auto data = make_shared<op::Parameter>(element::f32, PartialShape{100, 7, 13, 3});
+
+    auto block_shape = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 2, 5, 1});
+    auto pads_begin = make_shared<op::Parameter>(element::i64, PartialShape{4});
+    auto pads_end = make_shared<op::Parameter>(element::i64, PartialShape{4});
+    auto space_to_batch = make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);
+
+    EXPECT_EQ(space_to_batch->get_element_type(), element::f32);
+    EXPECT_EQ(space_to_batch->get_output_partial_shape(0), PartialShape({1000, -1, -1, -1}));
 }
 
 TEST(type_prop, space_to_batch_invalid_element_type_block_shape) {
diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/add_convert_to_reorder.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/add_convert_to_reorder.cpp
index 2a79c1e35df..077425faf39 100644
--- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/add_convert_to_reorder.cpp
+++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/add_convert_to_reorder.cpp
@@ -39,9 +39,9 @@ public:
     std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>> CalculateRefs() override {
         // Convert the second input constant precision to i64 to run the reference function
         if (ngraph::element::Type_t::i8 == secondConstantType) {
-            ngraph::pass::ConvertPrecision<ngraph::element::Type_t::i8, ngraph::element::Type_t::i64>().run_on_function(functionRefs);
+            ngraph::pass::ConvertPrecision<ngraph::element::Type_t::i8, ngraph::element::Type_t::i64>().run_on_model(functionRefs);
         } else if (ngraph::element::Type_t::bf16 == secondConstantType) {
-            ngraph::pass::ConvertPrecision<ngraph::element::Type_t::bf16, ngraph::element::Type_t::i64>().run_on_function(functionRefs);
+            ngraph::pass::ConvertPrecision<ngraph::element::Type_t::bf16, ngraph::element::Type_t::i64>().run_on_model(functionRefs);
         }
         return LayerTestsUtils::LayerTestsCommon::CalculateRefs();
     }
diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_sum_broadcast.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_sum_broadcast.cpp
index 81249706415..ff4a10c4b0a 100644
--- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_sum_broadcast.cpp
+++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_sum_broadcast.cpp
@@ -203,8 +203,8 @@ public:
         using ngraph::pass::ConvertPrecision;
         ConcatConvSumInPlaceTest::SetUp();
         functionRefs = function->clone();
-        ngraph::pass::ConvertPrecision().run_on_function(functionRefs);
-        ngraph::pass::ConvertPrecision().run_on_function(functionRefs);
+        ngraph::pass::ConvertPrecision().run_on_model(functionRefs);
+        
ngraph::pass::ConvertPrecision().run_on_model(functionRefs); functionRefs->validate_nodes_and_infer_types(); } }; diff --git a/src/plugins/intel_gpu/src/graph/deconvolution.cpp b/src/plugins/intel_gpu/src/graph/deconvolution.cpp index af71244e3b9..e6a5c90585f 100644 --- a/src/plugins/intel_gpu/src/graph/deconvolution.cpp +++ b/src/plugins/intel_gpu/src/graph/deconvolution.cpp @@ -229,12 +229,6 @@ std::string deconvolution_inst::to_string(deconvolution_node const& node) { auto node_info = node.desc_to_json(); std::stringstream primitive_description; - std::stringstream ss_weights, ss_biases; - - ss_weights << node.weights().id(); - ss_weights << ", count: " << node.weights().get_output_layout().count(); - ss_biases << node.bias().id(); - ss_biases << ", count: " << node.bias().get_output_layout().count(); json_composite deconv_info; deconv_info.add("stride", cldnn::to_string(strd)); @@ -245,6 +239,17 @@ std::string deconvolution_inst::to_string(deconvolution_node const& node) { ud_out_size_info.add("size", desc->output_size.to_string()); deconv_info.add("with_user_defined_output_size", ud_out_size_info); } + std::stringstream ss_weights; + ss_weights << node.weights().id(); + ss_weights << ", count: " << node.weights().get_output_layout().count(); + deconv_info.add("weights", ss_weights.str()); + if (node.bias_term()) { + std::stringstream ss_biases; + ss_biases << node.bias().id(); + ss_biases << ", count: " << node.bias().get_output_layout().count(); + deconv_info.add("bias", ss_biases.str()); + } + node_info->add("deconvolution info", deconv_info); node_info->dump(primitive_description); return primitive_description.str(); diff --git a/src/plugins/intel_gpu/src/graph/program_helpers.cpp b/src/plugins/intel_gpu/src/graph/program_helpers.cpp index d9c9ef7513a..94fa74b248c 100644 --- a/src/plugins/intel_gpu/src/graph/program_helpers.cpp +++ b/src/plugins/intel_gpu/src/graph/program_helpers.cpp @@ -105,6 +105,10 @@ add_fusing_type onednn_add_fusing_helpers::get_add_fusing_type( auto p_layout = p_node.get_output_layout(); auto d_layout = dep_node.get_output_layout(); + if (p_node.is_dynamic() || dep_node.is_dynamic()) { + return add_fusing_type::not_supported; + } + if (is_full_tensor(p_layout) && is_full_tensor(d_layout)) { if (data_type_traits::size_of(p_layout.data_type) == data_type_traits::size_of(d_layout.data_type) && p_layout.format == d_layout.format && p_layout.get_tensor() == d_layout.get_tensor() diff --git a/src/plugins/intel_gpu/src/kernel_selector/jitter.cpp b/src/plugins/intel_gpu/src/kernel_selector/jitter.cpp index b56e043c975..985fe40bf96 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/jitter.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/jitter.cpp @@ -1140,7 +1140,10 @@ JitConstants MakeActivationJitConstants(ActivationFunction activation_function, jitConstants.AddConstant(MakeJitConstant(macro_def, "(input)")); break; case ActivationFunction::CEIL: - jitConstants.AddConstant(MakeJitConstant(macro_def, "(ceil(input))")); + if (out_dt == Datatype::F32 || out_dt == Datatype::F16) + jitConstants.AddConstant(MakeJitConstant(macro_def, "(ceil(input))")); + else + jitConstants.AddConstant(MakeJitConstant(macro_def, "(input)")); break; case ActivationFunction::NEGATIVE: jitConstants.AddConstant(MakeJitConstant(macro_def, "(-input)")); diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp index 
7a4e7f85fe8..db823d71950 100644
--- a/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp
+++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp
@@ -117,24 +117,26 @@ bool ScatterNDUpdateKernelRef::Validate(const Params& p, const optional_params&
     return true;
 }
 
-static std::string GetInputBlockND(const scatter_nd_update_params& params, size_t num, size_t rank) {
+static std::string GetInputBlockND(const scatter_nd_update_params& params, size_t num, size_t dyn_offset, size_t rank) {
     const auto& input = params.inputs[num];
     auto input_dims = input.LogicalDims();
     std::reverse(input_dims.begin(), input_dims.end());
+    auto dims = input.GetDims();
+    std::reverse(dims.begin(), dims.end());
 
     std::vector<size_t> block_nd(rank + 1);
     block_nd[rank] = 1;
     std::vector<std::string> block_nd_s(rank + 1);
     block_nd_s[rank] = "1";
-    size_t input_offset = num * 6;
+    size_t input_offset = dyn_offset * 6;
     for (int32_t idx = rank - 1; idx >= 0; --idx) {
         block_nd[idx] = input_dims[idx] * block_nd[idx + 1];
-        size_t dim_offset = idx < 2 ? idx : idx + 6 - rank;
-        block_nd_s[idx] = "(" + toCodeString(input.GetDims()[input.GetDims().size() - idx - 1], input_offset + dim_offset) + "*" + block_nd_s[idx + 1] + ")";
+        size_t dim_offset = idx < 2 ? idx : (6 - dims.size()) + idx;  // convert to 6d bfwzyx idx
+        block_nd_s[idx] = "(" + toCodeString(dims[idx], input_offset + dim_offset) + "*" + block_nd_s[idx + 1] + ")";
     }
 
     std::string result;
@@ -180,18 +182,26 @@ KernelsData ScatterNDUpdateKernelRef::GetKernelsData(const Params& params, const
         size_t input0_rank = newParams.inputs[0].LogicalDims().size();
         size_t input2_rank = newParams.inputs[2].LogicalDims().size();
         cldnn_jit.AddConstant(MakeJitConstant("IS_SECOND_ITER", "true"));
-        cldnn_jit.AddConstant(MakeJitConstant("INPUT0_BLOCK_ND", GetInputBlockND(newParams, 0, input0_rank)));
-        cldnn_jit.AddConstant(MakeJitConstant("INPUT1_BLOCK_ND", GetInputBlockND(newParams, 1, newParams.indices_rank - 1)));
-        cldnn_jit.AddConstant(MakeJitConstant("INPUT2_BLOCK_ND", GetInputBlockND(newParams, 2, input2_rank)));
+        size_t shape_info_offset = 0;
+        cldnn_jit.AddConstant(MakeJitConstant("INPUT0_BLOCK_ND", GetInputBlockND(newParams, 0, shape_info_offset, input0_rank)));
+        if (newParams.inputs[0].is_dynamic())
+            shape_info_offset++;
+        cldnn_jit.AddConstant(MakeJitConstant("INPUT1_BLOCK_ND", GetInputBlockND(newParams, 1, shape_info_offset, newParams.indices_rank - 1)));
+        if (newParams.inputs[1].is_dynamic())
+            shape_info_offset++;
+        cldnn_jit.AddConstant(MakeJitConstant("INPUT2_BLOCK_ND", GetInputBlockND(newParams, 2, shape_info_offset, input2_rank)));
         cldnn_jit.AddConstant(MakeJitConstant("INDICES_RANK", newParams.indices_rank));
 
         const auto& ind_input = newParams.inputs[1];
         if (ind_input.is_dynamic()) {
-            size_t last_idx = newParams.indices_rank - 1;
-            size_t dim_offset = last_idx < 2 ? last_idx : 5;
-            size_t input_idx = last_idx < 2 ? ind_input.GetDims().size() - last_idx : 0;
+            auto dims = ind_input.GetDims();
+            std::reverse(dims.begin(), dims.end());
 
-            auto indices_last_dim = toCodeString(ind_input.GetDims()[input_idx], 6 + dim_offset);
+            size_t last_idx = newParams.indices_rank - 1;
+            size_t dim_offset = last_idx < 2 ? last_idx : last_idx + 6 - newParams.indices_rank;
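+            // shape_info packs 6 (bfwzyx) dimension entries per dynamic input, so the
+            // indices offset must skip one 6-entry block when the data input is dynamic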
+            auto indices_last_dim = toCodeString(dims[last_idx], dim_offset + (newParams.inputs[0].is_dynamic() ? 6 : 0));
             cldnn_jit.AddConstant(MakeJitConstant("INDICES_LAST_DIM", indices_last_dim));
         } else {
             cldnn_jit.AddConstant(MakeJitConstant("INDICES_LAST_DIM", dispatchData.indicesLastDim));
diff --git a/src/plugins/intel_gpu/src/plugin/transformations/convert_pooling_to_reduce.cpp b/src/plugins/intel_gpu/src/plugin/transformations/convert_pooling_to_reduce.cpp
index b57d3ee3653..129ea68b733 100644
--- a/src/plugins/intel_gpu/src/plugin/transformations/convert_pooling_to_reduce.cpp
+++ b/src/plugins/intel_gpu/src/plugin/transformations/convert_pooling_to_reduce.cpp
@@ -25,8 +25,12 @@ ov::intel_gpu::ConvertAvgPoolingToReduce::ConvertAvgPoolingToReduce() {
         auto pads_begin = pool->get_pads_begin();
         auto pads_end = pool->get_pads_end();
 
-        int64_t rank = pool->get_input_partial_shape(0).size();
-        auto input_shape = pool->get_input_shape(0);
+        auto input = pool->input_value(0);
+        const auto input_shape = input.get_partial_shape();
+        if (input_shape.is_dynamic() || input_shape.rank().is_dynamic()) {
+            return false;
+        }
+        const auto rank = input_shape.rank().get_length();
         // Check if input spatial size is same with kernel size.
         bool has_same_spatial_size = rank > 2 && std::equal(input_shape.end() - (rank - 2), input_shape.end(), kernel.end() - (rank - 2));
         // Check if pads are zeros.
diff --git a/src/plugins/intel_gpu/tests/test_cases/scatter_nd_update_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/scatter_nd_update_gpu_test.cpp
index 67b5923fb3d..7bcba7be9c0 100644
--- a/src/plugins/intel_gpu/tests/test_cases/scatter_nd_update_gpu_test.cpp
+++ b/src/plugins/intel_gpu/tests/test_cases/scatter_nd_update_gpu_test.cpp
@@ -22,6 +22,18 @@
 using namespace cldnn;
 using namespace ::tests;
 
+namespace {
+template <typename T>
+T generate_random_val(int min, int max, int k = 8) {
+    static std::default_random_engine generator(random_seed);
+    // 1/k is the resolution of the floating point numbers
+    std::uniform_int_distribution<int> distribution(k * min, k * max);
+    T val = (T)distribution(generator);
+    val /= k;
+
+    return val;
+}
+}
 
 struct scatter_nd_update_basic_test_params
 {
@@ -51,17 +63,6 @@ struct scatter_nd_update_random_test : testing::TestWithParam<scatter_nd_update_basic_test_params>
-    template <typename T>
-    T generate_random_val(int min, int max, int k = 8) {
-        static std::default_random_engine generator(random_seed);
-        // 1/k is the resolution of the floating point numbers
-        std::uniform_int_distribution<int> distribution(k * min, k * max);
-        T val = (T)distribution(generator);
-        val /= k;
-
-        return val;
-    }
-
     template <typename T>
     std::vector<T> generate_unique_indices(const scatter_nd_update_basic_test_params& p) {
         std::set<std::vector<T>> unique_indices;
@@ -4460,6 +4461,114 @@ TEST(scatter_nd_update_gpu, dynamic) {
     }
 }
 
+TEST(scatter_nd_update_gpu, dynamic_5d) {
+    auto& engine = get_test_engine();
+
+    auto input1_layout = layout{{ 8, -1, -1, 384}, data_types::f32, format::bfyx };
+    auto input2_layout = layout{{-1, -1, -1, -1, -1}, data_types::i32, format::bfzyx };
+    auto input3_layout = layout{{-1, -1, -1, 384}, data_types::f32, format::bfyx };
+
+    topology topology;
+    topology.add(input_layout("data", input1_layout));
+    topology.add(input_layout("indices", input2_layout));
+    topology.add(input_layout("updates", input3_layout));
+    topology.add(scatter_nd_update("scatter_nd_update", input_info("data"), input_info("indices"), input_info("updates"), 5));
+
+    ExecutionConfig config;
+    config.set_property(ov::intel_gpu::allow_new_shape_infer(true));
+    network network(engine, topology, config);
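+
+    // reference results come from ngraph::runtime::reference::scatterNdUpdate, so the
+    // GPU output can be verified element by element for every shape in the loop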
+    auto get_expected_res = [](const std::vector<float>& input,
+                               const std::vector<int>& indices,
+                               const std::vector<float>& updates,
+                               ov::Shape input_shape,
+                               ov::Shape indices_shape,
+                               ov::Shape updates_shape) -> std::vector<float> {
+        size_t count = std::accumulate(input_shape.begin(), input_shape.end(), 1, std::multiplies<size_t>());
+        auto outputs_ref = std::vector<float>(count);
+        ngraph::runtime::reference::scatterNdUpdate(input.data(),
+                                                    indices.data(),
+                                                    updates.data(),
+                                                    outputs_ref.data(),
+                                                    input_shape,
+                                                    indices_shape,
+                                                    updates_shape);
+
+        return outputs_ref;
+    };
+
+
+    auto generate_unique_indices = [](ov::Shape data_shape, ov::Shape indices_shape) -> std::vector<int> {
+        std::set<std::vector<int>> unique_indices;
+        std::vector<int> result;
+        size_t last_indices_dim = indices_shape.at(indices_shape.size() - 1);
+
+        size_t count = std::accumulate(indices_shape.begin(), indices_shape.end(), 1, std::multiplies<size_t>()) / last_indices_dim;
+
+        while (unique_indices.size() != count) {
+            std::vector<int> indices;
+            for (size_t i = 0; i < last_indices_dim; i++) {
+                indices.push_back(static_cast<int>(generate_random_val<int>(0, data_shape[i] - 1)));
+            }
+
+            unique_indices.insert(indices);
+        }
+
+        std::for_each(unique_indices.begin(),
+                      unique_indices.end(),
+                      [&](const std::vector<int>& indices) {
+                          result.insert(result.end(), indices.begin(), indices.end());
+                      });
+
+        return result;
+    };
+
+    std::vector<std::vector<ov::Shape>> test_shapes = {
+        { { 8, 3, 1, 384 }, { 1, 3, 1, 384, 4 }, { 1, 3, 1, 384 } },
+        { { 8, 3, 2, 384 }, { 1, 3, 1, 384, 4 }, { 1, 3, 1, 384 } },
+    };
+
+    for (auto& shapes : test_shapes) {
+        ov::Shape in1_shape = shapes[0];
+        ov::Shape in2_shape = shapes[1];
+        ov::Shape in3_shape = shapes[2];
+        auto input1 = engine.allocate_memory({ in1_shape, data_types::f32, format::bfyx });  // Dictionary
+        auto input2 = engine.allocate_memory({ in2_shape, data_types::i32, format::bfzyx }); // Indexes
+        auto input3 = engine.allocate_memory({ in3_shape, data_types::f32, format::bfyx });  // Updates
+
+        std::vector<float> input_data = generate_random_1d<float>(input1->count(), 1, 100);
+        std::vector<int> indices = generate_unique_indices(in1_shape, in2_shape);
+        std::vector<float> updates = generate_random_1d<float>(input3->count(), 100, 200);
+        auto expected_res = get_expected_res(input_data, indices, updates, in1_shape, in2_shape, in3_shape);
+
+        set_values(input1, input_data);
+        set_values(input2, indices);
+        set_values(input3, updates);
+
+        network.set_input_data("data", input1);
+        network.set_input_data("indices", input2);
+        network.set_input_data("updates", input3);
+
+        auto inst = network.get_primitive("scatter_nd_update");
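+        // check that the dynamic-shape implementation was selected for the primitive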
+        auto impl = inst->get_impl();
+        ASSERT_TRUE(impl != nullptr);
+        ASSERT_TRUE(impl->is_dynamic());
+
+        auto outputs = network.execute();
+
+        auto output = outputs.at("scatter_nd_update").get_memory();
+        ASSERT_EQ(output->get_layout().get_partial_shape(), input1->get_layout().get_partial_shape());
+        cldnn::mem_lock<float> output_ptr(output, get_test_stream());
+
+        for (size_t i = 0; i < expected_res.size(); ++i) {
+            ASSERT_EQ(expected_res[i], output_ptr[i]) << " i = " << i;
+        }
+    }
+}
+
 #ifdef RUN_ALL_MODEL_CACHING_TESTS
 TEST_P(scatter_nd_update_random_test, random_cached) {
diff --git a/src/plugins/template/tests/functional/op_reference/batch_to_space.cpp b/src/plugins/template/tests/functional/op_reference/batch_to_space.cpp
index 3fd1e25ec7a..e9f5ce61384 100644
--- a/src/plugins/template/tests/functional/op_reference/batch_to_space.cpp
+++ b/src/plugins/template/tests/functional/op_reference/batch_to_space.cpp
@@ -33,7 +33,10 @@ public:
     void SetUp() override {
         auto params = GetParam();
         function = CreateFunction(params);
-        inputData = {params.dataTensor.data};
+        inputData = {params.dataTensor.data,
+                     params.blockShapeTensor.data,
+                     params.cropsBeginTensor.data,
+                     params.cropsEndTensor.data};
         refOutData = {params.expectedTensor.data};
     }
@@ -61,11 +64,12 @@ public:
 private:
     static std::shared_ptr<Model> CreateFunction(const BatchToSpaceParams& params) {
         const auto data = std::make_shared<opset1::Parameter>(params.dataTensor.type, params.dataTensor.shape);
-        const auto blockShape = std::make_shared<opset1::Constant>(element::i64, params.blockShapeTensor.shape, params.blockShapeTensor.data.data());
-        const auto cropsBegin = std::make_shared<opset1::Constant>(element::i64, params.cropsBeginTensor.shape, params.cropsBeginTensor.data.data());
-        const auto cropsEnd = std::make_shared<opset1::Constant>(element::i64, params.cropsEndTensor.shape, params.cropsEndTensor.data.data());
+        const auto blockShape = std::make_shared<opset1::Parameter>(element::i64, params.blockShapeTensor.shape);
+        const auto cropsBegin = std::make_shared<opset1::Parameter>(element::i64, params.cropsBeginTensor.shape);
+        const auto cropsEnd = std::make_shared<opset1::Parameter>(element::i64, params.cropsEndTensor.shape);
         const auto batchToSpace = std::make_shared<opset2::BatchToSpace>(data, blockShape, cropsBegin, cropsEnd);
-        return std::make_shared<Model>(NodeVector {batchToSpace}, ParameterVector {data});
+        return std::make_shared<Model>(NodeVector{batchToSpace},
+                                       ParameterVector{data, blockShape, cropsBegin, cropsEnd});
     }
 };
diff --git a/src/plugins/template/tests/functional/op_reference/space_to_batch.cpp b/src/plugins/template/tests/functional/op_reference/space_to_batch.cpp
index 1050f7cd54d..38210a96d95 100644
--- a/src/plugins/template/tests/functional/op_reference/space_to_batch.cpp
+++ b/src/plugins/template/tests/functional/op_reference/space_to_batch.cpp
@@ -34,7 +34,10 @@ public:
     void SetUp() override {
         auto params = GetParam();
         function = CreateFunction(params);
-        inputData = {params.dataTensor.data};
+        inputData = {params.dataTensor.data,
+                     params.blockShapeTensor.data,
+                     params.padsBeginTensor.data,
+                     params.padsEndTensor.data};
         refOutData = {params.expectedTensor.data};
     }
@@ -62,11 +65,12 @@ public:
 private:
     static std::shared_ptr<Model> CreateFunction(const SpaceToBatchParams& params) {
         const auto data = std::make_shared<opset1::Parameter>(params.dataTensor.type, params.dataTensor.shape);
-        const auto blockShape = std::make_shared<opset1::Constant>(element::i64, params.blockShapeTensor.shape, params.blockShapeTensor.data.data());
-        const auto padsBegin = std::make_shared<opset1::Constant>(element::i64, params.padsBeginTensor.shape, params.padsBeginTensor.data.data());
-        const auto padsEnd = std::make_shared<opset1::Constant>(element::i64, params.padsEndTensor.shape, params.padsEndTensor.data.data());
+        const auto blockShape = std::make_shared<opset1::Parameter>(element::i64, params.blockShapeTensor.shape);
+        const auto padsBegin = std::make_shared<opset1::Parameter>(element::i64, params.padsBeginTensor.shape);
+        const auto padsEnd = std::make_shared<opset1::Parameter>(element::i64, params.padsEndTensor.shape);
         const auto batchToSpace = std::make_shared<opset2::SpaceToBatch>(data, blockShape, padsBegin, padsEnd);
-        return std::make_shared<Model>(NodeVector {batchToSpace}, ParameterVector {data});
+        return std::make_shared<Model>(NodeVector{batchToSpace},
+                                       ParameterVector{data, blockShape, padsBegin, padsEnd});
     }
 };
diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp
index 18c4b6c0063..ee7ca6e0649 100644
--- a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp
+++ b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp
@@ -681,7 +681,7 @@ TEST_P(OVExecutableNetworkBaseTest, precisionsAsInOriginalIR) {
auto filePrefix = CommonTestUtils::generateTestFilePrefix(); const std::string m_out_xml_path_1 = filePrefix + "precisionsAsInOriginalIR.xml"; const std::string m_out_bin_path_1 = filePrefix + "precisionsAsInOriginalIR.bin"; - ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_function(function); + ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(function); ov::CompiledModel execNet; EXPECT_NO_THROW(execNet = core->compile_model(m_out_xml_path_1, target_device, configuration)); diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp index 024c8a3d34a..3109ee6bf3d 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp @@ -62,7 +62,7 @@ void AddTransformation::SetUp() { precision, inputShape, param.broadcast, param.fakeQuantize1, param.fakeQuantize2); - ov::pass::InitNodeInfo().run_on_function(function); + ov::pass::InitNodeInfo().run_on_model(function); } TEST_P(AddTransformation, CompareWithRefImpl) { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/elementwise_branch_selection_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/elementwise_branch_selection_transformation.cpp index 95acaa2b051..f511ffbc090 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/elementwise_branch_selection_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/elementwise_branch_selection_transformation.cpp @@ -67,7 +67,7 @@ void ElementwiseBranchSelectionTransformation::SetUp() { param.branch2.fakeQuantizeAfter, param.fakeQuantizeAfter); - ov::pass::InitNodeInfo().run_on_function(function); + ov::pass::InitNodeInfo().run_on_model(function); } void ElementwiseBranchSelectionTransformation::Run() { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp index bd77f795048..49087c50819 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp @@ -40,7 +40,7 @@ void FakeQuantizeAndAvgPoolTransformation::SetUp() { inputShape, fakeQuantize); - ov::pass::InitNodeInfo().run_on_function(function); + ov::pass::InitNodeInfo().run_on_model(function); } TEST_P(FakeQuantizeAndAvgPoolTransformation, CompareWithRefImpl) { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp index b86d33e9af5..e874d741a09 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp @@ -39,7 +39,7 @@ void FakeQuantizeAndMaxPoolTransformation::SetUp() { inputShape, fakeQuantize); - ov::pass::InitNodeInfo().run_on_function(function); + 
ov::pass::InitNodeInfo().run_on_model(function); } TEST_P(FakeQuantizeAndMaxPoolTransformation, CompareWithRefImpl) { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp index 42e3f413578..f68ed23dc08 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp @@ -44,7 +44,7 @@ void FakeQuantizePrecisionSelectionTransformation::SetUp() { testValues.actual.fakeQuantizeOnWeights }); - ov::pass::InitNodeInfo().run_on_function(function); + ov::pass::InitNodeInfo().run_on_model(function); } TEST_P(FakeQuantizePrecisionSelectionTransformation, CompareWithRefImpl) { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_transformation.cpp index e6a0893a44c..da139265eeb 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_transformation.cpp @@ -49,7 +49,7 @@ void FakeQuantizeTransformation::SetUp() { testParams.fakequantize, true); - ov::pass::InitNodeInfo().run_on_function(function); + ov::pass::InitNodeInfo().run_on_model(function); } void FakeQuantizeTransformation::Run() { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp index a72c49b932d..b1221d56c97 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp @@ -39,7 +39,7 @@ void FuseFakeQuantizeAndScaleShiftTransformation::SetUp() { inputShape, fakeQuantizeOnData); - ov::pass::InitNodeInfo().run_on_function(function); + ov::pass::InitNodeInfo().run_on_model(function); } TEST_P(FuseFakeQuantizeAndScaleShiftTransformation, CompareWithRefImpl) { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_transformation.cpp index f7c02dc0899..0dc3d899f79 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_transformation.cpp @@ -46,7 +46,7 @@ void FuseFakeQuantizeTransformation::SetUp() { testValues.actual.precisionAfterDequantization, testValues.actual.fakeQuantizeOnData); - ov::pass::InitNodeInfo().run_on_function(function); + ov::pass::InitNodeInfo().run_on_model(function); } TEST_P(FuseFakeQuantizeTransformation, CompareWithRefImpl) { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.cpp 
b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.cpp index 13e88dc976e..9e7a5a4b436 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.cpp @@ -35,7 +35,7 @@ void FuseMultiplyToFakeQuantizeTransformation::SetUp() { testValues.actual.fakeQuantizeOnData, testValues.actual.dequantization); - ov::pass::InitNodeInfo().run_on_function(function); + ov::pass::InitNodeInfo().run_on_model(function); } TEST_P(FuseMultiplyToFakeQuantizeTransformation, CompareWithRefImpl) { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.cpp index 211c027bc82..083376eb953 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.cpp @@ -35,7 +35,7 @@ void FuseSubtractToFakeQuantizeTransformation::SetUp() { testValues.actual.fakeQuantizeOnData, testValues.actual.dequantization); - ov::pass::InitNodeInfo().run_on_function(function); + ov::pass::InitNodeInfo().run_on_model(function); } TEST_P(FuseSubtractToFakeQuantizeTransformation, CompareWithRefImpl) { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp index 56e0caf1ac5..2ad47f13fb1 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp @@ -71,7 +71,7 @@ void MatMulTransformation::SetUp() { testValues.inputShape2, testValues.fqOnData2); - ov::pass::InitNodeInfo().run_on_function(function); + ov::pass::InitNodeInfo().run_on_model(function); } void MatMulTransformation::Run() { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp index 6542e1da04c..4592efc3343 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp @@ -70,7 +70,7 @@ void MatMulWithConstantTransformation::SetUp() { testValues.fqOnWeights, testValues.deqOnWeights); - ov::pass::InitNodeInfo().run_on_function(function); + ov::pass::InitNodeInfo().run_on_model(function); } void MatMulWithConstantTransformation::Run() { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp index af4af2109b0..3e718706c77 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp @@ -66,7 +66,7 @@ void 
MultiplyTransformation::SetUp() { param.fakeQuantizeAfter, param.secondInputIsConstant); - ov::pass::InitNodeInfo().run_on_function(function); + ov::pass::InitNodeInfo().run_on_model(function); } void MultiplyTransformation::Run() { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/prelu_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/prelu_transformation.cpp index bb541cd67a5..873a0d6f953 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/prelu_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/prelu_transformation.cpp @@ -54,7 +54,7 @@ void PReluTransformation::SetUp() { function = ngraph::builder::subgraph::PReluFunction::getOriginal(inputShape, precision, testValues.fakeQuantize); - ov::pass::InitNodeInfo().run_on_function(function); + ov::pass::InitNodeInfo().run_on_model(function); } TEST_P(PReluTransformation, CompareWithRefImpl) { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp index 75ac5037aec..0969a112211 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp @@ -54,7 +54,7 @@ void ReluTransformation::SetUp() { function = ngraph::builder::subgraph::ReluFunction::getOriginal(inputShape, precision, testValues.fakeQuantize); - ov::pass::InitNodeInfo().run_on_function(function); + ov::pass::InitNodeInfo().run_on_model(function); } TEST_P(ReluTransformation, CompareWithRefImpl) { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp index 3e2ec51d2e8..a1e791623a3 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp @@ -75,7 +75,7 @@ void SqueezeTransformation::SetUp() { squeezeParam.fakeQuantize, squeezeParam.squeezeAxes); - ov::pass::InitNodeInfo().run_on_function(function); + ov::pass::InitNodeInfo().run_on_model(function); } TEST_P(SqueezeTransformation, CompareWithRefImpl) { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp index 0f457537df3..cce6815222f 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp @@ -75,7 +75,7 @@ void UnsqueezeTransformation::SetUp() { unsqueezeParam.fakeQuantize, unsqueezeParam.unsqueezeAxes); - ov::pass::InitNodeInfo().run_on_function(function); + ov::pass::InitNodeInfo().run_on_model(function); } TEST_P(UnsqueezeTransformation, CompareWithRefImpl) { diff --git a/src/tests/functional/plugin/shared/src/snippets/codegen_gelu.cpp b/src/tests/functional/plugin/shared/src/snippets/codegen_gelu.cpp index db3d313d253..bb76016505b 100644 --- a/src/tests/functional/plugin/shared/src/snippets/codegen_gelu.cpp +++ b/src/tests/functional/plugin/shared/src/snippets/codegen_gelu.cpp @@ -64,8 +64,8 @@ 
namespace snippets {
             "CodegenGelu");
 
         if (useSubgraph) {
-            ov::pass::InitNodeInfo().run_on_function(function);
-            ngraph::pass::ConstantFolding().run_on_function(function);
+            ov::pass::InitNodeInfo().run_on_model(function);
+            ngraph::pass::ConstantFolding().run_on_model(function);
         }
     }
diff --git a/src/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp b/src/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp
index 71f29522cab..ea7befd88f3 100644
--- a/src/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp
+++ b/src/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp
@@ -417,8 +417,8 @@ void LayerTestsCommon::Infer() {
 }
 
 void LayerTestsCommon::ConvertRefsParams() {
-    ngraph::pass::ConvertPrecision<ngraph::element::Type_t::f16, ngraph::element::Type_t::f32>().run_on_function(functionRefs);
-    ngraph::pass::ConvertPrecision<ngraph::element::Type_t::bf16, ngraph::element::Type_t::f32>().run_on_function(functionRefs);
+    ngraph::pass::ConvertPrecision<ngraph::element::Type_t::f16, ngraph::element::Type_t::f32>().run_on_model(functionRefs);
+    ngraph::pass::ConvertPrecision<ngraph::element::Type_t::bf16, ngraph::element::Type_t::f32>().run_on_model(functionRefs);
 }
 
 std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>> LayerTestsCommon::CalculateRefs() {
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp b/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp
index 5b053833df1..c60c694a814 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp
@@ -82,7 +82,7 @@ void RandomUniformLayerTest::SetUp() {
 
 void RandomUniformLayerTest::ConvertRefsParams() {
     // we shouldn't use default conversion from f16 to f32
-    ngraph::pass::ConvertPrecision<ngraph::element::Type_t::bf16, ngraph::element::Type_t::f32>().run_on_function(
+    ngraph::pass::ConvertPrecision<ngraph::element::Type_t::bf16, ngraph::element::Type_t::f32>().run_on_model(
         functionRefs);
 }
 
diff --git a/src/tests/ie_test_utils/functional_test_utils/layer_tests_summary/merge_xmls.py b/src/tests/ie_test_utils/functional_test_utils/layer_tests_summary/merge_xmls.py
index 7a67d226d21..e32ecf275b8 100644
--- a/src/tests/ie_test_utils/functional_test_utils/layer_tests_summary/merge_xmls.py
+++ b/src/tests/ie_test_utils/functional_test_utils/layer_tests_summary/merge_xmls.py
@@ -10,6 +10,7 @@ from defusedxml import defuse_stdlib
 
 from utils.conformance_utils import get_logger
 from utils import stat_update_utils
+from utils.constants import OP_CONFORMANCE, API_CONFORMANCE
 
 # defuse_stdlib provide patched version of xml.etree.ElementTree which allows to use objects from xml.etree.ElementTree
 # in a safe manner without including unsafe xml.etree.ElementTree
@@ -83,9 +84,10 @@ def aggregate_test_results(aggregated_results: SubElement, xml_reports: list, re
             for xml_real_device_entry in xml_results_entry:
                 aggregated_real_device_api_report = aggregated_results_entry.find(xml_real_device_entry.tag)
                 if aggregated_real_device_api_report is None:
+                    stat_update_utils.update_rel_values(xml_results_entry)
                     aggregated_results_entry.append(xml_real_device_entry)
                     continue
                 update_result_node(xml_real_device_entry, aggregated_real_device_api_report)
     return aggregated_timestamp
 
 
@@ -95,13 +97,13 @@ def merge_xml(input_folder_paths: list, output_folder_paths: str, output_filenam
     summary = Element("report")
     results = SubElement(summary, "results")
     entity_name = None
-    if report_type == "OP":
+    if report_type == OP_CONFORMANCE.lower() or report_type == OP_CONFORMANCE:
         entity_name = "ops_list"
-    elif report_type == "API":
+    elif report_type == API_CONFORMANCE.lower() or report_type == API_CONFORMANCE:
         entity_name = "api_list"
     else:
        raise Exception(f"Error to create aggregated report. 
Incorrect report type: {report_type}") - + entity_list = SubElement(summary, entity_name) for folder_path in input_folder_paths: @@ -113,9 +116,9 @@ def merge_xml(input_folder_paths: list, output_folder_paths: str, output_filenam continue xml_reports = None - if report_type == "OP": + if report_type == OP_CONFORMANCE.lower() or report_type == OP_CONFORMANCE: xml_reports = glob.glob(os.path.join(folder_path, 'report_op*.xml')) - elif report_type == "API": + elif report_type == API_CONFORMANCE.lower() or report_type == API_CONFORMANCE: xml_reports = glob.glob(os.path.join(folder_path, 'report_api*.xml')) logger.info(f"Num of XML: {len(xml_reports)}") diff --git a/src/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/stat_update_utils.py b/src/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/stat_update_utils.py index 49b008750b8..ee0843c5dfd 100644 --- a/src/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/stat_update_utils.py +++ b/src/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/stat_update_utils.py @@ -6,12 +6,12 @@ import xml.etree.ElementTree as ET from . import conformance_utils def update_rel_values(xml_node: ET.SubElement): - if xml_node is None: + if xml_node is None or len(xml_node.attrib) == 0: return if not "relative_all" in xml_node.attrib: test_cnt = int(xml_node.attrib.get("passed")) + int(xml_node.attrib.get("failed")) + int(xml_node.attrib.get("skipped")) + \ int(xml_node.attrib.get("crashed")) + int(xml_node.attrib.get("hanged")) - xml_node.set("relative_all", test_cnt) + xml_node.set("relative_all", str(test_cnt)) if not "relative_passed" in xml_node.attrib: xml_node.set("relative_passed", xml_node.attrib.get("passed"))