diff --git a/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.cpp b/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.cpp index 8164c619203..5cd39a4a79a 100644 --- a/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.cpp +++ b/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.cpp @@ -355,11 +355,7 @@ StatusCode CNNNetworkNGraphImpl::reshape(const std::map(); ssr_manager.run_passes(_ngraph_function); - std::map reshapeShapes; - for (const auto& item : inputShapes) { - reshapeShapes[item.first] = ngraph::PartialShape(item.second); - } - reshape(reshapeShapes); + reshape(inputShapes); } catch (std::exception& ex) { reshape(originalInputShapes); return DescriptionBuffer(GENERAL_ERROR, responseDesc) << ex.what(); @@ -368,7 +364,7 @@ StatusCode CNNNetworkNGraphImpl::reshape(const std::map>& inputShapes, +StatusCode CNNNetworkNGraphImpl::reshape(const std::map& inputShapes, ResponseDesc* responseDesc) noexcept { std::map shapes; for (const auto& shape : inputShapes) diff --git a/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.hpp b/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.hpp index f0eae70fa90..77f445b92f9 100644 --- a/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.hpp +++ b/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.hpp @@ -73,8 +73,7 @@ public: virtual void validate(int = 10); - StatusCode reshape(const std::map>& inputShapes, - ResponseDesc* resp) noexcept override; + StatusCode reshape(const std::map& inputShapes, ResponseDesc* resp) noexcept override; StatusCode reshape(const std::map& inputShapes, ResponseDesc* resp) noexcept override; diff --git a/inference-engine/src/inference_engine/src/ie_core.cpp b/inference-engine/src/inference_engine/src/ie_core.cpp index 4dfc8f85de1..5809ec0a6d0 100644 --- a/inference-engine/src/inference_engine/src/ie_core.cpp +++ b/inference-engine/src/inference_engine/src/ie_core.cpp @@ 
-1298,22 +1298,22 @@ std::map Core::get_versions(const std::string& deviceName) } #ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT -std::shared_ptr Core::read_model(const std::wstring& modelPath, const std::wstring& binPath) const { +std::shared_ptr Core::read_model(const std::wstring& modelPath, const std::wstring& binPath) const { OV_CORE_CALL_STATEMENT( return _impl->ReadNetwork(ov::util::wstring_to_string(modelPath), ov::util::wstring_to_string(binPath)) .getFunction();); } #endif -std::shared_ptr Core::read_model(const std::string& modelPath, const std::string& binPath) const { +std::shared_ptr Core::read_model(const std::string& modelPath, const std::string& binPath) const { OV_CORE_CALL_STATEMENT(return _impl->ReadNetwork(modelPath, binPath).getFunction();); } -std::shared_ptr Core::read_model(const std::string& model, const ie::Blob::CPtr& weights) const { +std::shared_ptr Core::read_model(const std::string& model, const ie::Blob::CPtr& weights) const { OV_CORE_CALL_STATEMENT(return _impl->ReadNetwork(model, weights).getFunction();); } -ExecutableNetwork Core::compile_model(const std::shared_ptr& model, +ExecutableNetwork Core::compile_model(const std::shared_ptr& model, const std::string& deviceName, const ConfigMap& config) { OV_CORE_CALL_STATEMENT({ @@ -1333,7 +1333,7 @@ ExecutableNetwork Core::compile_model(const std::string& modelPath, }); } -ExecutableNetwork Core::compile_model(const std::shared_ptr& model, +ExecutableNetwork Core::compile_model(const std::shared_ptr& model, const RemoteContext& context, const ConfigMap& config) { OV_CORE_CALL_STATEMENT({ @@ -1382,7 +1382,7 @@ ExecutableNetwork Core::import_model(std::istream& modelStream, const RemoteCont }); } -SupportedOpsMap Core::query_model(const std::shared_ptr& model, +SupportedOpsMap Core::query_model(const std::shared_ptr& model, const std::string& deviceName, const ConfigMap& config) const { OV_CORE_CALL_STATEMENT({ diff --git a/inference-engine/src/multi_device/multi_device_exec_network.cpp 
b/inference-engine/src/multi_device/multi_device_exec_network.cpp index 778d1ff3f5e..a642b5a44a0 100644 --- a/inference-engine/src/multi_device/multi_device_exec_network.cpp +++ b/inference-engine/src/multi_device/multi_device_exec_network.cpp @@ -19,8 +19,6 @@ #include "multi_device_plugin.hpp" #include "ngraph/opsets/opset1.hpp" -#include "ngraph_ops/convolution_ie.hpp" -#include "ngraph_ops/deconvolution_ie.hpp" #include "transformations/utils/utils.hpp" // ------------------------------MultiDeviceExecutableNetwork---------------------------- @@ -38,9 +36,7 @@ std::string GetNetworkPrecision(const InferenceEngine::CNNNetwork &network) { if (std::dynamic_pointer_cast(node) || std::dynamic_pointer_cast(node) || std::dynamic_pointer_cast(node) || - std::dynamic_pointer_cast(node) || - std::dynamic_pointer_cast(node) || - std::dynamic_pointer_cast(node)) { + std::dynamic_pointer_cast(node)) { auto layerType = node->input(1).get_element_type().get_type_name(); if (layerType == "f32") return METRIC_VALUE(FP32); diff --git a/inference-engine/src/multi_device/multi_device_plugin.cpp b/inference-engine/src/multi_device/multi_device_plugin.cpp index d3e0dc303c1..1aba9427631 100644 --- a/inference-engine/src/multi_device/multi_device_plugin.cpp +++ b/inference-engine/src/multi_device/multi_device_plugin.cpp @@ -12,8 +12,6 @@ #include #include -#include "ngraph_ops/convolution_ie.hpp" -#include "ngraph_ops/deconvolution_ie.hpp" #include #include @@ -37,9 +35,7 @@ namespace { if (std::dynamic_pointer_cast(node) || std::dynamic_pointer_cast(node) || std::dynamic_pointer_cast(node) || - std::dynamic_pointer_cast(node) || - std::dynamic_pointer_cast(node) || - std::dynamic_pointer_cast(node)) { + std::dynamic_pointer_cast(node)) { auto layerType = node->input(1).get_element_type().get_type_name(); if (layerType == "f32") return METRIC_VALUE(FP32); diff --git a/inference-engine/src/transformations/include/transformations/init_node_info.hpp 
b/inference-engine/src/transformations/include/transformations/init_node_info.hpp index cd56a29aae2..d3a69094494 100644 --- a/inference-engine/src/transformations/include/transformations/init_node_info.hpp +++ b/inference-engine/src/transformations/include/transformations/init_node_info.hpp @@ -12,8 +12,6 @@ #include #include -#include - #include /** @@ -26,7 +24,7 @@ namespace ngraph { */ namespace pass { -class TRANSFORMATIONS_API InitNodeInfo; +class NGRAPH_API InitNodeInfo; } // namespace pass } // namespace ngraph diff --git a/inference-engine/src/transformations/include/transformations/rt_info/fused_names_attribute.hpp b/inference-engine/src/transformations/include/transformations/rt_info/fused_names_attribute.hpp index 720ce4f53b1..7ae7646fc4c 100644 --- a/inference-engine/src/transformations/include/transformations/rt_info/fused_names_attribute.hpp +++ b/inference-engine/src/transformations/include/transformations/rt_info/fused_names_attribute.hpp @@ -19,7 +19,6 @@ #include #include #include -#include namespace ngraph { @@ -29,7 +28,7 @@ namespace ngraph { * @brief FusedName class represents runtime info attribute that stores * all operation names that was fully or partially fused into node */ -class TRANSFORMATIONS_API FusedNames { +class NGRAPH_API FusedNames { private: std::set fused_names; @@ -72,7 +71,7 @@ public: * @brief getFusedNames return string with operation names separated by coma in alphabetical order * @param[in] node The node will be used to get FusedNames attribute */ -TRANSFORMATIONS_API std::string getFusedNames(const std::shared_ptr & node); +NGRAPH_API std::string getFusedNames(const std::shared_ptr & node); /** * @ingroup ie_runtime_attr_api @@ -80,16 +79,16 @@ TRANSFORMATIONS_API std::string getFusedNames(const std::shared_ptr getFusedNamesVector(const std::shared_ptr & node); +NGRAPH_API std::vector getFusedNamesVector(const std::shared_ptr & node); } // namespace ngraph namespace ov { -extern template class TRANSFORMATIONS_API 
VariantImpl; +extern template class NGRAPH_API VariantImpl; template<> -class TRANSFORMATIONS_API VariantWrapper : public VariantImpl { +class NGRAPH_API VariantWrapper : public VariantImpl { public: OPENVINO_RTTI("fused_names", "0"); @@ -105,7 +104,7 @@ public: }; template <> -class TRANSFORMATIONS_API AttributeAdapter> : public DirectValueAccessor> { +class NGRAPH_API AttributeAdapter> : public DirectValueAccessor> { public: OPENVINO_RTTI("AttributeAdapter>"); AttributeAdapter(std::set& value) : DirectValueAccessor>(value) {} diff --git a/inference-engine/src/transformations/include/transformations/rt_info/primitives_priority_attribute.hpp b/inference-engine/src/transformations/include/transformations/rt_info/primitives_priority_attribute.hpp index 0d48a6d15e9..5c461844ffd 100644 --- a/inference-engine/src/transformations/include/transformations/rt_info/primitives_priority_attribute.hpp +++ b/inference-engine/src/transformations/include/transformations/rt_info/primitives_priority_attribute.hpp @@ -17,7 +17,6 @@ #include #include -#include namespace ov { @@ -26,7 +25,7 @@ namespace ov { * @brief PrimitivesPriority class represents runtime info attribute that * can be used for plugins specific primitive choice. 
*/ -class TRANSFORMATIONS_API PrimitivesPriority { +class NGRAPH_API PrimitivesPriority { private: std::string primitives_priority; @@ -53,12 +52,12 @@ public: * @brief getPrimitivesPriority return string with primitive priorities value * @param[in] node The node will be used to get PrimitivesPriority attribute */ -TRANSFORMATIONS_API std::string getPrimitivesPriority(const std::shared_ptr & node); +NGRAPH_API std::string getPrimitivesPriority(const std::shared_ptr & node); -extern template class TRANSFORMATIONS_API VariantImpl; +extern template class NGRAPH_API VariantImpl; template<> -class TRANSFORMATIONS_API VariantWrapper : public VariantImpl { +class NGRAPH_API VariantWrapper : public VariantImpl { public: OPENVINO_RTTI("primitives_priority", "0"); diff --git a/inference-engine/src/transformations/include/transformations/smart_reshape/matmul_sr.hpp b/inference-engine/src/transformations/include/transformations/smart_reshape/matmul_sr.hpp index 14ca0839c7f..d869d54022c 100644 --- a/inference-engine/src/transformations/include/transformations/smart_reshape/matmul_sr.hpp +++ b/inference-engine/src/transformations/include/transformations/smart_reshape/matmul_sr.hpp @@ -7,16 +7,14 @@ #include #include -#include - #include namespace ngraph { namespace pass { -class TRANSFORMATIONS_API ReshapeAMatMul; -class TRANSFORMATIONS_API ReshapeBMatMul; -class TRANSFORMATIONS_API TransposeMatMul; +class NGRAPH_API ReshapeAMatMul; +class NGRAPH_API ReshapeBMatMul; +class NGRAPH_API TransposeMatMul; } // namespace pass } // namespace ngraph diff --git a/inference-engine/src/transformations/include/transformations/smart_reshape/mimic_set_batch_size.hpp b/inference-engine/src/transformations/include/transformations/smart_reshape/mimic_set_batch_size.hpp index 1b56a284199..e84c9a075b4 100644 --- a/inference-engine/src/transformations/include/transformations/smart_reshape/mimic_set_batch_size.hpp +++ 
b/inference-engine/src/transformations/include/transformations/smart_reshape/mimic_set_batch_size.hpp @@ -8,21 +8,12 @@ #include #include -#include -#include -#include -#include -#include -#include -#include - -#include #include namespace ngraph { namespace pass { -class TRANSFORMATIONS_API MimicSetBatchSize; +class NGRAPH_API MimicSetBatchSize; } // namespace pass } // namespace ngraph diff --git a/inference-engine/src/transformations/include/transformations/smart_reshape/proposal_scales_stridedslice.hpp b/inference-engine/src/transformations/include/transformations/smart_reshape/proposal_scales_stridedslice.hpp index 8a99395b773..ec40fcd21f0 100644 --- a/inference-engine/src/transformations/include/transformations/smart_reshape/proposal_scales_stridedslice.hpp +++ b/inference-engine/src/transformations/include/transformations/smart_reshape/proposal_scales_stridedslice.hpp @@ -7,15 +7,13 @@ #include #include -#include - #include namespace ngraph { namespace pass { -class TRANSFORMATIONS_API Proposal1Scales; -class TRANSFORMATIONS_API Proposal4Scales; +class NGRAPH_API Proposal1Scales; +class NGRAPH_API Proposal4Scales; } // namespace pass } // namespace ngraph diff --git a/inference-engine/src/transformations/include/transformations/smart_reshape/reshape_to_1D.hpp b/inference-engine/src/transformations/include/transformations/smart_reshape/reshape_to_1D.hpp index 5832343ecec..d6d37520856 100644 --- a/inference-engine/src/transformations/include/transformations/smart_reshape/reshape_to_1D.hpp +++ b/inference-engine/src/transformations/include/transformations/smart_reshape/reshape_to_1D.hpp @@ -7,14 +7,12 @@ #include #include -#include - #include namespace ngraph { namespace pass { -class TRANSFORMATIONS_API ReshapeTo1D; +class NGRAPH_API ReshapeTo1D; } // namespace pass } // namespace ngraph diff --git a/inference-engine/src/transformations/include/transformations/smart_reshape/set_batch_size.hpp 
b/inference-engine/src/transformations/include/transformations/smart_reshape/set_batch_size.hpp index 4131fd0253f..e50af360b59 100644 --- a/inference-engine/src/transformations/include/transformations/smart_reshape/set_batch_size.hpp +++ b/inference-engine/src/transformations/include/transformations/smart_reshape/set_batch_size.hpp @@ -7,15 +7,12 @@ #include #include -#include - #include - namespace ngraph { namespace pass { -class TRANSFORMATIONS_API SetBatchSize; +class NGRAPH_API SetBatchSize; } // namespace pass } // namespace ngraph diff --git a/inference-engine/src/transformations/include/transformations/smart_reshape/smart_reshape.hpp b/inference-engine/src/transformations/include/transformations/smart_reshape/smart_reshape.hpp index 75a7bd9f7a6..f6830ee32cd 100644 --- a/inference-engine/src/transformations/include/transformations/smart_reshape/smart_reshape.hpp +++ b/inference-engine/src/transformations/include/transformations/smart_reshape/smart_reshape.hpp @@ -7,15 +7,12 @@ #include #include -#include - #include - namespace ngraph { namespace pass { -class TRANSFORMATIONS_API SmartReshape; +class NGRAPH_API SmartReshape; } // namespace pass } // namespace ngraph diff --git a/inference-engine/src/transformations/include/transformations/smart_reshape/strided_slice_squeeze.hpp b/inference-engine/src/transformations/include/transformations/smart_reshape/strided_slice_squeeze.hpp index 2c002e316ea..683c4062d4e 100644 --- a/inference-engine/src/transformations/include/transformations/smart_reshape/strided_slice_squeeze.hpp +++ b/inference-engine/src/transformations/include/transformations/smart_reshape/strided_slice_squeeze.hpp @@ -7,16 +7,14 @@ #include #include -#include - #include namespace ngraph { namespace pass { -class TRANSFORMATIONS_API StridedSliceSqueeze; -class TRANSFORMATIONS_API SqueezeStridedSlice; -class TRANSFORMATIONS_API SharedSqueeze; +class NGRAPH_API StridedSliceSqueeze; +class NGRAPH_API SqueezeStridedSlice; +class NGRAPH_API 
SharedSqueeze; } // namespace pass } // namespace ngraph diff --git a/inference-engine/src/transformations/src/transformations/smart_reshape/mimic_set_batch_size.cpp b/inference-engine/src/transformations/src/transformations/smart_reshape/mimic_set_batch_size.cpp deleted file mode 100644 index c2a6237985b..00000000000 --- a/inference-engine/src/transformations/src/transformations/smart_reshape/mimic_set_batch_size.cpp +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "itt.hpp" -#include -#include - -NGRAPH_RTTI_DEFINITION(ngraph::pass::MimicSetBatchSize, "MimicSetBatchSize", 0); - -bool ngraph::pass::MimicSetBatchSize::run_on_function(std::shared_ptr f) { - RUN_ON_FUNCTION_SCOPE(MimicSetBatchSize); - // extracting ratio of out to in 0-index dimension value from the folded function - auto specialized_function = ngraph::clone_function(*f); - ngraph::pass::Manager manager; - manager.register_pass(); - manager.run_passes(specialized_function); - - std::map scale; - for (const auto & node : specialized_function->get_ops()) { - if (const auto & reshape = std::dynamic_pointer_cast(node)) { - const auto in_pshape = reshape->get_input_partial_shape(0), out_pshape = reshape->get_output_partial_shape(0); - if (in_pshape.rank().is_dynamic() || in_pshape.rank().get_length() <= 1 || in_pshape[0].is_dynamic() || - out_pshape.rank().is_dynamic() || out_pshape.rank().get_length() <= 1 || out_pshape[0].is_dynamic()) - continue; - const auto & pattern = std::dynamic_pointer_cast(reshape->get_input_node_shared_ptr(1)); - if (pattern && pattern->cast_vector()[0] > 0) { - scale[reshape->get_friendly_name()] = static_cast(out_pshape[0].get_length()) / static_cast(in_pshape[0].get_length()); - } - } - } - // apply transformation to original function - bool transformed = false; - for (auto & reshape : f->get_ops()) { - if (!is_type(reshape) || !scale.count(reshape->get_friendly_name()) || 
reshape->get_output_partial_shape(0).rank().is_dynamic()) - continue; - - const auto & shape_of = std::make_shared(reshape->get_input_source_output(0), reshape->get_input_element_type(1)); - const auto & new_input_batch = std::make_shared( - shape_of, ngraph::opset5::Constant::create(ngraph::element::i64, {1}, std::vector{0}), - ngraph::opset5::Constant::create(ngraph::element::i64, {}, std::vector{0})); - - const std::shared_ptr & new_output_batch = std::make_shared( - std::make_shared( - std::make_shared( - std::make_shared(new_input_batch, element::f32), - opset5::Constant::create(element::f32, {1}, {scale[reshape->get_friendly_name()]}))), - reshape->get_input_element_type(1)); - - std::vector non_batch_dims(reshape->get_output_partial_shape(0).rank().get_length() - 1); - std::iota(non_batch_dims.begin(), non_batch_dims.end(), 1); - const auto & non_batch_dims_node = std::make_shared( - reshape->input_value(1), - ngraph::opset5::Constant::create(ngraph::element::i64, {non_batch_dims.size()}, non_batch_dims), - ngraph::opset5::Constant::create(ngraph::element::i64, {}, std::vector{0})); - auto new_reshape_pattern = std::make_shared(OutputVector{new_output_batch, non_batch_dims_node}, 0); - reshape->input(1).replace_source_output(new_reshape_pattern->output(0)); - transformed = true; - } - return transformed; -} diff --git a/inference-engine/src/transformations/src/transformations/smart_reshape/proposal_scales_stridedslice.cpp b/inference-engine/src/transformations/src/transformations/smart_reshape/proposal_scales_stridedslice.cpp deleted file mode 100644 index 4e5c2e1b932..00000000000 --- a/inference-engine/src/transformations/src/transformations/smart_reshape/proposal_scales_stridedslice.cpp +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "itt.hpp" -#include - -#include -#include -#include -#include -#include -#include -#include - -bool crop_scales_for_proposal(const 
ngraph::pattern::PatternValueMap & pattern_to_output, - std::shared_ptr parameter_label, std::shared_ptr proposal_label) { - const auto & parameter = pattern_to_output.at(parameter_label); - const auto & proposal = pattern_to_output.at(proposal_label).get_node_shared_ptr(); - - auto cropped_scales = std::make_shared( - proposal->input_value(2), - ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}), - ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {parameter.get_partial_shape()[1].get_length()}), - ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}), - std::vector{0}, std::vector{0}); - - proposal->input(2).replace_source_output(cropped_scales->output(0)); - return true; -} - -NGRAPH_RTTI_DEFINITION(ngraph::pass::Proposal1Scales, "Proposal1Scales", 0); - -ngraph::pass::Proposal1Scales::Proposal1Scales() { - MATCHER_SCOPE(Proposal1Scales); - auto parameter_label = ngraph::pattern::wrap_type([](const Output &output) { - const auto & shape = output.get_partial_shape(); - return shape.rank().is_static() && shape.rank().get_length() == 2 && shape[1].is_static() && (shape[1].get_length() == 3 || shape[1].get_length() == 4); - }); - auto reshape_label = ngraph::pattern::wrap_type({parameter_label, ngraph::pattern::wrap_type()}, - [](const Output &output) { return output.get_partial_shape().rank().is_static() && output.get_partial_shape().rank().get_length() == 1; }); - auto proposal_label = ngraph::pattern::wrap_type({pattern::any_input(), pattern::any_input(), reshape_label}); - - matcher_pass_callback callback = [parameter_label, proposal_label](pattern::Matcher &m) -> bool { - return crop_scales_for_proposal(m.get_pattern_value_map(), parameter_label, proposal_label); - }; - auto m = std::make_shared(proposal_label, matcher_name); - register_matcher(m, callback); -} - -NGRAPH_RTTI_DEFINITION(ngraph::pass::Proposal4Scales, "Proposal4Scales", 0); - 
-ngraph::pass::Proposal4Scales::Proposal4Scales() { - MATCHER_SCOPE(Proposal4Scales); - auto parameter_label = ngraph::pattern::wrap_type([](const Output &output) { - const auto & shape = output.get_partial_shape(); - return shape.rank().is_static() && shape.rank().get_length() == 2 && shape[1].is_static() && (shape[1].get_length() == 3 || shape[1].get_length() == 4); - }); - auto reshape_label = ngraph::pattern::wrap_type({parameter_label, ngraph::pattern::wrap_type()}, - [](const Output &output) { return output.get_partial_shape().rank().is_static() && output.get_partial_shape().rank().get_length() == 1; }); - auto proposal_label = ngraph::pattern::wrap_type({pattern::any_input(), pattern::any_input(), reshape_label}); - - matcher_pass_callback callback = [parameter_label, proposal_label](pattern::Matcher &m) -> bool { - return crop_scales_for_proposal(m.get_pattern_value_map(), parameter_label, proposal_label); - }; - auto m = std::make_shared(proposal_label, matcher_name); - register_matcher(m, callback); -} diff --git a/inference-engine/tests/functional/inference_engine/ngraph_reshape_tests.cpp b/inference-engine/tests/functional/inference_engine/ngraph_reshape_tests.cpp index 59d19455011..288731240a3 100644 --- a/inference-engine/tests/functional/inference_engine/ngraph_reshape_tests.cpp +++ b/inference-engine/tests/functional/inference_engine/ngraph_reshape_tests.cpp @@ -81,97 +81,6 @@ TEST_F(NGraphReshapeTests, ReshapedDynamicShapeLayout) { ASSERT_FALSE(cnnNetwork.getInputsInfo()["A"]->getInputData()->isDynamic()); } -TEST_F(NGraphReshapeTests, ReshapeBatchReLU) { - std::shared_ptr ngraph; - { - ngraph::PartialShape shape({1, 3, 22, 22}); - ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); - auto relu = std::make_shared(param); - auto result = std::make_shared(relu); - - ngraph::ParameterVector params = {param}; - ngraph::ResultVector results = {result}; - - ngraph = std::make_shared(results, params); - 
} - - ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ngraph::Shape({1, 3, 22, 22})); - ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ngraph::Shape({1, 3, 22, 22})); - - { - ngraph::PartialShape shape({2, 3, 22, 22}); - ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); - - ngraph->replace_parameter(0, param); - ngraph->validate_nodes_and_infer_types(); - } - - ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ngraph::Shape({2, 3, 22, 22})); - ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ngraph::Shape({2, 3, 22, 22})); -} - -TEST_F(NGraphReshapeTests, ReshapeSpatialReLU) { - std::shared_ptr ngraph; - { - ngraph::PartialShape shape({1, 3, 22, 22}); - ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); - auto relu = std::make_shared(param); - auto result = std::make_shared(relu); - - ngraph::ParameterVector params = {param}; - ngraph::ResultVector results = {result}; - - ngraph = std::make_shared(results, params); - } - - ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ngraph::Shape({1, 3, 22, 22})); - ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ngraph::Shape({1, 3, 22, 22})); - - { - ngraph::PartialShape shape({1, 3, 25, 25}); - ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); - - ngraph->replace_parameter(0, param); - ngraph->validate_nodes_and_infer_types(); - } - - ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ngraph::Shape({1, 3, 25, 25})); - ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ngraph::Shape({1, 3, 25, 25})); -} - -TEST_F(NGraphReshapeTests, ReshapeSpatialReLUWithoutReplaceParameter) { - std::shared_ptr ngraph; - { - ngraph::PartialShape shape({1, 3, 22, 22}); - ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); - auto relu = std::make_shared(param); - auto result = std::make_shared(relu); - - 
ngraph::ParameterVector params = {param}; - ngraph::ResultVector results = {result}; - - ngraph = std::make_shared(results, params); - } - - ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ngraph::Shape({1, 3, 22, 22})); - ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ngraph::Shape({1, 3, 22, 22})); - - { - ngraph->get_parameters()[0]->set_partial_shape({1, 3, 25, 25}); - - ngraph->validate_nodes_and_infer_types(); - } - - ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ngraph::Shape({1, 3, 25, 25})); - ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ngraph::Shape({1, 3, 25, 25})); -} - - TEST_F(NGraphReshapeTests, CNNReshapeSpatialReLU) { std::shared_ptr ngraph; { diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request/infer_requset_dynamic.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request/infer_requset_dynamic.cpp index 3ca90740b4d..8d403d70091 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request/infer_requset_dynamic.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request/infer_requset_dynamic.cpp @@ -24,6 +24,7 @@ std::shared_ptr getFunction1() { auto params = ngraph::builder::makeParams(ngPrc, {inputShape}); params.front()->set_friendly_name("Param_1"); + params.front()->get_output_tensor(0).set_names({"Tensor_1"}); auto in2add = ngraph::builder::makeConstant(ngPrc, {1, 4, 1, 1}, std::vector{}, true); auto add = ngraph::builder::makeEltwise(params[0], in2add, ngraph::helpers::EltwiseTypes::ADD); @@ -40,6 +41,7 @@ std::shared_ptr getFunction2() { auto params = ngraph::builder::makeParams(ngPrc, {inputShape}); params.front()->set_friendly_name("Param_1"); + params.front()->get_output_tensor(0).set_names({"Tensor_1"}); auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1); auto in2add = ngraph::builder::makeConstant(ngPrc, {1, 2, 1, 1}, std::vector{}, 
true); diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request/infer_request_dynamic.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request/infer_request_dynamic.hpp index 2b2f4d09c00..def04051dcd 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request/infer_request_dynamic.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request/infer_request_dynamic.hpp @@ -6,16 +6,19 @@ #include #include +#include #include #include #include #include +#include "functional_test_utils/ov_plugin_cache.hpp" #include "ie_extension.h" #include +#include "openvino/core/shape.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "ngraph_functions/utils/ngraph_helpers.hpp" #include "ngraph_functions/builders.hpp" -#include +#include "transformations/utils/utils.hpp" #include #include #include @@ -30,7 +33,7 @@ namespace BehaviorTestsDefinitions { typedef std::tuple< - std::shared_ptr, // ngraph function + std::shared_ptr, // ov function std::vector, std::vector>>, // input/expected output shapes per inference std::string, // Device name std::map // Config @@ -40,7 +43,7 @@ class InferRequestDynamicTests : public testing::WithParamInterface obj) { - std::shared_ptr func; + std::shared_ptr func; std::vector, std::vector>> inOutShapes; std::string targetDevice; std::map configuration; @@ -69,276 +72,241 @@ protected: void TearDown() override { if (!configuration.empty()) { - PluginCache::get().reset(); + ov::test::PluginCache::get().reset(); } function.reset(); } - std::shared_ptr ie = PluginCache::get().ie(); - std::shared_ptr function; + std::shared_ptr ie = ov::test::PluginCache::get().core(); + std::shared_ptr function; std::string targetDevice; std::map configuration; std::vector, std::vector>> inOutShapes; }; TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithoutSetShape) { - const std::string param_name = "Param_1"; - // Create 
CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - std::map shapes; - shapes[param_name] = {ngraph::Dimension::dynamic(), 4, 20, 20}; - cnnNet.reshape(shapes); - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + const std::string tensor_name = "Tensor_1"; + std::map shapes; + shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20}; + ASSERT_NO_THROW(function->reshape(shapes)); + // Load ov::Function to target plugins + auto execNet = ie->compile_model(function, targetDevice, configuration); // Create InferRequest - InferenceEngine::InferRequest req; - InferenceEngine::Blob::Ptr blob; - ASSERT_NO_THROW(req = execNet.CreateInferRequest()); - ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + ov::runtime::InferRequest req; + ov::runtime::Tensor tensor; + ASSERT_NO_THROW(req = execNet.create_infer_request()); + ASSERT_NO_THROW(tensor = req.get_tensor(function->get_parameters().back()->get_friendly_name())); } TEST_P(InferRequestDynamicTests, InferDynamicNetworkBoundWithoutSetShape) { - const std::string param_name = "Param_1"; - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - std::map shapes; - shapes[param_name] = {ngraph::Dimension(0, 5), 4, 20, 20}; - cnnNet.reshape(shapes); - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + const std::string tensor_name = "Tensor_1"; + std::map shapes; + shapes[tensor_name] = {ov::Dimension(0, 5), 4, 20, 20}; + ASSERT_NO_THROW(function->reshape(shapes)); + // Load ov::Function to target plugins + auto execNet = ie->compile_model(function, targetDevice, configuration); // Create InferRequest - InferenceEngine::InferRequest req; - InferenceEngine::Blob::Ptr blob; - ASSERT_NO_THROW(req = execNet.CreateInferRequest()); - ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + 
ov::runtime::InferRequest req; + ov::runtime::Tensor tensor; + ASSERT_NO_THROW(req = execNet.create_infer_request()); + ASSERT_NO_THROW(tensor = req.get_tensor(function->get_parameters().back()->get_friendly_name())); } -TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithGetBlob) { - const std::string param_name = "Param_1"; - const InferenceEngine::SizeVector refShape = inOutShapes[0].first; - const InferenceEngine::SizeVector refOutShape = inOutShapes[0].second; - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - std::map shapes; - shapes[param_name] = {ngraph::Dimension::dynamic(), 4, 20, 20}; - cnnNet.reshape(shapes); - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); +TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithGetTensor) { + const std::string tensor_name = "Tensor_1"; + const ov::Shape refShape = inOutShapes[0].first; + const ov::Shape refOutShape = inOutShapes[0].second; + std::map shapes; + shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20}; + ASSERT_NO_THROW(function->reshape(shapes)); + // Load ov::Function to target plugins + auto execNet = ie->compile_model(function, targetDevice, configuration); // Create InferRequest - InferenceEngine::InferRequest req; - InferenceEngine::Blob::Ptr blob; - ASSERT_NO_THROW(req = execNet.CreateInferRequest()); - //ASSERT_NO_THROW(req.SetShape(param_name, {1, 4, 20, 20})); - ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); - ASSERT_NO_THROW(blob->setShape({1, 4, 20, 20})); - ASSERT_EQ(blob->getTensorDesc().getDims(), refShape); - req.Infer(); - req.StartAsync(); - InferenceEngine::StatusCode sts; - sts = req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY); - ASSERT_EQ(InferenceEngine::StatusCode::OK, sts); - ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first)); - ASSERT_EQ(blob->getTensorDesc().getDims(), refOutShape); + 
ov::runtime::InferRequest req; + ov::runtime::Tensor tensor; + ASSERT_NO_THROW(req = execNet.create_infer_request()); + //ASSERT_NO_THROW(req.SetShape(tensor_name, {1, 4, 20, 20})); + ASSERT_NO_THROW(tensor = req.get_tensor(function->get_parameters().back()->get_friendly_name())); + ASSERT_NO_THROW(tensor.set_shape({1, 4, 20, 20})); + ASSERT_EQ(tensor.get_shape(), refShape); + ASSERT_NO_THROW(req.infer()); + ASSERT_NO_THROW(req.start_async()); + req.wait(); + ASSERT_NO_THROW(tensor = req.get_tensor(ngraph::op::util::create_ie_output_name(function->get_results().front()->input_value(0)))); + ASSERT_EQ(tensor.get_shape(), refOutShape); } -TEST_P(InferRequestDynamicTests, InferUpperBoundNetworkWithGetBlob) { - const std::string param_name = "Param_1"; - const InferenceEngine::SizeVector refShape = inOutShapes[0].first; - const InferenceEngine::SizeVector refOutShape = inOutShapes[0].second; - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - std::map shapes; - shapes[param_name] = {ngraph::Dimension(0, 19), 4, 20, 20}; - cnnNet.reshape(shapes); - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); +TEST_P(InferRequestDynamicTests, InferUpperBoundNetworkWithGetTensor) { + const std::string tensor_name = "Tensor_1"; + const ov::Shape refShape = inOutShapes[0].first; + const ov::Shape refOutShape = inOutShapes[0].second; + std::map shapes; + shapes[tensor_name] = {ov::Dimension(0, 19), 4, 20, 20}; + ASSERT_NO_THROW(function->reshape(shapes)); + // Load ov::Function to target plugins + auto execNet = ie->compile_model(function, targetDevice, configuration); // Create InferRequest - InferenceEngine::InferRequest req; - InferenceEngine::Blob::Ptr blob; - ASSERT_NO_THROW(req = execNet.CreateInferRequest()); - //ASSERT_NO_THROW(req.SetShape(param_name, {1, 4, 20, 20})); - ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); - 
ASSERT_NO_THROW(blob->setShape({1, 4, 20, 20})); - ASSERT_EQ(blob->getTensorDesc().getDims(), refShape); - req.Infer(); - req.StartAsync(); - InferenceEngine::StatusCode sts; - sts = req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY); - ASSERT_EQ(InferenceEngine::StatusCode::OK, sts); - ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first)); - ASSERT_EQ(blob->getTensorDesc().getDims(), refOutShape); + ov::runtime::InferRequest req; + ov::runtime::Tensor tensor; + ASSERT_NO_THROW(req = execNet.create_infer_request()); + //ASSERT_NO_THROW(req.SetShape(tensor_name, {1, 4, 20, 20})); + ASSERT_NO_THROW(tensor = req.get_tensor(function->get_parameters().back()->get_friendly_name())); + ASSERT_NO_THROW(tensor.set_shape({1, 4, 20, 20})); + ASSERT_EQ(tensor.get_shape(), refShape); + ASSERT_NO_THROW(req.infer()); + ASSERT_NO_THROW(req.start_async()); + req.wait(); + ASSERT_NO_THROW(tensor = req.get_tensor(ngraph::op::util::create_ie_output_name(function->get_results().front()->input_value(0)))); + ASSERT_EQ(tensor.get_shape(), refOutShape); } -TEST_P(InferRequestDynamicTests, InferOutOfRangeShapeNetworkWithGetBlobLower) { - const std::string param_name = "Param_1"; - const InferenceEngine::SizeVector refShape = inOutShapes[0].first; - const InferenceEngine::SizeVector refOutShape = inOutShapes[0].second; - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - std::map shapes; - shapes[param_name] = {ngraph::Dimension(2, 3), 4, 20, 20}; - cnnNet.reshape(shapes); - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); +TEST_P(InferRequestDynamicTests, InferOutOfRangeShapeNetworkWithGetTensorLower) { + const std::string tensor_name = "Tensor_1"; + const ov::Shape refShape = inOutShapes[0].first; + const ov::Shape refOutShape = inOutShapes[0].second; + std::map shapes; + shapes[tensor_name] = {ov::Dimension(2, 3), 4, 20, 20}; + 
ASSERT_NO_THROW(function->reshape(shapes)); + // Load ov::Function to target plugins + auto execNet = ie->compile_model(function, targetDevice, configuration); // Create InferRequest - InferenceEngine::InferRequest req; - InferenceEngine::Blob::Ptr blob; - ASSERT_NO_THROW(req = execNet.CreateInferRequest()); - ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); - ASSERT_NO_THROW(blob->setShape({1, 4, 20, 20})); + ov::runtime::InferRequest req; + ov::runtime::Tensor tensor; + ASSERT_NO_THROW(req = execNet.create_infer_request()); + ASSERT_NO_THROW(tensor = req.get_tensor(function->get_parameters().back()->get_friendly_name())); + ASSERT_NO_THROW(tensor.set_shape({1, 4, 20, 20})); // Plugin may or may not throw in case if input tensor has dimensions that are out of bounds - //ASSERT_THROW(req.Infer(), InferenceEngine::Exception); + //ASSERT_THROW(req.infer(), ov::Exception); } -TEST_P(InferRequestDynamicTests, InferOutOfRangeShapeNetworkWithGetBlobUpper) { - const std::string param_name = "Param_1"; - const InferenceEngine::SizeVector refShape = inOutShapes[0].first; - const InferenceEngine::SizeVector refOutShape = inOutShapes[0].second; - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - std::map shapes; - shapes[param_name] = {ngraph::Dimension(1, 2), 4, 20, 20}; - cnnNet.reshape(shapes); - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); +TEST_P(InferRequestDynamicTests, InferOutOfRangeShapeNetworkWithGetTensorUpper) { + const std::string tensor_name = "Tensor_1"; + const ov::Shape refShape = inOutShapes[0].first; + const ov::Shape refOutShape = inOutShapes[0].second; + std::map shapes; + shapes[tensor_name] = {ov::Dimension(1, 2), 4, 20, 20}; + ASSERT_NO_THROW(function->reshape(shapes)); + // Load ov::Function to target plugins + auto execNet = ie->compile_model(function, targetDevice, configuration); // Create InferRequest - 
InferenceEngine::InferRequest req; - InferenceEngine::Blob::Ptr blob; - ASSERT_NO_THROW(req = execNet.CreateInferRequest()); - ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); - ASSERT_NO_THROW(blob->setShape({3, 4, 20, 20})); + ov::runtime::InferRequest req; + ov::runtime::Tensor tensor; + ASSERT_NO_THROW(req = execNet.create_infer_request()); + ASSERT_NO_THROW(tensor = req.get_tensor(function->get_parameters().back()->get_friendly_name())); + ASSERT_NO_THROW(tensor.set_shape({3, 4, 20, 20})); // Plugin may or may not throw in case if input tensor has dimensions that are out of bounds - // ASSERT_THROW(req.Infer(), InferenceEngine::Exception); + // ASSERT_THROW(req.infer(), ov::Exception); } -TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithGetBlob2times) { - const std::string param_name = "Param_1"; - const InferenceEngine::SizeVector refShape = inOutShapes[0].first; - const InferenceEngine::SizeVector refShape2 = inOutShapes[1].first; - const InferenceEngine::SizeVector refOutShape = inOutShapes[0].second; - const InferenceEngine::SizeVector refOutShape2 = inOutShapes[1].second; - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - std::map shapes; - shapes[param_name] = {ngraph::Dimension::dynamic(), 4, 20, 20}; - cnnNet.reshape(shapes); - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); +TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithGetTensor2times) { + const std::string tensor_name = "Tensor_1"; + const ov::Shape refShape = inOutShapes[0].first; + const ov::Shape refShape2 = inOutShapes[1].first; + const ov::Shape refOutShape = inOutShapes[0].second; + const ov::Shape refOutShape2 = inOutShapes[1].second; + std::map shapes; + shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20}; + ASSERT_NO_THROW(function->reshape(shapes)); + // Load ov::Function to target plugins + auto execNet = ie->compile_model(function, 
targetDevice, configuration); // Create InferRequest - InferenceEngine::InferRequest req; - InferenceEngine::Blob::Ptr blob; - ASSERT_NO_THROW(req = execNet.CreateInferRequest()); - ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); - ASSERT_NO_THROW(blob->setShape(refShape)); - ASSERT_EQ(blob->getTensorDesc().getDims(), refShape); - req.Infer(); - req.StartAsync(); - InferenceEngine::StatusCode sts; - sts = req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY); - ASSERT_EQ(InferenceEngine::StatusCode::OK, sts); - ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first)); - ASSERT_EQ(blob->getTensorDesc().getDims(), refOutShape); + ov::runtime::InferRequest req; + ov::runtime::Tensor tensor; + ASSERT_NO_THROW(req = execNet.create_infer_request()); + ASSERT_NO_THROW(tensor = req.get_tensor(function->get_parameters().back()->get_friendly_name())); + ASSERT_NO_THROW(tensor.set_shape(refShape)); + ASSERT_EQ(tensor.get_shape(), refShape); + ASSERT_NO_THROW(req.infer()); + ASSERT_NO_THROW(req.start_async()); + req.wait(); + ASSERT_NO_THROW(tensor = req.get_tensor(ngraph::op::util::create_ie_output_name(function->get_results().front()->input_value(0)))); + ASSERT_EQ(tensor.get_shape(), refOutShape); - ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); - ASSERT_NO_THROW(blob->setShape(refShape2)); - ASSERT_EQ(blob->getTensorDesc().getDims(), refShape2); - req.Infer(); - req.StartAsync(); - sts = req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY); - ASSERT_EQ(InferenceEngine::StatusCode::OK, sts); - ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first)); - ASSERT_EQ(blob->getTensorDesc().getDims(), refOutShape2); + ASSERT_NO_THROW(tensor = req.get_tensor(function->get_parameters().back()->get_friendly_name())); + ASSERT_NO_THROW(tensor.set_shape(refShape2)); + ASSERT_EQ(tensor.get_shape(), refShape2); + ASSERT_NO_THROW(req.infer()); + 
ASSERT_NO_THROW(req.start_async()); + req.wait(); + ASSERT_NO_THROW(tensor = req.get_tensor(ngraph::op::util::create_ie_output_name(function->get_results().front()->input_value(0)))); + ASSERT_EQ(tensor.get_shape(), refOutShape2); } -TEST_P(InferRequestDynamicTests, GetSameBlob2times) { - const std::string param_name = "Param_1"; - const InferenceEngine::SizeVector refShape = inOutShapes[0].first; - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - std::map shapes; - shapes[param_name] = {ngraph::Dimension::dynamic(), 4, 20, 20}; - cnnNet.reshape(shapes); - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); +TEST_P(InferRequestDynamicTests, GetSameTensor2times) { + const std::string tensor_name = "Tensor_1"; + const ov::Shape refShape = inOutShapes[0].first; + std::map shapes; + shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20}; + ASSERT_NO_THROW(function->reshape(shapes)); + // Load ov::Function to target plugins + auto execNet = ie->compile_model(function, targetDevice, configuration); // Create InferRequest - InferenceEngine::InferRequest req; - InferenceEngine::Blob::Ptr blob; - ASSERT_NO_THROW(req = execNet.CreateInferRequest()); - ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); - ASSERT_NO_THROW(blob->setShape(refShape)); - ASSERT_EQ(blob->getTensorDesc().getDims(), refShape); - ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); - ASSERT_EQ(blob->getTensorDesc().getDims(), refShape); + ov::runtime::InferRequest req; + ov::runtime::Tensor tensor; + ASSERT_NO_THROW(req = execNet.create_infer_request()); + ASSERT_NO_THROW(tensor = req.get_tensor(function->get_parameters().back()->get_friendly_name())); + ASSERT_NO_THROW(tensor.set_shape(refShape)); + ASSERT_EQ(tensor.get_shape(), refShape); + ASSERT_NO_THROW(tensor = req.get_tensor(function->get_parameters().back()->get_friendly_name())); + 
ASSERT_EQ(tensor.get_shape(), refShape); } -TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithSetBlob) { - const std::string param_name = "Param_1"; - const InferenceEngine::SizeVector refShape = inOutShapes[0].first; - const InferenceEngine::SizeVector refOutShape = inOutShapes[0].second; - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - std::map shapes; - shapes[param_name] = {ngraph::Dimension::dynamic(), 4, 20, 20}; - cnnNet.reshape(shapes); - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); +TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithSetTensor) { + const std::string tensor_name = "Tensor_1"; + const ov::Shape refShape = inOutShapes[0].first; + const ov::Shape refOutShape = inOutShapes[0].second; + std::map shapes; + shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20}; + ASSERT_NO_THROW(function->reshape(shapes)); + // Load ov::Function to target plugins + auto execNet = ie->compile_model(function, targetDevice, configuration); // Create InferRequest - InferenceEngine::InferRequest req; - InferenceEngine::Blob::Ptr blob = make_blob_with_precision({InferenceEngine::Precision::FP32, refShape, InferenceEngine::Layout::NCHW}); - blob->allocate(); - ASSERT_NO_THROW(req = execNet.CreateInferRequest()); - ASSERT_NO_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, blob)); - ASSERT_EQ(blob->getTensorDesc().getDims(), refShape); - req.Infer(); - req.StartAsync(); - InferenceEngine::StatusCode sts; - sts = req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY); - ASSERT_EQ(InferenceEngine::StatusCode::OK, sts); - ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first)); - ASSERT_EQ(blob->getTensorDesc().getDims(), refOutShape); + ov::runtime::InferRequest req; + ov::runtime::Tensor tensor(ov::element::f32, refShape); + ASSERT_NO_THROW(req = execNet.create_infer_request()); + 
ASSERT_NO_THROW(req.set_tensor(function->get_parameters().back()->get_friendly_name(), tensor)); + ASSERT_EQ(tensor.get_shape(), refShape); + ASSERT_NO_THROW(req.infer()); + ASSERT_NO_THROW(req.start_async()); + ASSERT_NO_THROW(req.wait()); + ASSERT_NO_THROW(tensor = req.get_tensor(ngraph::op::util::create_ie_output_name(function->get_results().front()->input_value(0)))); + ASSERT_EQ(tensor.get_shape(), refOutShape); } -TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithSetBlob2times) { - const std::string param_name = "Param_1"; - const InferenceEngine::SizeVector refShape = inOutShapes[0].first; - const InferenceEngine::SizeVector refShape2 = inOutShapes[1].first; - const InferenceEngine::SizeVector refOutShape = inOutShapes[0].second; - const InferenceEngine::SizeVector refOutShape2 = inOutShapes[1].second; - // Create CNNNetwork from ngrpah::Function - InferenceEngine::CNNNetwork cnnNet(function); - std::map shapes; - shapes[param_name] = {ngraph::Dimension::dynamic(), 4, 20, 20}; - cnnNet.reshape(shapes); - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); +TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithSetTensor2times) { + const std::string tensor_name = "Tensor_1"; + const ov::Shape refShape = inOutShapes[0].first; + const ov::Shape refShape2 = inOutShapes[1].first; + const ov::Shape refOutShape = inOutShapes[0].second; + const ov::Shape refOutShape2 = inOutShapes[1].second; + std::map shapes; + shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20}; + ASSERT_NO_THROW(function->reshape(shapes)); + // Load ov::Function to target plugins + auto execNet = ie->compile_model(function, targetDevice, configuration); // Create InferRequest - InferenceEngine::InferRequest req; - InferenceEngine::Blob::Ptr blob = make_blob_with_precision({InferenceEngine::Precision::FP32, refShape, InferenceEngine::Layout::NCHW}); - blob->allocate(); + ov::runtime::InferRequest req; + ov::runtime::Tensor 
tensor(ov::element::f32, refShape); - ASSERT_NO_THROW(req = execNet.CreateInferRequest()); - ASSERT_NO_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, blob)); - ASSERT_EQ(blob->getTensorDesc().getDims(), refShape); - req.Infer(); - req.StartAsync(); - InferenceEngine::StatusCode sts; - sts = req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY); - ASSERT_EQ(InferenceEngine::StatusCode::OK, sts); - ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first)); - ASSERT_EQ(blob->getTensorDesc().getDims(), refOutShape); + ASSERT_NO_THROW(req = execNet.create_infer_request()); + ASSERT_NO_THROW(req.set_tensor(function->get_parameters().back()->get_friendly_name(), tensor)); + ASSERT_EQ(tensor.get_shape(), refShape); + ASSERT_NO_THROW(req.infer()); + ASSERT_NO_THROW(req.start_async()); + ASSERT_NO_THROW(req.wait()); + ASSERT_NO_THROW(tensor = req.get_tensor(ngraph::op::util::create_ie_output_name(function->get_results().front()->input_value(0)))); + ASSERT_EQ(tensor.get_shape(), refOutShape); - blob = make_blob_with_precision({InferenceEngine::Precision::FP32, refShape2, InferenceEngine::Layout::NCHW}); - blob->allocate(); - ASSERT_NO_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, blob)); - ASSERT_EQ(blob->getTensorDesc().getDims(), refShape2); - req.Infer(); - req.StartAsync(); - sts = req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY); - ASSERT_EQ(InferenceEngine::StatusCode::OK, sts); - ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first)); - ASSERT_EQ(blob->getTensorDesc().getDims(), refOutShape2); + tensor = ov::runtime::Tensor(ov::element::f32, refShape2); + ASSERT_NO_THROW(req.set_tensor(function->get_parameters().back()->get_friendly_name(), tensor)); + ASSERT_EQ(tensor.get_shape(), refShape2); + ASSERT_NO_THROW(req.infer()); + ASSERT_NO_THROW(req.start_async()); + ASSERT_NO_THROW(req.wait()); + ASSERT_NO_THROW(tensor = 
req.get_tensor(ngraph::op::util::create_ie_output_name(function->get_results().front()->input_value(0)))); + ASSERT_EQ(tensor.get_shape(), refOutShape2); } } // namespace BehaviorTestsDefinitions \ No newline at end of file diff --git a/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/subgraph_builders.hpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/subgraph_builders.hpp index 171bd02fc4b..f9e14a30b28 100644 --- a/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/subgraph_builders.hpp +++ b/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/subgraph_builders.hpp @@ -43,6 +43,7 @@ inline std::shared_ptr makeSplitConvConcat(std::vector ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32) { auto params = ngraph::builder::makeParams(ngPrc, {inputShape}); params.front()->set_friendly_name("Param_1"); + params.front()->get_output_tensor(0).set_names({"Tensor_1"}); auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1); auto conv1 = ngraph::builder::makeConvolution(split->output(0), ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, diff --git a/ngraph/core/CMakeLists.txt b/ngraph/core/CMakeLists.txt index 62f207f860d..e60f5637705 100644 --- a/ngraph/core/CMakeLists.txt +++ b/ngraph/core/CMakeLists.txt @@ -86,9 +86,15 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") set_target_properties(ngraph PROPERTIES LINK_FLAGS "/IGNORE:4217,4286") endif() +# some sources are located in ngraph, while headers are in inference_engine_transformations +file(GLOB_RECURSE smart_reshape_srcs ${CMAKE_CURRENT_SOURCE_DIR}/src/pass/smart_reshape/*.cpp) +file(GLOB_RECURSE rt_info_srcs ${CMAKE_CURRENT_SOURCE_DIR}/src/pass/rt_info/*.cpp) set_source_files_properties("${CMAKE_CURRENT_SOURCE_DIR}/src/pass/convert_precision.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/src/pass/convert_fp32_to_fp16.cpp" + "${CMAKE_CURRENT_SOURCE_DIR}/src/pass/init_node_info.cpp" 
"${CMAKE_CURRENT_SOURCE_DIR}/src/op/type_relaxed.cpp" + "${CMAKE_CURRENT_SOURCE_DIR}/src/function.cpp" # for SmartReshape + ${smart_reshape_srcs} ${rt_info_srcs} PROPERTIES INCLUDE_DIRECTORIES $) # Defines macro in C++ to load backend plugin diff --git a/ngraph/core/include/openvino/core/function.hpp b/ngraph/core/include/openvino/core/function.hpp index 0b5d16b1359..99fb64511bf 100644 --- a/ngraph/core/include/openvino/core/function.hpp +++ b/ngraph/core/include/openvino/core/function.hpp @@ -23,7 +23,7 @@ namespace ov { /// A user-defined function. -class OPENVINO_API Function { +class OPENVINO_API Function : public std::enable_shared_from_this { public: static constexpr ngraph::DiscreteTypeInfo type_info{"Function", 0}; const ngraph::DiscreteTypeInfo& get_type_info() const { @@ -111,6 +111,8 @@ public: ov::Output input(size_t i) const; ov::Output input(const std::string& tensor_name) const; + void reshape(const std::map& partial_shapes); + /// Return the element type of output i const ngraph::element::Type& get_output_element_type(size_t i) const; diff --git a/ngraph/core/src/function.cpp b/ngraph/core/src/function.cpp index c0a2b4deed4..8d55d51b621 100644 --- a/ngraph/core/src/function.cpp +++ b/ngraph/core/src/function.cpp @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include "itt.hpp" #include "ngraph/graph_util.hpp" @@ -14,10 +16,15 @@ #include "ngraph/ops.hpp" #include "ngraph/opsets/opset7.hpp" #include "ngraph/validation_util.hpp" +#include "openvino/core/attribute_visitor.hpp" #include "openvino/core/except.hpp" +#include "openvino/core/partial_shape.hpp" +#include "openvino/op/parameter.hpp" #include "openvino/op/util/op_types.hpp" #include "openvino/op/util/variable_context.hpp" #include "openvino/op/util/variable_extension.hpp" +#include "openvino/pass/manager.hpp" +#include "transformations/smart_reshape/smart_reshape.hpp" using namespace std; @@ -640,3 +647,66 @@ ov::Output ov::Function::input(const std::string& tensor_name) { } 
throw ov::Exception("Input for tensor name " + tensor_name + " was not found."); } + +void ov::Function::reshape(const std::map& partial_shapes) { + if (partial_shapes.empty()) + return; + + const auto& params = get_parameters(); + std::unordered_map> tensor_param_map; + + // Check that we need to do reshape only if input shapes will be changed + bool need_reshape = false; + for (const auto& partial_shape : partial_shapes) { + bool shape_is_used = false; + + for (const auto& param : params) { + const auto& tensor_names = param->get_output_tensor(0).get_names(); + + if (tensor_names.count(partial_shape.first)) { + shape_is_used = true; + tensor_param_map[partial_shape.first] = param; + if (param->get_output_partial_shape(0).is_dynamic() || + param->get_output_partial_shape(0) != partial_shape.second) { + need_reshape = true; + } + break; + } + } + + OPENVINO_ASSERT(shape_is_used, + "PartialShape for tensor with name '", + partial_shape.first, + "' is not used in ov::Function::reshape"); + } + + if (!need_reshape) + return; + + // save original parameters shape + std::map original_input_shapes; + for (const auto& param : params) { + std::string any_tensor_name = *param->get_output_tensor(0).get_names().begin(); + original_input_shapes[any_tensor_name] = param->get_output_partial_shape(0); + } + + auto reshape_only = [&](const std::map& pshapes) { + for (const auto& pshape : pshapes) { + tensor_param_map[pshape.first]->set_partial_shape(pshape.second); + } + + validate_nodes_and_infer_types(); + }; + + try { + ov::pass::Manager ssr_manager; + ssr_manager.register_pass(); + ssr_manager.run_passes(shared_from_this()); + + reshape_only(partial_shapes); + } catch (std::exception& ex) { + // restore shapes to original ones + reshape_only(original_input_shapes); + throw ex; + } +} diff --git a/inference-engine/src/transformations/src/transformations/init_node_info.cpp b/ngraph/core/src/pass/init_node_info.cpp similarity index 68% rename from 
inference-engine/src/transformations/src/transformations/init_node_info.cpp rename to ngraph/core/src/pass/init_node_info.cpp index 3069146c587..055eeeb0754 100644 --- a/inference-engine/src/transformations/src/transformations/init_node_info.cpp +++ b/ngraph/core/src/pass/init_node_info.cpp @@ -2,56 +2,54 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "itt.hpp" #include "transformations/init_node_info.hpp" -#include "transformations/rt_info/fused_names_attribute.hpp" -#include "transformations/rt_info/primitives_priority_attribute.hpp" #include -#include - #include #include #include +#include + +#include "itt.hpp" +#include "transformations/rt_info/fused_names_attribute.hpp" +#include "transformations/rt_info/primitives_priority_attribute.hpp" NGRAPH_RTTI_DEFINITION(ngraph::pass::InitNodeInfo, "InitNodeInfo", 0); bool ngraph::pass::InitNodeInfo::run_on_function(std::shared_ptr f) { - RUN_ON_FUNCTION_SCOPE(InitNodeInfo); - std::vector > attributes { - std::make_shared >(ngraph::FusedNames()) - }; + // TODO: enable conditional compile + // RUN_ON_FUNCTION_SCOPE(InitNodeInfo); + std::vector> attributes{ + std::make_shared>(ngraph::FusedNames())}; using VariantCreator = std::function(const std::string&)>; - std::map update_attributes { - {"PrimitivesPriority", - [](const std::string & value) -> std::shared_ptr { - return std::make_shared >(ov::PrimitivesPriority(value)); - } - } - }; + std::map update_attributes{ + {"PrimitivesPriority", [](const std::string& value) -> std::shared_ptr { + return std::make_shared>(ov::PrimitivesPriority(value)); + }}}; - for (auto & node : f->get_ops()) { + for (auto& node : f->get_ops()) { // Recursively apply transformation for sub-graph based operations if (auto sub_graph_node = std::dynamic_pointer_cast(node)) { if (auto sub_graph = sub_graph_node->get_function()) { run_on_function(sub_graph); } } - auto & rtInfo = node->get_rt_info(); + auto& rtInfo = node->get_rt_info(); // Default attributes initialization - for (auto & 
attr : attributes) { + for (auto& attr : attributes) { // Skip initialization if attribute has been already set - if (rtInfo.count(attr->get_type_info())) continue; + if (rtInfo.count(attr->get_type_info())) + continue; if (auto init_attr = attr->init(node)) { rtInfo[attr->get_type_info()] = init_attr; } } // Convert manually set attributes to appropriate VariantWrapper class instances // all manually set attributes must belong to VariantWrapper class - for (auto & attr : update_attributes) { + for (auto& attr : update_attributes) { if (rtInfo.count(attr.first)) { - if (auto variant_string = std::dynamic_pointer_cast >(rtInfo[attr.first])) { + if (auto variant_string = std::dynamic_pointer_cast>(rtInfo[attr.first])) { rtInfo.erase(attr.first); auto res = attr.second(variant_string->get()); rtInfo[res->get_type_info()] = res; diff --git a/inference-engine/src/transformations/src/transformations/rt_info/fused_names_attribute.cpp b/ngraph/core/src/pass/rt_info/fused_names_attribute.cpp similarity index 68% rename from inference-engine/src/transformations/src/transformations/rt_info/fused_names_attribute.cpp rename to ngraph/core/src/pass/rt_info/fused_names_attribute.cpp index 7f69f5c531c..e3315e9d817 100644 --- a/inference-engine/src/transformations/src/transformations/rt_info/fused_names_attribute.cpp +++ b/ngraph/core/src/pass/rt_info/fused_names_attribute.cpp @@ -2,23 +2,23 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include +#include "transformations/rt_info/fused_names_attribute.hpp" +#include + +#include +#include +#include #include #include - -#include "transformations/rt_info/fused_names_attribute.hpp" +#include using namespace ngraph; using namespace ov; std::string FusedNames::getNames() const { std::string res; - for (auto &name : fused_names) { + for (auto& name : fused_names) { res += (res.empty() ? 
name : "," + name); } return res; @@ -28,58 +28,62 @@ std::vector FusedNames::getVectorNames() const { return std::vector(fused_names.begin(), fused_names.end()); } -void FusedNames::fuseWith(const FusedNames &names) { - for (const auto & name : names.fused_names) { +void FusedNames::fuseWith(const FusedNames& names) { + for (const auto& name : names.fused_names) { fused_names.insert(name); } } -std::string ngraph::getFusedNames(const std::shared_ptr &node) { - const auto &rtInfo = node->get_rt_info(); +std::string ngraph::getFusedNames(const std::shared_ptr& node) { + const auto& rtInfo = node->get_rt_info(); using FusedNamesWrapper = VariantWrapper; - if (!rtInfo.count(FusedNamesWrapper::get_type_info_static())) return {}; + if (!rtInfo.count(FusedNamesWrapper::get_type_info_static())) + return {}; - const auto &attr = rtInfo.at(FusedNamesWrapper::get_type_info_static()); + const auto& attr = rtInfo.at(FusedNamesWrapper::get_type_info_static()); FusedNames fusedNames = ov::as_type_ptr(attr)->get(); return fusedNames.getNames(); } -std::vector ngraph::getFusedNamesVector(const std::shared_ptr &node) { - if (!node) return {}; +std::vector ngraph::getFusedNamesVector(const std::shared_ptr& node) { + if (!node) + return {}; - const auto &rtInfo = node->get_rt_info(); + const auto& rtInfo = node->get_rt_info(); using FusedNamesWrapper = VariantWrapper; - if (!rtInfo.count(FusedNamesWrapper::get_type_info_static())) return {}; + if (!rtInfo.count(FusedNamesWrapper::get_type_info_static())) + return {}; - const auto &attr = rtInfo.at(FusedNamesWrapper::get_type_info_static()); + const auto& attr = rtInfo.at(FusedNamesWrapper::get_type_info_static()); FusedNames fusedNames = ov::as_type_ptr(attr)->get(); return fusedNames.getVectorNames(); } template class ov::VariantImpl; -std::shared_ptr VariantWrapper::merge(const ngraph::NodeVector & nodes) { +std::shared_ptr VariantWrapper::merge(const ngraph::NodeVector& nodes) { FusedNames mergedNames; - for (auto &node : nodes) { 
- const auto &rtInfo = node->get_rt_info(); + for (auto& node : nodes) { + const auto& rtInfo = node->get_rt_info(); - if (!rtInfo.count(VariantWrapper::get_type_info_static())) continue; + if (!rtInfo.count(VariantWrapper::get_type_info_static())) + continue; const auto attr = rtInfo.at(VariantWrapper::get_type_info_static()); - if (auto fusedNames = std::dynamic_pointer_cast >(attr)) { + if (auto fusedNames = std::dynamic_pointer_cast>(attr)) { mergedNames.fuseWith(fusedNames->get()); } } - return std::make_shared >(mergedNames); + return std::make_shared>(mergedNames); } -std::shared_ptr VariantWrapper::init(const std::shared_ptr & node) { - return std::make_shared > (FusedNames(node->get_friendly_name())); +std::shared_ptr VariantWrapper::init(const std::shared_ptr& node) { + return std::make_shared>(FusedNames(node->get_friendly_name())); } -bool VariantWrapper::visit_attributes(AttributeVisitor &visitor) { +bool VariantWrapper::visit_attributes(AttributeVisitor& visitor) { visitor.on_attribute("value", m_value.fused_names); return true; } \ No newline at end of file diff --git a/inference-engine/src/transformations/src/transformations/rt_info/primitives_priority_attribute.cpp b/ngraph/core/src/pass/rt_info/primitives_priority_attribute.cpp similarity index 72% rename from inference-engine/src/transformations/src/transformations/rt_info/primitives_priority_attribute.cpp rename to ngraph/core/src/pass/rt_info/primitives_priority_attribute.cpp index e3809945e37..601a3e5ea0d 100644 --- a/inference-engine/src/transformations/src/transformations/rt_info/primitives_priority_attribute.cpp +++ b/ngraph/core/src/pass/rt_info/primitives_priority_attribute.cpp @@ -2,19 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include - -#include -#include -#include - #include "transformations/rt_info/primitives_priority_attribute.hpp" -#include "ngraph_ops/convolution_ie.hpp" -#include "ngraph_ops/deconvolution_ie.hpp" + +#include + 
+#include +#include +#include +#include +#include +#include +#include using namespace ov; using namespace ngraph; @@ -23,27 +21,26 @@ std::string PrimitivesPriority::getPrimitivesPriority() const { return primitives_priority; } -std::string ov::getPrimitivesPriority(const std::shared_ptr &node) { - const auto &rtInfo = node->get_rt_info(); +std::string ov::getPrimitivesPriority(const std::shared_ptr& node) { + const auto& rtInfo = node->get_rt_info(); using PrimitivesPriorityWrapper = VariantWrapper; - if (!rtInfo.count(PrimitivesPriorityWrapper::get_type_info_static())) return ""; + if (!rtInfo.count(PrimitivesPriorityWrapper::get_type_info_static())) + return ""; - const auto &attr = rtInfo.at(PrimitivesPriorityWrapper::get_type_info_static()); + const auto& attr = rtInfo.at(PrimitivesPriorityWrapper::get_type_info_static()); PrimitivesPriority pp = ov::as_type_ptr(attr)->get(); return pp.getPrimitivesPriority(); } template class ov::VariantImpl; -std::shared_ptr VariantWrapper::merge(const ngraph::NodeVector & nodes) { - auto isConvolutionBased = [](const std::shared_ptr & node) -> bool { +std::shared_ptr VariantWrapper::merge(const ngraph::NodeVector& nodes) { + auto isConvolutionBased = [](const std::shared_ptr& node) -> bool { if (std::dynamic_pointer_cast(node) || std::dynamic_pointer_cast(node) || std::dynamic_pointer_cast(node) || - std::dynamic_pointer_cast(node) || - std::dynamic_pointer_cast(node) || - std::dynamic_pointer_cast(node)) { + std::dynamic_pointer_cast(node)) { return true; } return false; @@ -51,10 +48,11 @@ std::shared_ptr VariantWrapper::merge(const std::set unique_pp; - for (auto &node : nodes) { + for (auto& node : nodes) { if (isConvolutionBased(node)) { std::string pp = getPrimitivesPriority(node); - if (!pp.empty()) unique_pp.insert(pp); + if (!pp.empty()) + unique_pp.insert(pp); } } @@ -66,14 +64,14 @@ std::shared_ptr VariantWrapper::merge(const if (unique_pp.size() == 1) { final_primitives_priority = *unique_pp.begin(); } - return 
std::make_shared >(PrimitivesPriority(final_primitives_priority)); + return std::make_shared>(PrimitivesPriority(final_primitives_priority)); } -std::shared_ptr VariantWrapper::init(const std::shared_ptr & node) { +std::shared_ptr VariantWrapper::init(const std::shared_ptr& node) { throw ngraph_error(std::string(get_type_info()) + " has no default initialization."); } -bool VariantWrapper::visit_attributes(AttributeVisitor &visitor) { +bool VariantWrapper::visit_attributes(AttributeVisitor& visitor) { visitor.on_attribute("value", m_value.primitives_priority); return true; } diff --git a/inference-engine/src/transformations/src/transformations/smart_reshape/matmul_sr.cpp b/ngraph/core/src/pass/smart_reshape/matmul_sr.cpp similarity index 59% rename from inference-engine/src/transformations/src/transformations/smart_reshape/matmul_sr.cpp rename to ngraph/core/src/pass/smart_reshape/matmul_sr.cpp index e987d4bc444..3532694536b 100644 --- a/inference-engine/src/transformations/src/transformations/smart_reshape/matmul_sr.cpp +++ b/ngraph/core/src/pass/smart_reshape/matmul_sr.cpp @@ -2,43 +2,45 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "itt.hpp" #include "transformations/smart_reshape/matmul_sr.hpp" + +#include +#include +#include +#include +#include +#include +#include + +#include "itt.hpp" #include "transformations/smart_reshape/utils.hpp" -#include -#include - -#include -#include -#include -#include -#include - -bool relax_hc_reshape_followed_by_matmul(const ngraph::pattern::PatternValueMap & pattern_to_output, - const std::shared_ptr & matmul_label, - const std::shared_ptr & reshape_label, - const std::shared_ptr & other_input_label, - const std::shared_ptr & reshape_pattern_label, +bool relax_hc_reshape_followed_by_matmul(const ngraph::pattern::PatternValueMap& pattern_to_output, + const std::shared_ptr& matmul_label, + const std::shared_ptr& reshape_label, + const std::shared_ptr& other_input_label, + const std::shared_ptr& reshape_pattern_label, bool 
reshape_is_A_input) { - const auto & reshape_rank = pattern_to_output.at(reshape_label).get_partial_shape().rank(); - const auto & matmul = std::dynamic_pointer_cast(pattern_to_output.at(matmul_label).get_node_shared_ptr()); + const auto& reshape_rank = pattern_to_output.at(reshape_label).get_partial_shape().rank(); + const auto& matmul = + std::dynamic_pointer_cast(pattern_to_output.at(matmul_label).get_node_shared_ptr()); if (!matmul || reshape_rank.is_dynamic() || reshape_rank.get_length() != 2) return false; - const auto &shape_source = pattern_to_output.at(other_input_label); + const auto& shape_source = pattern_to_output.at(other_input_label); if (ngraph::is_type(shape_source.get_node_shared_ptr()) || - ngraph::is_type(shape_source.get_node_shared_ptr())) + ngraph::is_type(shape_source.get_node_shared_ptr())) // avoiding loop creation return false; - const auto & raw_idx = reshape_is_A_input ? (matmul->get_transpose_b() ? -1 : -2) : (matmul->get_transpose_a() ? -2 : -1); - const auto & idx = ngraph::normalize_axes(matmul->description(), {raw_idx}, reshape_rank); - const auto & C = ngraph::op::util::node_to_get_shape_value_of_indices_from_shape_source(shape_source, idx); - const auto & N = ngraph::opset4::Constant::create(ngraph::element::i64, {1}, {-1}); - const auto & pattern_vector = reshape_is_A_input ? - (matmul->get_transpose_a() ? ngraph::OutputVector({C, N}) : ngraph::OutputVector({N, C})) : - (matmul->get_transpose_b() ? ngraph::OutputVector({N, C}) : ngraph::OutputVector({C, N})); - const auto & new_reshape_pattern = std::make_shared(pattern_vector, 0); + const auto& raw_idx = + reshape_is_A_input ? (matmul->get_transpose_b() ? -1 : -2) : (matmul->get_transpose_a() ? 
-2 : -1); + const auto& idx = ngraph::normalize_axes(matmul->description(), {raw_idx}, reshape_rank); + const auto& C = ngraph::op::util::node_to_get_shape_value_of_indices_from_shape_source(shape_source, idx); + const auto& N = ngraph::opset4::Constant::create(ngraph::element::i64, {1}, {-1}); + const auto& pattern_vector = + reshape_is_A_input ? (matmul->get_transpose_a() ? ngraph::OutputVector({C, N}) : ngraph::OutputVector({N, C})) + : (matmul->get_transpose_b() ? ngraph::OutputVector({N, C}) : ngraph::OutputVector({C, N})); + const auto& new_reshape_pattern = std::make_shared(pattern_vector, 0); auto reshape_pattern = pattern_to_output.at(reshape_pattern_label).get_node_shared_ptr(); new_reshape_pattern->set_friendly_name(reshape_pattern->get_friendly_name()); @@ -50,60 +52,76 @@ bool relax_hc_reshape_followed_by_matmul(const ngraph::pattern::PatternValueMap NGRAPH_RTTI_DEFINITION(ngraph::pass::ReshapeAMatMul, "ReshapeAMatMul", 0); ngraph::pass::ReshapeAMatMul::ReshapeAMatMul() { - MATCHER_SCOPE(ReshapeAMatMul); + // TODO: enable conditional compile + // MATCHER_SCOPE(ReshapeAMatMul); auto other_input_label = pattern::any_input(); auto reshape_input_label = pattern::any_input(); auto reshape_pattern_label = pattern::any_input(); auto reshape_label = ngraph::pattern::wrap_type({reshape_input_label, reshape_pattern_label}); auto matmul_label = ngraph::pattern::wrap_type({reshape_label, other_input_label}); - matcher_pass_callback callback = [=](pattern::Matcher &m) -> bool { - const auto & pattern_to_output = m.get_pattern_value_map(); - return relax_hc_reshape_followed_by_matmul(pattern_to_output, matmul_label, reshape_label, other_input_label, reshape_pattern_label, true); + matcher_pass_callback callback = [=](pattern::Matcher& m) -> bool { + const auto& pattern_to_output = m.get_pattern_value_map(); + return relax_hc_reshape_followed_by_matmul(pattern_to_output, + matmul_label, + reshape_label, + other_input_label, + reshape_pattern_label, + true); }; - auto 
m = std::make_shared(matmul_label, matcher_name); + auto m = std::make_shared(matmul_label /*, matcher_name */); register_matcher(m, callback); } NGRAPH_RTTI_DEFINITION(ngraph::pass::ReshapeBMatMul, "ReshapeBMatMul", 0); ngraph::pass::ReshapeBMatMul::ReshapeBMatMul() { - MATCHER_SCOPE(ReshapeBMatMul); + // TODO: enable conditional compile + // MATCHER_SCOPE(ReshapeBMatMul); auto other_input_label = pattern::any_input(); auto reshape_input_label = pattern::any_input(); auto reshape_pattern_label = pattern::any_input(); auto reshape_label = ngraph::pattern::wrap_type({reshape_input_label, reshape_pattern_label}); auto matmul_label = ngraph::pattern::wrap_type({other_input_label, reshape_label}); - matcher_pass_callback callback = [=](pattern::Matcher &m) -> bool { - const auto & pattern_to_output = m.get_pattern_value_map(); - return relax_hc_reshape_followed_by_matmul(pattern_to_output, matmul_label, reshape_label, other_input_label, reshape_pattern_label, false); + matcher_pass_callback callback = [=](pattern::Matcher& m) -> bool { + const auto& pattern_to_output = m.get_pattern_value_map(); + return relax_hc_reshape_followed_by_matmul(pattern_to_output, + matmul_label, + reshape_label, + other_input_label, + reshape_pattern_label, + false); }; - auto m = std::make_shared(matmul_label, matcher_name); + auto m = std::make_shared(matmul_label /*, matcher_name */); register_matcher(m, callback); } NGRAPH_RTTI_DEFINITION(ngraph::pass::TransposeMatMul, "TransposeMatMul", 0); ngraph::pass::TransposeMatMul::TransposeMatMul() { - MATCHER_SCOPE(TransposeMatMul); + // TODO: enable conditional compile + // MATCHER_SCOPE(TransposeMatMul); auto matmul_label = ngraph::pattern::wrap_type(); - matcher_pass_callback callback = [=](pattern::Matcher &m) -> bool { - const auto & pattern_to_output = m.get_pattern_value_map(); - auto matmul = std::dynamic_pointer_cast(pattern_to_output.at(matmul_label).get_node_shared_ptr()); + matcher_pass_callback callback = [=](pattern::Matcher& m) 
-> bool { + const auto& pattern_to_output = m.get_pattern_value_map(); + auto matmul = + std::dynamic_pointer_cast(pattern_to_output.at(matmul_label).get_node_shared_ptr()); if (!matmul) return false; auto transpose_is_fusable = [](const std::shared_ptr& input) { - const auto & input_rank = input->get_output_partial_shape(0).rank(); + const auto& input_rank = input->get_output_partial_shape(0).rank(); if (input_rank.is_static() && input_rank.get_length() >= 2) { if (auto transpose = std::dynamic_pointer_cast(input)) { - if (auto order = std::dynamic_pointer_cast(transpose->get_input_node_shared_ptr(1))) { - const auto & order_vector = order->cast_vector(); + if (auto order = + std::dynamic_pointer_cast(transpose->get_input_node_shared_ptr(1))) { + const auto& order_vector = order->cast_vector(); std::vector fusable_order(input_rank.get_length()); std::iota(fusable_order.begin(), fusable_order.end(), 0); - std::swap(fusable_order[input_rank.get_length() - 1], fusable_order[input_rank.get_length() - 2]); + std::swap(fusable_order[input_rank.get_length() - 1], + fusable_order[input_rank.get_length() - 2]); return order_vector == fusable_order; } } @@ -138,6 +156,6 @@ ngraph::pass::TransposeMatMul::TransposeMatMul() { } return false; }; - auto m = std::make_shared(matmul_label, matcher_name); + auto m = std::make_shared(matmul_label /*, matcher_name */); register_matcher(m, callback); } diff --git a/ngraph/core/src/pass/smart_reshape/mimic_set_batch_size.cpp b/ngraph/core/src/pass/smart_reshape/mimic_set_batch_size.cpp new file mode 100644 index 00000000000..6daba04c4d3 --- /dev/null +++ b/ngraph/core/src/pass/smart_reshape/mimic_set_batch_size.cpp @@ -0,0 +1,70 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include +#include + +#include "itt.hpp" + +NGRAPH_RTTI_DEFINITION(ngraph::pass::MimicSetBatchSize, "MimicSetBatchSize", 0); + +bool 
ngraph::pass::MimicSetBatchSize::run_on_function(std::shared_ptr f) { + // TODO: enable conditional compile + // RUN_ON_FUNCTION_SCOPE(MimicSetBatchSize); + // extracting ratio of out to in 0-index dimension value from the folded function + auto specialized_function = ngraph::clone_function(*f); + ngraph::pass::Manager manager; + manager.register_pass(); + manager.run_passes(specialized_function); + + std::map scale; + for (const auto& node : specialized_function->get_ops()) { + if (const auto& reshape = std::dynamic_pointer_cast(node)) { + const auto in_pshape = reshape->get_input_partial_shape(0), + out_pshape = reshape->get_output_partial_shape(0); + if (in_pshape.rank().is_dynamic() || in_pshape.rank().get_length() <= 1 || in_pshape[0].is_dynamic() || + out_pshape.rank().is_dynamic() || out_pshape.rank().get_length() <= 1 || out_pshape[0].is_dynamic()) + continue; + const auto& pattern = std::dynamic_pointer_cast(reshape->get_input_node_shared_ptr(1)); + if (pattern && pattern->cast_vector()[0] > 0) { + scale[reshape->get_friendly_name()] = + static_cast(out_pshape[0].get_length()) / static_cast(in_pshape[0].get_length()); + } + } + } + // apply transformation to original function + bool transformed = false; + for (auto& reshape : f->get_ops()) { + if (!is_type(reshape) || !scale.count(reshape->get_friendly_name()) || + reshape->get_output_partial_shape(0).rank().is_dynamic()) + continue; + + const auto& shape_of = + std::make_shared(reshape->get_input_source_output(0), reshape->get_input_element_type(1)); + const auto& new_input_batch = std::make_shared( + shape_of, + ngraph::opset5::Constant::create(ngraph::element::i64, {1}, std::vector{0}), + ngraph::opset5::Constant::create(ngraph::element::i64, {}, std::vector{0})); + + const std::shared_ptr& new_output_batch = std::make_shared( + std::make_shared(std::make_shared( + std::make_shared(new_input_batch, element::f32), + opset5::Constant::create(element::f32, {1}, {scale[reshape->get_friendly_name()]}))), + 
reshape->get_input_element_type(1)); + + std::vector non_batch_dims(reshape->get_output_partial_shape(0).rank().get_length() - 1); + std::iota(non_batch_dims.begin(), non_batch_dims.end(), 1); + const auto& non_batch_dims_node = std::make_shared( + reshape->input_value(1), + ngraph::opset5::Constant::create(ngraph::element::i64, {non_batch_dims.size()}, non_batch_dims), + ngraph::opset5::Constant::create(ngraph::element::i64, {}, std::vector{0})); + auto new_reshape_pattern = + std::make_shared(OutputVector{new_output_batch, non_batch_dims_node}, 0); + reshape->input(1).replace_source_output(new_reshape_pattern->output(0)); + transformed = true; + } + return transformed; +} diff --git a/ngraph/core/src/pass/smart_reshape/proposal_scales_stridedslice.cpp b/ngraph/core/src/pass/smart_reshape/proposal_scales_stridedslice.cpp new file mode 100644 index 00000000000..3722e06783d --- /dev/null +++ b/ngraph/core/src/pass/smart_reshape/proposal_scales_stridedslice.cpp @@ -0,0 +1,83 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include +#include +#include +#include +#include + +#include "itt.hpp" + +bool crop_scales_for_proposal(const ngraph::pattern::PatternValueMap& pattern_to_output, + std::shared_ptr parameter_label, + std::shared_ptr proposal_label) { + const auto& parameter = pattern_to_output.at(parameter_label); + const auto& proposal = pattern_to_output.at(proposal_label).get_node_shared_ptr(); + + auto cropped_scales = std::make_shared( + proposal->input_value(2), + ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}), + ngraph::opset5::Constant::create(ngraph::element::i64, + ngraph::Shape{1}, + {parameter.get_partial_shape()[1].get_length()}), + ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}), + std::vector{0}, + std::vector{0}); + + proposal->input(2).replace_source_output(cropped_scales->output(0)); + return true; +} + 
+NGRAPH_RTTI_DEFINITION(ngraph::pass::Proposal1Scales, "Proposal1Scales", 0); + +ngraph::pass::Proposal1Scales::Proposal1Scales() { + // TODO: enable conditional compile + // MATCHER_SCOPE(Proposal1Scales); + auto parameter_label = ngraph::pattern::wrap_type([](const Output& output) { + const auto& shape = output.get_partial_shape(); + return shape.rank().is_static() && shape.rank().get_length() == 2 && shape[1].is_static() && + (shape[1].get_length() == 3 || shape[1].get_length() == 4); + }); + auto reshape_label = ngraph::pattern::wrap_type( + {parameter_label, ngraph::pattern::wrap_type()}, + [](const Output& output) { + return output.get_partial_shape().rank().is_static() && output.get_partial_shape().rank().get_length() == 1; + }); + auto proposal_label = + ngraph::pattern::wrap_type({pattern::any_input(), pattern::any_input(), reshape_label}); + + matcher_pass_callback callback = [parameter_label, proposal_label](pattern::Matcher& m) -> bool { + return crop_scales_for_proposal(m.get_pattern_value_map(), parameter_label, proposal_label); + }; + auto m = std::make_shared(proposal_label /*, matcher_name */); + register_matcher(m, callback); +} + +NGRAPH_RTTI_DEFINITION(ngraph::pass::Proposal4Scales, "Proposal4Scales", 0); + +ngraph::pass::Proposal4Scales::Proposal4Scales() { + // TODO: enable conditional compile + // MATCHER_SCOPE(Proposal4Scales); + auto parameter_label = ngraph::pattern::wrap_type([](const Output& output) { + const auto& shape = output.get_partial_shape(); + return shape.rank().is_static() && shape.rank().get_length() == 2 && shape[1].is_static() && + (shape[1].get_length() == 3 || shape[1].get_length() == 4); + }); + auto reshape_label = ngraph::pattern::wrap_type( + {parameter_label, ngraph::pattern::wrap_type()}, + [](const Output& output) { + return output.get_partial_shape().rank().is_static() && output.get_partial_shape().rank().get_length() == 1; + }); + auto proposal_label = + ngraph::pattern::wrap_type({pattern::any_input(), 
pattern::any_input(), reshape_label}); + + matcher_pass_callback callback = [parameter_label, proposal_label](pattern::Matcher& m) -> bool { + return crop_scales_for_proposal(m.get_pattern_value_map(), parameter_label, proposal_label); + }; + auto m = std::make_shared(proposal_label /*, matcher_name */); + register_matcher(m, callback); +} diff --git a/inference-engine/src/transformations/src/transformations/smart_reshape/reshape_to_1D.cpp b/ngraph/core/src/pass/smart_reshape/reshape_to_1D.cpp similarity index 52% rename from inference-engine/src/transformations/src/transformations/smart_reshape/reshape_to_1D.cpp rename to ngraph/core/src/pass/smart_reshape/reshape_to_1D.cpp index 03916cd3fd9..8979d21c1c3 100644 --- a/inference-engine/src/transformations/src/transformations/smart_reshape/reshape_to_1D.cpp +++ b/ngraph/core/src/pass/smart_reshape/reshape_to_1D.cpp @@ -2,26 +2,30 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "itt.hpp" -#include - -#include #include #include #include #include +#include + +#include "itt.hpp" NGRAPH_RTTI_DEFINITION(ngraph::pass::ReshapeTo1D, "ReshapeTo1D", 0); ngraph::pass::ReshapeTo1D::ReshapeTo1D() { - MATCHER_SCOPE(ReshapeTo1D); - auto reshape_label = ngraph::pattern::wrap_type({pattern::any_input(), ngraph::pattern::wrap_type()}, - [](const Output & output) { return output.get_partial_shape().rank().is_static() && output.get_partial_shape().rank().get_length() == 1; }); + // TODO: enable conditional compile + // MATCHER_SCOPE(ReshapeTo1D); + auto reshape_label = ngraph::pattern::wrap_type( + {pattern::any_input(), ngraph::pattern::wrap_type()}, + [](const Output& output) { + return output.get_partial_shape().rank().is_static() && output.get_partial_shape().rank().get_length() == 1; + }); - matcher_pass_callback callback = [](pattern::Matcher &m) -> bool { - m.get_match_root()->input(1).replace_source_output(ngraph::opset5::Constant::create(ngraph::element::i64, {1}, {-1})); + matcher_pass_callback callback = 
[](pattern::Matcher& m) -> bool { + m.get_match_root()->input(1).replace_source_output( + ngraph::opset5::Constant::create(ngraph::element::i64, {1}, {-1})); return true; }; - auto m = std::make_shared(reshape_label, matcher_name); + auto m = std::make_shared(reshape_label /*, matcher_name*/); register_matcher(m, callback); } diff --git a/inference-engine/src/transformations/src/transformations/smart_reshape/set_batch_size.cpp b/ngraph/core/src/pass/smart_reshape/set_batch_size.cpp similarity index 87% rename from inference-engine/src/transformations/src/transformations/smart_reshape/set_batch_size.cpp rename to ngraph/core/src/pass/smart_reshape/set_batch_size.cpp index 4256cc895eb..d0258bd2fe6 100644 --- a/inference-engine/src/transformations/src/transformations/smart_reshape/set_batch_size.cpp +++ b/ngraph/core/src/pass/smart_reshape/set_batch_size.cpp @@ -2,13 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - -#include -#include - -#include #include +#include +#include +#include +#include #include #include #include @@ -17,8 +15,9 @@ NGRAPH_RTTI_DEFINITION(ngraph::pass::SetBatchSize, "SetBatchSize", 0); bool ngraph::pass::SetBatchSize::run_on_function(std::shared_ptr f) { - RUN_ON_FUNCTION_SCOPE(SetBatchSize); - OV_ITT_SCOPED_TASK(itt::domains::IETransform, "ngraph::pass::SetBatchSize"); + // TODO: enable conditional compile + // RUN_ON_FUNCTION_SCOPE(SetBatchSize); + OV_ITT_SCOPED_TASK(ov::itt::domains::nGraph, "ngraph::pass::SetBatchSize"); ngraph::pass::Manager manager; // This pass must be called first in pipeline @@ -30,4 +29,3 @@ bool ngraph::pass::SetBatchSize::run_on_function(std::shared_ptr - #include - #include +#include +#include #include #include -#include #include #include -#include + +#include "itt.hpp" NGRAPH_RTTI_DEFINITION(ngraph::pass::SmartReshape, "SmartReshape", 0); bool ngraph::pass::SmartReshape::run_on_function(std::shared_ptr f) { - RUN_ON_FUNCTION_SCOPE(SmartReshape); + // TODO: enable conditional compile + // 
RUN_ON_FUNCTION_SCOPE(SmartReshape); ngraph::pass::Manager static_manager; // This pass must be called first in pipeline static_manager.register_pass(); diff --git a/inference-engine/src/transformations/src/transformations/smart_reshape/strided_slice_squeeze.cpp b/ngraph/core/src/pass/smart_reshape/strided_slice_squeeze.cpp similarity index 61% rename from inference-engine/src/transformations/src/transformations/smart_reshape/strided_slice_squeeze.cpp rename to ngraph/core/src/pass/smart_reshape/strided_slice_squeeze.cpp index cb03cd9b88e..f6a30598c58 100644 --- a/inference-engine/src/transformations/src/transformations/smart_reshape/strided_slice_squeeze.cpp +++ b/ngraph/core/src/pass/smart_reshape/strided_slice_squeeze.cpp @@ -3,24 +3,26 @@ // #include -#include - -#include #include #include #include #include +#include +#include NGRAPH_RTTI_DEFINITION(ngraph::pass::StridedSliceSqueeze, "ngraph::pass::StridedSliceSqueeze", 0); ngraph::pass::StridedSliceSqueeze::StridedSliceSqueeze() { - MATCHER_SCOPE(StridedSliceSqueeze); + // TODO: enable conditional compile + // MATCHER_SCOPE(StridedSliceSqueeze); auto ss_label = ngraph::pattern::wrap_type(pattern::consumers_count(1)); - auto squeeze_label = ngraph::pattern::wrap_type({ss_label, ngraph::pattern::wrap_type()}); + auto squeeze_label = + ngraph::pattern::wrap_type({ss_label, ngraph::pattern::wrap_type()}); - matcher_pass_callback callback = [](pattern::Matcher &m) -> bool { - const auto & squeeze = m.get_match_root(); - const auto & const_axes = std::dynamic_pointer_cast(squeeze->get_input_node_shared_ptr(1)); + matcher_pass_callback callback = [](pattern::Matcher& m) -> bool { + const auto& squeeze = m.get_match_root(); + const auto& const_axes = + std::dynamic_pointer_cast(squeeze->get_input_node_shared_ptr(1)); auto slice = std::dynamic_pointer_cast(squeeze->get_input_node_shared_ptr(0)); if (!const_axes || !slice) return false; @@ -36,25 +38,36 @@ ngraph::pass::StridedSliceSqueeze::StridedSliceSqueeze() { auto 
strides_vec = strides->cast_vector(); auto begin_mask = slice->get_begin_mask(); auto end_mask = slice->get_end_mask(); - auto new_axis_mask = slice->get_new_axis_mask().empty() ? std::vector(begin_mask.size(), 0) : slice->get_new_axis_mask(); - auto shrink_axis_mask = slice->get_shrink_axis_mask().empty() ? std::vector(begin_mask.size(), 0) : slice->get_shrink_axis_mask(); - auto ellipsis_mask = slice->get_ellipsis_mask().empty() ? std::vector(begin_mask.size(), 0) : slice->get_ellipsis_mask(); + auto new_axis_mask = slice->get_new_axis_mask().empty() ? std::vector(begin_mask.size(), 0) + : slice->get_new_axis_mask(); + auto shrink_axis_mask = slice->get_shrink_axis_mask().empty() ? std::vector(begin_mask.size(), 0) + : slice->get_shrink_axis_mask(); + auto ellipsis_mask = slice->get_ellipsis_mask().empty() ? std::vector(begin_mask.size(), 0) + : slice->get_ellipsis_mask(); - auto is_zero_vec = [](const std::vector & mask){ return std::all_of(mask.begin(), mask.end(), [](const int64_t& i){ return i == 0; }); }; + auto is_zero_vec = [](const std::vector& mask) { + return std::all_of(mask.begin(), mask.end(), [](const int64_t& i) { + return i == 0; + }); + }; if (!is_zero_vec(new_axis_mask) || !is_zero_vec(shrink_axis_mask) || !is_zero_vec(ellipsis_mask)) return false; - if (!std::all_of(strides_vec.begin(), strides_vec.end(), [](const int64_t& i){ return i == 1; })) + if (!std::all_of(strides_vec.begin(), strides_vec.end(), [](const int64_t& i) { + return i == 1; + })) return false; - const auto & axes = normalize_axes(squeeze->description(), const_axes->cast_vector(), squeeze->get_input_partial_shape(0).rank()); - for (const auto & axis : axes) { - if (begin_mask[axis]) { // corresponding dimension of the begin input is ignored. 
starting from 0 + const auto& axes = normalize_axes(squeeze->description(), + const_axes->cast_vector(), + squeeze->get_input_partial_shape(0).rank()); + for (const auto& axis : axes) { + if (begin_mask[axis]) { // corresponding dimension of the begin input is ignored. starting from 0 begin_vec[axis] = 0; end_vec[axis] = 1; begin_mask[axis] = 0; end_mask[axis] = 0; - } else { // corresponding dimension of the begin input is used for slicing start - if (begin_vec[axis] == -1) { // slicing the latest slice + } else { // corresponding dimension of the begin input is used for slicing start + if (begin_vec[axis] == -1) { // slicing the latest slice end_mask[axis] = 1; } else { end_vec[axis] = begin_vec[axis] + 1; @@ -65,32 +78,40 @@ ngraph::pass::StridedSliceSqueeze::StridedSliceSqueeze() { } auto new_slice = std::make_shared( - slice->input_value(0), - opset5::Constant::create(element::i64, {begin_vec.size()}, begin_vec), - opset5::Constant::create(element::i64, {end_vec.size()}, end_vec), - opset5::Constant::create(element::i64, {strides_vec.size()}, strides_vec), - begin_mask, end_mask, new_axis_mask, shrink_axis_mask, ellipsis_mask); + slice->input_value(0), + opset5::Constant::create(element::i64, {begin_vec.size()}, begin_vec), + opset5::Constant::create(element::i64, {end_vec.size()}, end_vec), + opset5::Constant::create(element::i64, {strides_vec.size()}, strides_vec), + begin_mask, + end_mask, + new_axis_mask, + shrink_axis_mask, + ellipsis_mask); replace_node(squeeze, new_slice); new_slice->set_friendly_name(slice->get_friendly_name()); copy_runtime_info(slice, new_slice); return true; }; - auto m = std::make_shared(squeeze_label, matcher_name); + auto m = std::make_shared(squeeze_label /*, matcher_name */); register_matcher(m, callback); } NGRAPH_RTTI_DEFINITION(ngraph::pass::SqueezeStridedSlice, "ngraph::pass::SqueezeStridedSlice", 0); ngraph::pass::SqueezeStridedSlice::SqueezeStridedSlice() { - MATCHER_SCOPE(SqueezeStridedSlice); + // TODO: enable 
conditional compile + // MATCHER_SCOPE(SqueezeStridedSlice); auto squeeze_label = ngraph::pattern::wrap_type( - {pattern::any_input(), ngraph::pattern::wrap_type()}, pattern::consumers_count(1)); - auto ss_label = ngraph::pattern::wrap_type({squeeze_label, pattern::any_input(), pattern::any_input(), pattern::any_input()}); + {pattern::any_input(), ngraph::pattern::wrap_type()}, + pattern::consumers_count(1)); + auto ss_label = ngraph::pattern::wrap_type( + {squeeze_label, pattern::any_input(), pattern::any_input(), pattern::any_input()}); - matcher_pass_callback callback = [](pattern::Matcher &m) -> bool { + matcher_pass_callback callback = [](pattern::Matcher& m) -> bool { auto slice = std::dynamic_pointer_cast(m.get_match_root()); auto squeeze = slice->get_input_node_shared_ptr(0); - const auto & const_axes = std::dynamic_pointer_cast(squeeze->get_input_node_shared_ptr(1)); + const auto& const_axes = + std::dynamic_pointer_cast(squeeze->get_input_node_shared_ptr(1)); if (!const_axes || !slice) return false; @@ -105,19 +126,30 @@ ngraph::pass::SqueezeStridedSlice::SqueezeStridedSlice() { auto strides_vec = strides->cast_vector(); auto begin_mask = slice->get_begin_mask(); auto end_mask = slice->get_end_mask(); - auto new_axis_mask = slice->get_new_axis_mask().empty() ? std::vector(begin_mask.size(), 0) : slice->get_new_axis_mask(); - auto shrink_axis_mask = slice->get_shrink_axis_mask().empty() ? std::vector(begin_mask.size(), 0) : slice->get_shrink_axis_mask(); - auto ellipsis_mask = slice->get_ellipsis_mask().empty() ? std::vector(begin_mask.size(), 0) : slice->get_ellipsis_mask(); + auto new_axis_mask = slice->get_new_axis_mask().empty() ? std::vector(begin_mask.size(), 0) + : slice->get_new_axis_mask(); + auto shrink_axis_mask = slice->get_shrink_axis_mask().empty() ? std::vector(begin_mask.size(), 0) + : slice->get_shrink_axis_mask(); + auto ellipsis_mask = slice->get_ellipsis_mask().empty() ? 
std::vector(begin_mask.size(), 0) + : slice->get_ellipsis_mask(); - auto is_zero_vec = [](const std::vector & mask){ return std::all_of(mask.begin(), mask.end(), [](const int64_t& i){ return i == 0; }); }; + auto is_zero_vec = [](const std::vector& mask) { + return std::all_of(mask.begin(), mask.end(), [](const int64_t& i) { + return i == 0; + }); + }; if (!is_zero_vec(new_axis_mask) || !is_zero_vec(shrink_axis_mask) || !is_zero_vec(ellipsis_mask)) return false; - if (!std::all_of(strides_vec.begin(), strides_vec.end(), [](const int64_t& i){ return i == 1; })) + if (!std::all_of(strides_vec.begin(), strides_vec.end(), [](const int64_t& i) { + return i == 1; + })) return false; - auto axes = normalize_axes(squeeze->description(), const_axes->cast_vector(), squeeze->get_input_partial_shape(0).rank()); + auto axes = normalize_axes(squeeze->description(), + const_axes->cast_vector(), + squeeze->get_input_partial_shape(0).rank()); std::sort(axes.begin(), axes.end()); - for (const auto & axis : axes) { + for (const auto& axis : axes) { begin_vec.insert(begin_vec.begin() + axis, 0); end_vec.insert(end_vec.begin() + axis, 1); strides_vec.insert(strides_vec.begin() + axis, 1); @@ -129,24 +161,29 @@ ngraph::pass::SqueezeStridedSlice::SqueezeStridedSlice() { } auto new_slice = std::make_shared( - slice->get_input_node_shared_ptr(0)->input_value(0), - opset5::Constant::create(element::i64, {begin_vec.size()}, begin_vec), - opset5::Constant::create(element::i64, {end_vec.size()}, end_vec), - opset5::Constant::create(element::i64, {strides_vec.size()}, strides_vec), - begin_mask, end_mask, new_axis_mask, shrink_axis_mask, ellipsis_mask); + slice->get_input_node_shared_ptr(0)->input_value(0), + opset5::Constant::create(element::i64, {begin_vec.size()}, begin_vec), + opset5::Constant::create(element::i64, {end_vec.size()}, end_vec), + opset5::Constant::create(element::i64, {strides_vec.size()}, strides_vec), + begin_mask, + end_mask, + new_axis_mask, + shrink_axis_mask, + 
ellipsis_mask); replace_node(slice, new_slice); new_slice->set_friendly_name(slice->get_friendly_name()); copy_runtime_info(slice, new_slice); return true; }; - auto m = std::make_shared(ss_label, matcher_name); + auto m = std::make_shared(ss_label /*, matcher_name */); register_matcher(m, callback); } NGRAPH_RTTI_DEFINITION(ngraph::pass::SharedSqueeze, "ngraph::pass::SharedSqueeze", 0); -bool squeezes_perform_the_same(std::shared_ptr lhs, std::shared_ptr rhs) { +bool squeezes_perform_the_same(std::shared_ptr lhs, + std::shared_ptr rhs) { size_t l_input_size = lhs->inputs().size(), r_input_size = rhs->inputs().size(); if (l_input_size != r_input_size) return false; @@ -164,13 +201,14 @@ bool squeezes_perform_the_same(std::shared_ptr lhs, std } bool ngraph::pass::SharedSqueeze::run_on_function(std::shared_ptr f) { - RUN_ON_FUNCTION_SCOPE(SharedSqueeze); - OV_ITT_SCOPED_TASK(itt::domains::IETransform, "ngraph::pass::SharedSqueeze"); + // TODO: enable conditional compile + // RUN_ON_FUNCTION_SCOPE(SharedSqueeze); + OV_ITT_SCOPED_TASK(ov::itt::domains::nGraph, "ngraph::pass::SharedSqueeze"); bool graph_rewritten = false; std::map, std::vector>> source_to_squeeze; - for (const auto & node : f->get_ordered_ops()) { + for (const auto& node : f->get_ordered_ops()) { // Recursively apply transformation for sub-graph based operations if (auto sub_graph_node = std::dynamic_pointer_cast(node)) { if (auto sub_graph = sub_graph_node->get_function()) { @@ -187,7 +225,8 @@ bool ngraph::pass::SharedSqueeze::run_on_function(std::shared_ptrget_instance_id() != child_squeeze->get_instance_id() && squeezes_perform_the_same(root_squeeze, child_squeeze)) { + if (root_squeeze->get_instance_id() != child_squeeze->get_instance_id() && + squeezes_perform_the_same(root_squeeze, child_squeeze)) { graph_rewritten |= replace_output_update_name(child_squeeze->output(0), root_squeeze->output(0)); } } diff --git a/ngraph/test/function.cpp b/ngraph/test/function.cpp index ff3a52e8b1c..604b779ba7b 
100644 --- a/ngraph/test/function.cpp +++ b/ngraph/test/function.cpp @@ -6,6 +6,7 @@ #include +#include "openvino/core/partial_shape.hpp" #include "openvino/opsets/opset8.hpp" TEST(function, get_input_by_tensor_name) { @@ -511,3 +512,243 @@ TEST(function, DISABLED_create_function_with_incorrect_tensor_names_from_const_f auto f = std::make_shared(relu, ov::ParameterVector{arg0}); ASSERT_THROW(f->validate_nodes_and_infer_types(), ov::Exception); } + +TEST(function_reshape, ReshapedDynamicShapeLayout) { + std::shared_ptr ngraph; + { + ov::PartialShape shape({-1, 3, 22, 22}); + ov::element::Type type(ov::element::Type_t::f32); + auto param = std::make_shared(type, shape); + param->get_output_tensor(0).set_names({"tensor"}); + auto relu = std::make_shared(param); + + ov::ParameterVector params = {param}; + ngraph = std::make_shared(relu, params); + } + + EXPECT_TRUE(ngraph->input().get_partial_shape().is_dynamic()); + + std::map new_shape; + new_shape["tensor"] = ov::Shape{1, 3, 22, 22}; + ASSERT_NO_THROW(ngraph->reshape(new_shape)); + + EXPECT_FALSE(ngraph->input().get_partial_shape().is_dynamic()); + EXPECT_FALSE(ngraph->get_parameters().front()->get_partial_shape().is_dynamic()); +} + +TEST(function_reshape, ReshapeBatchReLU) { + std::shared_ptr ngraph; + { + ov::PartialShape shape({1, 3, 22, 22}); + ov::element::Type type(ov::element::Type_t::f32); + auto param = std::make_shared(type, shape); + param->get_output_tensor(0).set_names({"tensor"}); + auto relu = std::make_shared(param); + auto result = std::make_shared(relu); + + ov::ParameterVector params = {param}; + ov::ResultVector results = {result}; + + ngraph = std::make_shared(results, params); + } + + ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ov::Shape({1, 3, 22, 22})); + ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ov::Shape({1, 3, 22, 22})); + + { + std::map new_shape; + new_shape["tensor"] = ov::PartialShape{2, 3, 22, 22}; + ASSERT_NO_THROW(ngraph->reshape(new_shape)); + } + + 
ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ov::Shape({2, 3, 22, 22})); + ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ov::Shape({2, 3, 22, 22})); +} + +TEST(function_reshape, ReshapeSpatialReLU) { + std::shared_ptr<ov::Function> ngraph; + { + ov::PartialShape shape({1, 3, 22, 22}); + ov::element::Type type(ov::element::Type_t::f32); + auto param = std::make_shared<ov::opset8::Parameter>(type, shape); + param->get_output_tensor(0).set_names({"tensor"}); + auto relu = std::make_shared<ov::opset8::Relu>(param); + auto result = std::make_shared<ov::opset8::Result>(relu); + + ov::ParameterVector params = {param}; + ov::ResultVector results = {result}; + + ngraph = std::make_shared<ov::Function>(results, params); + } + + ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ov::Shape({1, 3, 22, 22})); + ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ov::Shape({1, 3, 22, 22})); + + { + std::map<std::string, ov::PartialShape> new_shape; + new_shape["tensor"] = ov::PartialShape{1, 3, 25, 25}; + ASSERT_NO_THROW(ngraph->reshape(new_shape)); + } + + ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ov::Shape({1, 3, 25, 25})); + ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ov::Shape({1, 3, 25, 25})); +} + +TEST(function_reshape, ReshapeSpatialReLUWithoutReplaceParameter) { + std::shared_ptr<ov::Function> ngraph; + { + ov::PartialShape shape({1, 3, 22, 22}); + ov::element::Type type(ov::element::Type_t::f32); + auto param = std::make_shared<ov::opset8::Parameter>(type, shape); + auto relu = std::make_shared<ov::opset8::Relu>(param); + auto result = std::make_shared<ov::opset8::Result>(relu); + + ov::ParameterVector params = {param}; + ov::ResultVector results = {result}; + + ngraph = std::make_shared<ov::Function>(results, params); + } + + ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ov::Shape({1, 3, 22, 22})); + ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ov::Shape({1, 3, 22, 22})); + + { + ngraph->get_parameters()[0]->set_partial_shape({1, 3, 25, 25}); + ngraph->validate_nodes_and_infer_types(); + } + + ASSERT_EQ(ngraph->input().get_partial_shape(), ov::Shape({1, 3, 25, 25})); + ASSERT_EQ(ngraph->output().get_partial_shape(), ov::Shape({1, 3, 25, 
25})); +} + +TEST(function_reshape, ReshapeSpatialReLUStaticToDynamic) { + const ov::PartialShape refShape{1, 3, ov::Dimension::dynamic(), 25}; + std::shared_ptr<ov::Function> ngraph; + { + ov::PartialShape shape({1, 3, 22, 22}); + ov::element::Type type(ov::element::Type_t::f32); + auto param = std::make_shared<ov::opset8::Parameter>(type, shape); + param->get_output_tensor(0).set_names({"tensor"}); + auto relu = std::make_shared<ov::opset8::Relu>(param); + auto result = std::make_shared<ov::opset8::Result>(relu); + + ov::ParameterVector params = {param}; + ov::ResultVector results = {result}; + + ngraph = std::make_shared<ov::Function>(results, params); + } + + ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ov::Shape({1, 3, 22, 22})); + ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ov::Shape({1, 3, 22, 22})); + + { + std::map<std::string, ov::PartialShape> new_shape; + new_shape["tensor"] = refShape; + ASSERT_NO_THROW(ngraph->reshape(new_shape)); + } + + ASSERT_TRUE(ngraph->input(0).get_partial_shape().is_dynamic()); + ASSERT_TRUE(ngraph->output(0).get_partial_shape().is_dynamic()); + ASSERT_EQ(ngraph->input(0).get_partial_shape(), refShape); + ASSERT_EQ(ngraph->output(0).get_partial_shape(), refShape); +} + +TEST(function_reshape, ReshapeSpatialReLUStaticToFullyDynamic) { + const ov::PartialShape refShape = ov::PartialShape::dynamic(); + std::shared_ptr<ov::Function> ngraph; + { + ov::PartialShape shape({1, 3, 22, 22}); + ov::element::Type type(ov::element::Type_t::f32); + auto param = std::make_shared<ov::opset8::Parameter>(type, shape); + param->get_output_tensor(0).set_names({"tensor"}); + auto relu = std::make_shared<ov::opset8::Relu>(param); + auto result = std::make_shared<ov::opset8::Result>(relu); + + ov::ParameterVector params = {param}; + ov::ResultVector results = {result}; + + ngraph = std::make_shared<ov::Function>(results, params); + } + + ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ov::Shape({1, 3, 22, 22})); + ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ov::Shape({1, 3, 22, 22})); + + { + std::map<std::string, ov::PartialShape> new_shape; + new_shape["tensor"] = refShape; + ASSERT_NO_THROW(ngraph->reshape(new_shape)); + } + + 
ASSERT_TRUE(ngraph->input().get_partial_shape().is_dynamic()); + ASSERT_TRUE(ngraph->output().get_partial_shape().is_dynamic()); + ASSERT_EQ(ngraph->input().get_partial_shape(), refShape); + ASSERT_EQ(ngraph->output().get_partial_shape(), refShape); +} + +TEST(function_reshape, ReshapeSpatialReLUDynamicToDynamic) { + const ov::PartialShape refShape{1, 3, ov::Dimension::dynamic(), 25}; + std::shared_ptr<ov::Function> ngraph; + { + ov::PartialShape shape({1, 3, 22, ov::Dimension::dynamic()}); + ov::element::Type type(ov::element::Type_t::f32); + auto param = std::make_shared<ov::opset8::Parameter>(type, shape); + param->get_output_tensor(0).set_names({"tensor"}); + auto relu = std::make_shared<ov::opset8::Relu>(param); + auto result = std::make_shared<ov::opset8::Result>(relu); + + ov::ParameterVector params = {param}; + ov::ResultVector results = {result}; + + ngraph = std::make_shared<ov::Function>(results, params); + } + + ASSERT_EQ(ngraph->input().get_partial_shape(), ov::PartialShape({1, 3, 22, ov::Dimension::dynamic()})); + ASSERT_EQ(ngraph->output().get_partial_shape(), ov::PartialShape({1, 3, 22, ov::Dimension::dynamic()})); + + { + std::map<std::string, ov::PartialShape> new_shape; + new_shape["tensor"] = refShape; + ASSERT_NO_THROW(ngraph->reshape(new_shape)); + } + + ASSERT_TRUE(ngraph->input().get_partial_shape().is_dynamic()); + ASSERT_TRUE(ngraph->output().get_partial_shape().is_dynamic()); + ASSERT_EQ(ngraph->input().get_partial_shape(), refShape); + ASSERT_EQ(ngraph->output().get_partial_shape(), refShape); +} + +TEST(function_reshape, TestInvalidReshape) { + std::shared_ptr<ov::Function> f; + { + auto input = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::Shape{1, 1000, 4}); + input->get_output_tensor(0).set_names({"tensor"}); + auto shape = ov::op::v0::Constant::create(ov::element::i64, {2}, {1, 4000}); + auto reshape = std::make_shared<ov::opset8::Reshape>(input, shape, true); + f = std::make_shared<ov::Function>(ov::OutputVector{reshape}, ov::ParameterVector{input}); + } + + ASSERT_ANY_THROW(f->reshape({{"tensor", ov::Shape({4})}})); + + auto param = f->get_parameters().front(); + ASSERT_EQ(param->get_output_shape(0), 
ov::Shape({1, 1000, 4})); + + ASSERT_NO_THROW(f->reshape({{"tensor", ov::Shape({1, 1000, 4})}})); +} + +TEST(function_reshape, TestReshapeWithInvalidTensorName) { + std::shared_ptr<ov::Function> f; + { + auto input = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::Shape{1, 1000, 4}); + input->set_friendly_name("param"); + input->get_output_tensor(0).set_names({"tensor"}); + auto shape = ov::op::v0::Constant::create(ov::element::i64, {2}, {1, 4000}); + auto reshape = std::make_shared<ov::opset8::Reshape>(input, shape, true); + f = std::make_shared<ov::Function>(ov::OutputVector{reshape}, ov::ParameterVector{input}); + } + + // both operation names and tensor names are specified + ASSERT_ANY_THROW(f->reshape({{"param", ov::Shape({4, 4, 4})}, {"tensor", ov::Shape({4, 4, 4})}})); + + // operation name does not work + ASSERT_ANY_THROW(f->reshape({{"param", ov::Shape({4, 4, 4})}})); +}