diff --git a/docs/nGraph_DG/nGraphTransformation.md b/docs/nGraph_DG/nGraphTransformation.md
index 318aa50f435..96e89ec7691 100644
--- a/docs/nGraph_DG/nGraphTransformation.md
+++ b/docs/nGraph_DG/nGraphTransformation.md
@@ -253,6 +253,13 @@ To eliminate operation, nGraph has special method that considers all limitations
`replace_output_update_name` in case of successful replacement it automatically preserves friendly name and runtime info.
+## Transformation conditional compilation
+
+The transformation library has two internal macros to support the conditional compilation feature; their typical placement is shown in the sketch below.
+
+* `MATCHER_SCOPE(region)` - allows disabling the MatcherPass if the matcher is not used. The region name must be unique. This macro creates a local variable `matcher_name`, which you should use as the matcher name.
+* `RUN_ON_FUNCTION_SCOPE(region)` - allows disabling the `run_on_function` pass if it is not used. The region name must be unique.
+
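+A minimal sketch of how these macros are typically placed is given below. The pass names `MyFusion` and `MyFunctionPass` are hypothetical and used only for illustration; the macro behavior follows the definitions in `itt.hpp`.
+
+```cpp
+// Hypothetical MatcherPass constructor. MATCHER_SCOPE defines the local
+// variable `matcher_name` and, in a selective build, returns early from the
+// constructor when this matcher is excluded.
+ngraph::pass::MyFusion::MyFusion() {
+    MATCHER_SCOPE(MyFusion);
+    auto relu = ngraph::pattern::wrap_type<ngraph::opset4::Relu>();
+
+    ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
+        // ... transformation logic ...
+        return false;
+    };
+
+    // Pass the generated `matcher_name` instead of a string literal.
+    auto m = std::make_shared<ngraph::pattern::Matcher>(relu, matcher_name);
+    register_matcher(m, callback);
+}
+
+// Hypothetical function pass. RUN_ON_FUNCTION_SCOPE throws in a selective
+// build if a pass that was excluded from compilation is invoked.
+bool ngraph::pass::MyFunctionPass::run_on_function(std::shared_ptr<ngraph::Function> f) {
+    RUN_ON_FUNCTION_SCOPE(MyFunctionPass);
+    bool rewritten = false;
+    // ... iterate over f->get_ordered_ops() and rewrite the function ...
+    return rewritten;
+}
+```
+
+In a default (non-selective) build, `MATCHER_SCOPE` only defines `matcher_name` and `RUN_ON_FUNCTION_SCOPE` expands to nothing, so existing passes behave exactly as before.
+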
## Transformation writing essentials
When developing a transformation, you need to follow these transformation rules:
diff --git a/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.cpp b/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.cpp
index 17550da5b05..f88785151f8 100644
--- a/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.cpp
+++ b/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.cpp
@@ -81,12 +81,7 @@ bool ngraph::pass::ConvertOpSet1ToLegacy::run_on_function(std::shared_ptr();
// Convolution/Deconvolution/FullyConnected fusions
- auto convert_convolutions = manager.register_pass();
- convert_convolutions->add_matcher();
- convert_convolutions->add_matcher();
- convert_convolutions->add_matcher();
- convert_convolutions->add_matcher();
- convert_convolutions->set_name("ngraph::pass::ConvertConvolutions");
+ manager.register_pass();
// Convolution/Deconvolution/FullyConnected fusions
auto fusion = manager.register_pass();
diff --git a/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_prior_to_ie_prior.cpp b/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_prior_to_ie_prior.cpp
index b37fde6348e..88ebbd8263d 100644
--- a/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_prior_to_ie_prior.cpp
+++ b/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_prior_to_ie_prior.cpp
@@ -295,4 +295,4 @@ ngraph::pass::ConvertPriorBoxClusteredToLegacy::ConvertPriorBoxClusteredToLegacy
auto m = std::make_shared(unsqueeze, "ConvertPriorBoxClusteredToLegacy");
register_matcher(m, callback);
-}
\ No newline at end of file
+}
diff --git a/inference-engine/src/transformations/CMakeLists.txt b/inference-engine/src/transformations/CMakeLists.txt
index 4ca91c32a43..17617ac3855 100644
--- a/inference-engine/src/transformations/CMakeLists.txt
+++ b/inference-engine/src/transformations/CMakeLists.txt
@@ -28,7 +28,7 @@ ie_add_vs_version_file(NAME ${TARGET_NAME}
FILEDESCRIPTION "Inference Engine Transformations library")
target_link_libraries(${TARGET_NAME} PUBLIC ${NGRAPH_LIBRARIES}
- PRIVATE openvino::itt ngraph::builder pugixml)
+ PRIVATE openvino::conditional_compilation openvino::itt ngraph::builder pugixml)
target_include_directories(${TARGET_NAME} PUBLIC ${PUBLIC_HEADERS_DIR}
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/src")
diff --git a/inference-engine/src/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp b/inference-engine/src/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp
index d0b9251ad00..37a275c2bae 100644
--- a/inference-engine/src/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp
+++ b/inference-engine/src/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp
@@ -48,37 +48,19 @@ public:
class ngraph::pass::ConvertReduceMeanToPooling: public ConvertReduceBase {
public:
NGRAPH_RTTI_DECLARATION;
- ConvertReduceMeanToPooling() {
- auto m = std::make_shared(
- ngraph::pattern::wrap_type({pattern::any_input(pattern::has_static_shape()),
- pattern::wrap_type()},
- pattern::has_static_shape()), "ConvertReduceMean");
- register_matcher(m, convert_reduce_to_pooling());
- }
+ ConvertReduceMeanToPooling();
};
class ngraph::pass::ConvertReduceMaxToPooling: public ConvertReduceBase {
public:
NGRAPH_RTTI_DECLARATION;
- ConvertReduceMaxToPooling() {
- auto m = std::make_shared(
- ngraph::pattern::wrap_type({pattern::any_input(pattern::has_static_shape()),
- pattern::wrap_type()},
- pattern::has_static_shape()), "ConvertReduceMax");
- register_matcher(m, convert_reduce_to_pooling());
- }
+ ConvertReduceMaxToPooling();
};
class ngraph::pass::ConvertReduceSumToPooling: public ConvertReduceBase {
public:
NGRAPH_RTTI_DECLARATION;
- ConvertReduceSumToPooling() {
- auto m = std::make_shared(
- ngraph::pattern::wrap_type({pattern::any_input(pattern::has_static_shape()),
- pattern::wrap_type()},
- pattern::has_static_shape()), "ConvertReduceSum");
- register_matcher(m, convert_reduce_to_pooling());
- }
+ ConvertReduceSumToPooling();
};
template
diff --git a/inference-engine/src/transformations/src/itt.hpp b/inference-engine/src/transformations/src/itt.hpp
new file mode 100644
index 00000000000..cabba94bfc1
--- /dev/null
+++ b/inference-engine/src/transformations/src/itt.hpp
@@ -0,0 +1,71 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+/**
+ * @brief Defines openvino domains for tracing
+ * @file itt.hpp
+ */
+
+#pragma once
+
+#include <openvino/cc/selective_build.h>
+#include <openvino/itt.hpp>
+
+namespace ngraph {
+namespace pass {
+namespace itt {
+namespace domains {
+ OV_ITT_DOMAIN(IETransform);
+} // namespace domains
+} // namespace itt
+} // namespace pass
+} // namespace ngraph
+
+OV_CC_DOMAINS(ngraph_pass);
+OV_CC_DOMAINS(internal_op);
+
+/*
+ * RUN_ON_FUNCTION_SCOPE macro allows disabling the run_on_function pass
+ * MATCHER_SCOPE macro allows disabling the MatcherPass if the matcher isn't applied
+ * INTERNAL_OP_SCOPE macro allows disabling parts of internal nGraph operations if they are not used
+ */
+#if defined(SELECTIVE_BUILD_ANALYZER)
+#define RUN_ON_FUNCTION_SCOPE(region) OV_SCOPE(ngraph_pass, OV_CC_CAT(region, _run_on_function))
+#define MATCHER_SCOPE(region) \
+ const std::string matcher_name(OV_CC_TOSTRING(region))
+
+#define INTERNAL_OP_SCOPE(region) OV_SCOPE(internal_op, region)
+
+#elif defined(SELECTIVE_BUILD)
+
+#define MATCHER_SCOPE_(scope, region) \
+ if (OV_CC_SCOPE_IS_ENABLED(OV_CC_CAT3(scope, _, region)) == 0) \
+ throw ngraph::ngraph_error(std::string(OV_CC_TOSTRING(OV_CC_CAT3(scope, _, region))) + \
+ " is disabled!")
+
+#define MATCHER_SCOPE(region) \
+ const std::string matcher_name(OV_CC_TOSTRING(region)); \
+ if (OV_CC_SCOPE_IS_ENABLED(OV_CC_CAT3(ngraph_pass, _, region)) == 0) \
+ return
+#define INTERNAL_OP_SCOPE(region) MATCHER_SCOPE_(internal_op, region)
+#define RUN_ON_FUNCTION_SCOPE(region) MATCHER_SCOPE_(ngraph_pass, OV_CC_CAT(region, _run_on_function))
+
+#else
+#define MATCHER_SCOPE(region) \
+ const std::string matcher_name(OV_CC_TOSTRING(region))
+#define INTERNAL_OP_SCOPE(region)
+#define RUN_ON_FUNCTION_SCOPE(region)
+#endif
diff --git a/inference-engine/src/transformations/src/ngraph_ops/convolution_ie.cpp b/inference-engine/src/transformations/src/ngraph_ops/convolution_ie.cpp
index 2820c1d6325..a3c9417f2c8 100644
--- a/inference-engine/src/transformations/src/ngraph_ops/convolution_ie.cpp
+++ b/inference-engine/src/transformations/src/ngraph_ops/convolution_ie.cpp
@@ -3,6 +3,7 @@
//
#include "ngraph_ops/convolution_ie.hpp"
+#include "itt.hpp"
#include
#include
@@ -99,6 +100,7 @@ op::ConvolutionIE::ConvolutionIE(const Output& data_batch,
}
void op::ConvolutionIE::validate_and_infer_types() {
+ INTERNAL_OP_SCOPE(ConvolutionIE_validate_and_infer_types);
PartialShape data_batch_shape = get_input_partial_shape(0);
PartialShape filters_shape = get_input_partial_shape(1);
@@ -145,6 +147,7 @@ void op::ConvolutionIE::validate_and_infer_types() {
}
shared_ptr op::ConvolutionIE::clone_with_new_inputs(const ngraph::OutputVector & new_args) const {
+ INTERNAL_OP_SCOPE(ConvolutionIE_clone_with_new_inputs);
if (new_args.size() == 2) {
return make_shared(new_args.at(0),
new_args.at(1),
@@ -172,6 +175,7 @@ shared_ptr op::ConvolutionIE::clone_with_new_inputs(const ngraph::OutputVe
}
bool op::ConvolutionIE::visit_attributes(AttributeVisitor& visitor) {
+ INTERNAL_OP_SCOPE(ConvolutionIE_visit_attributes);
visitor.on_attribute("strides", m_strides);
visitor.on_attribute("dilations", m_dilations);
visitor.on_attribute("pads_begin", m_pads_begin);
diff --git a/inference-engine/src/transformations/src/ngraph_ops/deconvolution_ie.cpp b/inference-engine/src/transformations/src/ngraph_ops/deconvolution_ie.cpp
index dca81e5b44a..56a281ffbaa 100644
--- a/inference-engine/src/transformations/src/ngraph_ops/deconvolution_ie.cpp
+++ b/inference-engine/src/transformations/src/ngraph_ops/deconvolution_ie.cpp
@@ -6,6 +6,7 @@
#include
#include
#include
+#include "itt.hpp"
#include "ngraph_ops/deconvolution_ie.hpp"
@@ -64,6 +65,7 @@ op::DeconvolutionIE::DeconvolutionIE(const Output& data,
}
void op::DeconvolutionIE::validate_and_infer_types() {
+ INTERNAL_OP_SCOPE(DeconvolutionIE_validate_and_infer_types);
// To calculate output shape we use opset1::GroupConvolutionBackPropData
// but before we need to reshape weights from I(G*O)YX to GIOYX
auto weights = input_value(1);
@@ -89,6 +91,7 @@ void op::DeconvolutionIE::validate_and_infer_types() {
}
shared_ptr op::DeconvolutionIE::clone_with_new_inputs(const ngraph::OutputVector &new_args) const {
+ INTERNAL_OP_SCOPE(DeconvolutionIE_clone_with_new_inputs);
if (new_args.size() == 2) {
return make_shared(new_args.at(0),
new_args.at(1),
@@ -117,10 +120,11 @@ shared_ptr op::DeconvolutionIE::clone_with_new_inputs(const ngraph::Output
}
bool op::DeconvolutionIE::visit_attributes(AttributeVisitor& visitor) {
- visitor.on_attribute("strides", m_strides);
- visitor.on_attribute("dilations", m_dilations);
- visitor.on_attribute("pads_begin", m_pads_begin);
- visitor.on_attribute("pads_end", m_pads_end);
- visitor.on_attribute("group", m_group);
- return true;
+ INTERNAL_OP_SCOPE(DeconvolutionIE_visit_attributes);
+ visitor.on_attribute("strides", m_strides);
+ visitor.on_attribute("dilations", m_dilations);
+ visitor.on_attribute("pads_begin", m_pads_begin);
+ visitor.on_attribute("pads_end", m_pads_end);
+ visitor.on_attribute("group", m_group);
+ return true;
}
diff --git a/inference-engine/src/transformations/src/ngraph_ops/nms_ie_internal.cpp b/inference-engine/src/transformations/src/ngraph_ops/nms_ie_internal.cpp
index 2053e869434..4a406396988 100644
--- a/inference-engine/src/transformations/src/ngraph_ops/nms_ie_internal.cpp
+++ b/inference-engine/src/transformations/src/ngraph_ops/nms_ie_internal.cpp
@@ -6,6 +6,7 @@
#include
#include "ngraph_ops/nms_ie_internal.hpp"
+#include "itt.hpp"
using namespace std;
using namespace ngraph;
@@ -40,6 +41,7 @@ op::internal::NonMaxSuppressionIEInternal::NonMaxSuppressionIEInternal(const Out
}
std::shared_ptr op::internal::NonMaxSuppressionIEInternal::clone_with_new_inputs(const ngraph::OutputVector &new_args) const {
+ INTERNAL_OP_SCOPE(internal_NonMaxSuppressionIEInternal_clone_with_new_inputs);
if (new_args.size() == 6) {
return make_shared(new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3),
new_args.at(4), new_args.at(5), m_center_point_box, m_sort_result_descending,
@@ -53,6 +55,7 @@ std::shared_ptr op::internal::NonMaxSuppressionIEInternal::clone_with_new_
}
bool op::internal::NonMaxSuppressionIEInternal::visit_attributes(AttributeVisitor& visitor) {
+ INTERNAL_OP_SCOPE(internal_NonMaxSuppressionIEInternal_visit_attributes);
visitor.on_attribute("center_point_box", m_center_point_box);
visitor.on_attribute("sort_result_descending", m_sort_result_descending);
visitor.on_attribute("output_type", m_output_type);
@@ -79,6 +82,7 @@ int64_t op::internal::NonMaxSuppressionIEInternal::max_boxes_output_from_input()
}
void op::internal::NonMaxSuppressionIEInternal::validate_and_infer_types() {
+ INTERNAL_OP_SCOPE(internal_NonMaxSuppressionIEInternal_validate_and_infer_types);
const auto boxes_ps = get_input_partial_shape(boxes_port);
const auto scores_ps = get_input_partial_shape(scores_port);
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/algebraic_simplification.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/algebraic_simplification.cpp
index 589e31d8d3b..27265821086 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/algebraic_simplification.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/algebraic_simplification.cpp
@@ -19,6 +19,7 @@
#include
#include "transformations/common_optimizations/algebraic_simplification.hpp"
+#include "itt.hpp"
#include
#include
@@ -234,6 +235,7 @@ static bool replace_transpose_with_reshape(shared_ptr transpose) {
}
bool pass::AlgebraicSimplification::run_on_function(shared_ptr f) {
+ RUN_ON_FUNCTION_SCOPE(AlgebraicSimplification);
static const unordered_map)>> ops_to_simplifiers =
{{opset3::Gather::type_info, simplify_gather},
{opset2::ShapeOf::type_info, simplify_gather_shapeof},
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/broadcast_elementwise_fusion.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/broadcast_elementwise_fusion.cpp
index 5d3482e4474..d5824caf670 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/broadcast_elementwise_fusion.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/broadcast_elementwise_fusion.cpp
@@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
+#include "itt.hpp"
#include "transformations/common_optimizations/broadcast_elementwise_fusion.hpp"
#include
@@ -51,6 +52,7 @@ bool can_eliminate_broadcast(const ngraph::Output& eltwise,
}
ngraph::pass::BroadcastElementwiseFusion::BroadcastElementwiseFusion() {
+ MATCHER_SCOPE(BroadcastElementwiseFusion);
auto broadcast_input = pattern::any_input();
auto broadcast = pattern::wrap_type({broadcast_input, pattern::any_input()});
auto eltwise_input = pattern::any_input();
@@ -76,6 +78,6 @@ ngraph::pass::BroadcastElementwiseFusion::BroadcastElementwiseFusion() {
return false;
};
- auto m = std::make_shared(eltwise, "BroadcastElementwiseFusion");
+ auto m = std::make_shared(eltwise, matcher_name);
register_matcher(m, callback);
}
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/clamp_fusion.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/clamp_fusion.cpp
index 00e79f73afd..4b3ade67cdb 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/clamp_fusion.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/clamp_fusion.cpp
@@ -11,11 +11,13 @@
#include
#include
#include
+#include "itt.hpp"
NGRAPH_RTTI_DEFINITION(ngraph::pass::ClampFusion, "ClampFusion", 0);
ngraph::pass::ClampFusion::ClampFusion() {
+ MATCHER_SCOPE(ClampFusion);
auto data_pattern = ngraph::pattern::any_input();
auto min_const_pattern = ngraph::pattern::wrap_type();
auto max_const_pattern = ngraph::pattern::wrap_type();
@@ -53,6 +55,6 @@ ngraph::pass::ClampFusion::ClampFusion() {
return true;
};
- auto m = std::make_shared(min_pattern, "ClampFusion");
+ auto m = std::make_shared(min_pattern, matcher_name);
this->register_matcher(m, callback);
}
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp
index b89a9e78689..0755782d3d3 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp
@@ -5,7 +5,7 @@
#include
#include "transformations/init_node_info.hpp"
-#include "transformations/itt.hpp"
+#include "itt.hpp"
#include "transformations/common_optimizations/algebraic_simplification.hpp"
#include "transformations/common_optimizations/broadcast_elementwise_fusion.hpp"
#include "transformations/common_optimizations/nop_elimination.hpp"
@@ -55,6 +55,7 @@
NGRAPH_RTTI_DEFINITION(ngraph::pass::CommonOptimizations, "CommonOptimizations", 0);
bool ngraph::pass::CommonOptimizations::run_on_function(std::shared_ptr f) {
+ RUN_ON_FUNCTION_SCOPE(CommonOptimizations);
ngraph::pass::Manager manager(get_pass_config());
// This pass must be called first in pipeline
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/conv_bias_fusion.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/conv_bias_fusion.cpp
index 99ac553c271..e4225bdcb15 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/conv_bias_fusion.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/conv_bias_fusion.cpp
@@ -7,6 +7,7 @@
#include
#include
#include
+#include
#include
#include
@@ -16,6 +17,7 @@
#include
#include
+#include "itt.hpp"
using namespace ngraph;
@@ -65,127 +67,139 @@ bool IsConvInLowPrecision(const std::shared_ptr& conv) {
}
template
-ngraph::matcher_pass_callback get_callback() {
- ngraph::matcher_pass_callback callback = [](ngraph::pattern::Matcher &m) {
- auto eltwise = m.get_match_root();
+bool conv_callback(ngraph::pattern::Matcher &m) {
+ auto eltwise = m.get_match_root();
- std::shared_ptr m_const;
- std::shared_ptr m_conv;
- // FIXME: use auto [m_conv, m_const] when C++17 is available
- std::tie(m_conv, m_const) = parse_eltwise_inputs(eltwise);
- if (!m_conv || !m_const) {
- return false;
- }
+ std::shared_ptr m_const;
+ std::shared_ptr m_conv;
+ // FIXME: use auto [m_conv, m_const] when C++17 is available
+ std::tie(m_conv, m_const) = parse_eltwise_inputs(eltwise);
+ if (!m_conv || !m_const) {
+ return false;
+ }
- const auto & const_shape = m_const->get_shape();
- const auto & output_pshape = m_conv->get_output_partial_shape(0);
+ const auto & const_shape = m_const->get_shape();
+ const auto & output_pshape = m_conv->get_output_partial_shape(0);
- if (output_pshape.rank().is_dynamic() || output_pshape[1].is_dynamic()) {
- return false;
- }
+ if (output_pshape.rank().is_dynamic() || output_pshape[1].is_dynamic()) {
+ return false;
+ }
- const auto & output_rank = output_pshape.rank().get_length();
+ const auto & output_rank = output_pshape.rank().get_length();
- const int64_t channel_dim = output_pshape[1].get_length();
+ const int64_t channel_dim = output_pshape[1].get_length();
- bool is_scalar_multiplier(shape_size(const_shape) == 1);
+ bool is_scalar_multiplier(shape_size(const_shape) == 1);
- // Check that constant has shape [1, C, 1, 1] where the number of 1 is equal to
- // the number of spatial dimensions or it's a scalar. That means that Constant
- // applied per channel and can be fused into Convolution weights.
- // Also Constant shape rank must be less or equal Convolution output shape
- // otherwise fusion will break output broadcasting
- auto expected_shape = Shape(output_rank, 1);
- expected_shape[1] = channel_dim;
+ // Check that constant has shape [1, C, 1, 1] where the number of 1 is equal to
+ // the number of spatial dimensions or it's a scalar. That means that Constant
+ // applied per channel and can be fused into Convolution weights.
+ // Also Constant shape rank must be less or equal Convolution output shape
+ // otherwise fusion will break output broadcasting
+ auto expected_shape = Shape(output_rank, 1);
+ expected_shape[1] = channel_dim;
- if (op::util::check_for_broadcast(expected_shape, const_shape)) {
- return false;
- }
+ if (op::util::check_for_broadcast(expected_shape, const_shape)) {
+ return false;
+ }
- // Broadcast constant to [1, C, 1, 1] where the number of 1 is equal to
- // the number of weights dimensions.
- Output final_const = m_const;
- if (is_scalar_multiplier) {
- final_const = op::util::broadcastTo(m_const, expected_shape);
- }
+ // Broadcast constant to [1, C, 1, 1] where the number of 1 is equal to
+ // the number of weights dimensions.
+ Output final_const = m_const;
+ if (is_scalar_multiplier) {
+ final_const = op::util::broadcastTo(m_const, expected_shape);
+ }
- if (final_const.get_shape().size() > 1) {
- final_const = std::make_shared(final_const,
- ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {channel_dim}), true);
- }
+ if (final_const.get_shape().size() > 1) {
+ final_const = std::make_shared(final_const,
+ ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {channel_dim}), true);
+ }
- ngraph::Output new_conv, new_weights, new_bias;
- if (std::dynamic_pointer_cast(eltwise)) {
- // Fuse: ConvolutionIE/DeconvolutionIE->Add
- if (m_conv->inputs().size() == 2) {
- new_bias = final_const;
- } else {
- new_bias = std::make_shared(final_const, m_conv->input_value(2));
- }
- new_conv = m_conv->clone_with_new_inputs({m_conv->input_value(0), m_conv->input_value(1), new_bias});
- } else if (std::is_same() && std::dynamic_pointer_cast(eltwise) &&
- !IsConvInLowPrecision(m_conv)) {
- // Fuse: ConvolutionIE->Mul
- auto weights_shape = m_conv->input(1).get_shape();
-
- ngraph::Shape weights_const_shape(weights_shape.size(), 1);
- weights_const_shape[0] = weights_shape[0];
-
- auto const_reshape = std::make_shared(final_const,
- ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{weights_const_shape.size()}, weights_const_shape), true);
- new_weights = std::make_shared (m_conv->input_value(1), const_reshape);
- if (m_conv->inputs().size() == 2) {
- new_conv = m_conv->clone_with_new_inputs({m_conv->input_value(0), new_weights});
- } else {
- auto bias_reshape = std::make_shared(final_const,
- ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {weights_shape[0]}), true);
- new_bias = std::make_shared(bias_reshape, final_const);
- new_conv = m_conv->clone_with_new_inputs({m_conv->input_value(0), new_weights, new_bias});
- }
+ ngraph::Output new_conv, new_weights, new_bias;
+ if (std::dynamic_pointer_cast(eltwise)) {
+ // Fuse: ConvolutionIE/DeconvolutionIE->Add
+ if (m_conv->inputs().size() == 2) {
+ new_bias = final_const;
} else {
- return false;
+ new_bias = std::make_shared(final_const, m_conv->input_value(2));
}
+ new_conv = m_conv->clone_with_new_inputs({m_conv->input_value(0), m_conv->input_value(1), new_bias});
+ } else if (std::is_same() && std::dynamic_pointer_cast(eltwise) &&
+ !IsConvInLowPrecision(m_conv)) {
+ // Fuse: ConvolutionIE->Mul
+ auto weights_shape = m_conv->input(1).get_shape();
- ngraph::copy_runtime_info({m_conv, eltwise}, new_conv.get_node_shared_ptr());
- new_conv.get_node_shared_ptr()->set_friendly_name(m.get_match_root()->get_friendly_name());
- ngraph::replace_node(m.get_match_root(), new_conv.get_node_shared_ptr());
- return true;
- };
- return callback;
+ ngraph::Shape weights_const_shape(weights_shape.size(), 1);
+ weights_const_shape[0] = weights_shape[0];
+
+ auto const_reshape = std::make_shared(final_const,
+ ngraph::opset1::Constant::create(ngraph::element::i64,
+ ngraph::Shape{weights_const_shape.size()},
+ weights_const_shape),
+ true);
+ new_weights = std::make_shared (m_conv->input_value(1), const_reshape);
+ if (m_conv->inputs().size() == 2) {
+ new_conv = m_conv->clone_with_new_inputs({m_conv->input_value(0), new_weights});
+ } else {
+ auto bias_reshape = std::make_shared(final_const,
+ ngraph::opset1::Constant::create(ngraph::element::i64,
+ ngraph::Shape{1},
+ {weights_shape[0]}),
+ true);
+ new_bias = std::make_shared(bias_reshape, final_const);
+ new_conv = m_conv->clone_with_new_inputs({m_conv->input_value(0), new_weights, new_bias});
+ }
+ } else {
+ return false;
+ }
+
+ ngraph::copy_runtime_info({m_conv, eltwise}, new_conv.get_node_shared_ptr());
+ new_conv.get_node_shared_ptr()->set_friendly_name(m.get_match_root()->get_friendly_name());
+ ngraph::replace_node(m.get_match_root(), new_conv.get_node_shared_ptr());
+ return true;
}
NGRAPH_RTTI_DEFINITION(ngraph::pass::ConvAddFusion, "ConvAddFusion", 0);
ngraph::pass::ConvAddFusion::ConvAddFusion() {
+ MATCHER_SCOPE(ConvAddFusion);
auto conv = ngraph::pattern::wrap_type(pattern::consumers_count(1));
auto add = ngraph::pattern::wrap_type({conv, std::make_shared()});
- matcher_pass_callback callback = get_callback();
+ matcher_pass_callback callback = [](ngraph::pattern::Matcher &m) {
+ return conv_callback(m);
+ };
- auto m = std::make_shared(add, "ConvAddFusion");
+ auto m = std::make_shared(add, matcher_name);
register_matcher(m, callback);
}
NGRAPH_RTTI_DEFINITION(ngraph::pass::ConvMultiplyFusion, "ConvMultiplyFusion", 0);
ngraph::pass::ConvMultiplyFusion::ConvMultiplyFusion() {
+ MATCHER_SCOPE(ConvMultiplyFusion);
auto conv = ngraph::pattern::wrap_type(pattern::consumers_count(1));
auto add = ngraph::pattern::wrap_type({conv, std::make_shared()});
- matcher_pass_callback callback = get_callback();
+ matcher_pass_callback callback = [](ngraph::pattern::Matcher &m) {
+ return conv_callback(m);
+ };
- auto m = std::make_shared(add, "ConvMultiplyFusion");
+ auto m = std::make_shared(add, matcher_name);
register_matcher(m, callback);
}
NGRAPH_RTTI_DEFINITION(ngraph::pass::DeconvAddFusion, "DeconvAddFusion", 0);
ngraph::pass::DeconvAddFusion::DeconvAddFusion() {
+ MATCHER_SCOPE(DeconvAddFusion);
auto conv = ngraph::pattern::wrap_type(pattern::consumers_count(1));
auto add = ngraph::pattern::wrap_type({conv, std::make_shared()});
- matcher_pass_callback callback = get_callback();
+ matcher_pass_callback callback = [](ngraph::pattern::Matcher &m){
+ return conv_callback(m);
+ };
- auto m = std::make_shared(add, "DeconvAddFusion");
+ auto m = std::make_shared(add, matcher_name);
register_matcher(m, callback);
}
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/conv_mul_fusion.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/conv_mul_fusion.cpp
index 4f10f679743..06dea782333 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/conv_mul_fusion.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/conv_mul_fusion.cpp
@@ -3,6 +3,7 @@
//
#include "transformations/common_optimizations/conv_mul_fusion.hpp"
+#include "itt.hpp"
#include
#include
@@ -18,6 +19,7 @@
NGRAPH_RTTI_DEFINITION(ngraph::pass::ConvolutionMultiplyFusion, "ConvolutionMultiplyFusion", 0);
ngraph::pass::ConvolutionMultiplyFusion::ConvolutionMultiplyFusion() {
+ MATCHER_SCOPE(ConvolutionMultiplyFusion);
auto input = pattern::any_input();
auto weights = ngraph::pattern::any_input(pattern::has_static_dim(0) /* has OIYX layout */);
auto conv = ngraph::pattern::wrap_type({input, weights}, pattern::consumers_count(1));
@@ -74,13 +76,14 @@ ngraph::pass::ConvolutionMultiplyFusion::ConvolutionMultiplyFusion() {
return true;
};
- auto m = std::make_shared(mul, "ConvolutionMultiplyFusion");
+ auto m = std::make_shared(mul, matcher_name);
register_matcher(m, callback);
}
NGRAPH_RTTI_DEFINITION(ngraph::pass::GroupConvolutionMultiplyFusion, "GroupConvolutionMultiplyFusion", 0);
ngraph::pass::GroupConvolutionMultiplyFusion::GroupConvolutionMultiplyFusion() {
+ MATCHER_SCOPE(GroupConvolutionMultiplyFusion);
auto input = pattern::any_input();
auto weights = ngraph::pattern::any_input();//pattern::has_static_dims({0, 1}) /* has GOIYX layout */);
auto conv = ngraph::pattern::wrap_type({input, weights}, pattern::consumers_count(1));
@@ -139,13 +142,14 @@ ngraph::pass::GroupConvolutionMultiplyFusion::GroupConvolutionMultiplyFusion() {
return true;
};
- auto m = std::make_shared(mul, "GroupConvolutionMultiplyFusion");
+ auto m = std::make_shared(mul, matcher_name);
register_matcher(m, callback);
}
NGRAPH_RTTI_DEFINITION(ngraph::pass::ConvolutionBackpropDataMultiplyFusion, "ConvolutionBackpropDataMultiplyFusion", 0);
ngraph::pass::ConvolutionBackpropDataMultiplyFusion::ConvolutionBackpropDataMultiplyFusion() {
+ MATCHER_SCOPE(ConvolutionBackpropDataMultiplyFusion);
auto input = pattern::any_input();
auto weights = ngraph::pattern::any_input(pattern::has_static_dim(1) /* has IOYX layout */);
auto conv = ngraph::pattern::wrap_type({input, weights}, pattern::consumers_count(1));
@@ -202,13 +206,14 @@ ngraph::pass::ConvolutionBackpropDataMultiplyFusion::ConvolutionBackpropDataMult
return true;
};
- auto m = std::make_shared(mul, "ConvolutionBackpropDataMultiplyFusion");
+ auto m = std::make_shared(mul, matcher_name);
register_matcher(m, callback);
}
NGRAPH_RTTI_DEFINITION(ngraph::pass::GroupConvolutionBackpropDataMultiplyFusion, "GroupConvolutionBackpropDataMultiplyFusion", 0);
ngraph::pass::GroupConvolutionBackpropDataMultiplyFusion::GroupConvolutionBackpropDataMultiplyFusion() {
+ MATCHER_SCOPE(GroupConvolutionBackpropDataMultiplyFusion);
auto input = pattern::any_input();
auto weights = ngraph::pattern::any_input(pattern::has_static_dims({0, 2}) /* has GIOYX layout */);
auto conv = ngraph::pattern::wrap_type({input, weights}, pattern::consumers_count(1));
@@ -267,6 +272,6 @@ ngraph::pass::GroupConvolutionBackpropDataMultiplyFusion::GroupConvolutionBackpr
return true;
};
- auto m = std::make_shared(mul, "GroupConvolutionMultiplyFusion");
+ auto m = std::make_shared(mul, matcher_name);
register_matcher(m, callback);
}
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/convert_quantize_dequantize.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/convert_quantize_dequantize.cpp
index 4cb11ab10ad..b396f51895b 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/convert_quantize_dequantize.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/convert_quantize_dequantize.cpp
@@ -4,6 +4,7 @@
#include "transformations/common_optimizations/convert_quantize_dequantize.hpp"
#include "transformations/utils/utils.hpp"
+#include "itt.hpp"
#include
#include
@@ -57,6 +58,7 @@
NGRAPH_RTTI_DEFINITION(ngraph::pass::ConvertQuantizeDequantize, "ConvertQuantizeDequantize", 0);
ngraph::pass::ConvertQuantizeDequantize::ConvertQuantizeDequantize() {
+ MATCHER_SCOPE(ConvertQuantizeDequantize);
auto data_pattern = ngraph::pattern::any_input();
auto input_low_pattern = ngraph::pattern::any_input();
auto input_high_pattern = ngraph::pattern::any_input();
@@ -149,6 +151,6 @@ ngraph::pass::ConvertQuantizeDequantize::ConvertQuantizeDequantize() {
return true;
};
- auto m = std::make_shared(mul_pattern, "ConvertQuantizeDequantize");
+ auto m = std::make_shared(mul_pattern, matcher_name);
this->register_matcher(m, callback);
}
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/depth_to_space_fusion.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/depth_to_space_fusion.cpp
index 8fa97e8b69a..a210b485834 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/depth_to_space_fusion.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/depth_to_space_fusion.cpp
@@ -3,6 +3,7 @@
//
#include "transformations/common_optimizations/depth_to_space_fusion.hpp"
+#include "itt.hpp"
#include
#include
@@ -84,6 +85,7 @@ bool check_depth_first(const ngraph::Shape& shape_input, const ngraph::Shape& sh
NGRAPH_RTTI_DEFINITION(ngraph::pass::DepthToSpaceFusion, "DepthToSpaceFusion", 0);
ngraph::pass::DepthToSpaceFusion::DepthToSpaceFusion() {
+ MATCHER_SCOPE(DepthToSpaceFusion);
auto input0 = std::make_shared(element::f32, Shape{1, 1, 1, 1});
auto input1 = std::make_shared(element::i64, Shape{4});
auto input2 = std::make_shared(element::i64, Shape{4});
@@ -159,6 +161,6 @@ ngraph::pass::DepthToSpaceFusion::DepthToSpaceFusion() {
return true;
};
- auto m = std::make_shared(reshape_after, "DepthToSpaceFusion");
+ auto m = std::make_shared(reshape_after, matcher_name);
register_matcher(m, callback);
}
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/fq_mul_fusion.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/fq_mul_fusion.cpp
index 7a62287285c..36c69e97335 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/fq_mul_fusion.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/fq_mul_fusion.cpp
@@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
+#include "itt.hpp"
#include "transformations/common_optimizations/fq_mul_fusion.hpp"
#include "transformations/utils/utils.hpp"
@@ -15,29 +16,29 @@
NGRAPH_RTTI_DEFINITION(ngraph::pass::FakeQuantizeMulFusion, "FakeQuantizeMulFusion", 0);
namespace {
- std::pair, ngraph::Output>
- get_adjusted_output_range(ngraph::Output out_low,
- ngraph::Output out_high,
- ngraph::Output multiplier) {
- const auto mul_out_low = std::make_shared(out_low, multiplier);
- const auto mul_out_high = std::make_shared(out_high, multiplier);
- copy_runtime_info({out_low.get_node_shared_ptr(), multiplier.get_node_shared_ptr()},
- mul_out_low);
- copy_runtime_info({out_high.get_node_shared_ptr(), multiplier.get_node_shared_ptr()},
- mul_out_high);
+std::pair, ngraph::Output>
+ get_adjusted_output_range(ngraph::Output out_low,
+ ngraph::Output out_high,
+ ngraph::Output multiplier) {
+ const auto mul_out_low = std::make_shared(out_low, multiplier);
+ const auto mul_out_high = std::make_shared(out_high, multiplier);
+ copy_runtime_info({out_low.get_node_shared_ptr(), multiplier.get_node_shared_ptr()},
+ mul_out_low);
+ copy_runtime_info({out_high.get_node_shared_ptr(), multiplier.get_node_shared_ptr()},
+ mul_out_high);
- ngraph::OutputVector new_out_low(1), new_out_high(1);
+ ngraph::OutputVector new_out_low(1), new_out_high(1);
- if (!mul_out_low->constant_fold(new_out_low, {out_low, multiplier})) {
- new_out_low[0] = mul_out_low;
+ if (!mul_out_low->constant_fold(new_out_low, {out_low, multiplier})) {
+ new_out_low[0] = mul_out_low;
+ }
+
+ if (!mul_out_high->constant_fold(new_out_high, {out_high, multiplier})) {
+ new_out_high[0] = mul_out_high;
+ }
+
+ return {new_out_low[0], new_out_high[0]};
}
-
- if (!mul_out_high->constant_fold(new_out_high, {out_high, multiplier})) {
- new_out_high[0] = mul_out_high;
- }
-
- return {new_out_low[0], new_out_high[0]};
- }
} // namespace
// This transformation multiplies the "output_low" and "output_high" inputs of the FQ operation
@@ -62,49 +63,49 @@ namespace {
//
ngraph::pass::FakeQuantizeMulFusion::FakeQuantizeMulFusion() {
- const auto fq_output_low_p = ngraph::pattern::any_input();
- const auto fq_output_high_p = ngraph::pattern::any_input();
+ MATCHER_SCOPE(FakeQuantizeMulFusion);
+ const auto fq_output_low_p = ngraph::pattern::any_input();
+ const auto fq_output_high_p = ngraph::pattern::any_input();
- const auto fq_node_p = ngraph::pattern::wrap_type(
- {ngraph::pattern::any_input(),
- ngraph::pattern::any_input(),
- ngraph::pattern::any_input(),
- fq_output_low_p,
- fq_output_high_p},
- pattern::consumers_count(1));
+ const auto fq_node_p = ngraph::pattern::wrap_type(
+ {ngraph::pattern::any_input(),
+ ngraph::pattern::any_input(),
+ ngraph::pattern::any_input(),
+ fq_output_low_p,
+ fq_output_high_p},
+ pattern::consumers_count(1));
- const auto mul_constant_p = ngraph::pattern::wrap_type();
- const auto mul_node_p = ngraph::pattern::wrap_type(
- {fq_node_p, mul_constant_p}, pattern::consumers_count(1));
+ const auto mul_constant_p = ngraph::pattern::wrap_type();
+ const auto mul_node_p = ngraph::pattern::wrap_type(
+ {fq_node_p, mul_constant_p}, pattern::consumers_count(1));
- ngraph::matcher_pass_callback callback = [=](pattern::Matcher &m) {
- const auto& pattern_map = m.get_pattern_value_map();
+ ngraph::matcher_pass_callback callback = [=](pattern::Matcher &m) {
+ const auto& pattern_map = m.get_pattern_value_map();
- const auto fq_node = pattern_map.at(fq_node_p).get_node_shared_ptr();
+ const auto fq_node = pattern_map.at(fq_node_p).get_node_shared_ptr();
- const auto original_output_low = pattern_map.at(fq_output_low_p);
- const auto original_output_high = pattern_map.at(fq_output_high_p);
- const auto mul_constant = pattern_map.at(mul_constant_p);
+ const auto original_output_low = pattern_map.at(fq_output_low_p);
+ const auto original_output_high = pattern_map.at(fq_output_high_p);
+ const auto mul_constant = pattern_map.at(mul_constant_p);
- const auto new_output_limits = get_adjusted_output_range(
- original_output_low, original_output_high, mul_constant);
+ const auto new_output_limits = get_adjusted_output_range(
+ original_output_low, original_output_high, mul_constant);
- const auto new_fq_node = fq_node->clone_with_new_inputs({fq_node->input_value(0),
- fq_node->input_value(1),
- fq_node->input_value(2),
- new_output_limits.first,
- new_output_limits.second});
+ const auto new_fq_node = fq_node->clone_with_new_inputs({fq_node->input_value(0),
+ fq_node->input_value(1),
+ fq_node->input_value(2),
+ new_output_limits.first,
+ new_output_limits.second});
- const auto mul_node = pattern_map.at(mul_node_p).get_node_shared_ptr();
- replace_node(mul_node, new_fq_node);
+ const auto mul_node = pattern_map.at(mul_node_p).get_node_shared_ptr();
+ replace_node(mul_node, new_fq_node);
- new_fq_node->set_friendly_name(fq_node->get_friendly_name());
- copy_runtime_info({fq_node, mul_node}, new_fq_node);
+ new_fq_node->set_friendly_name(fq_node->get_friendly_name());
+ copy_runtime_info({fq_node, mul_node}, new_fq_node);
- return true;
- };
+ return true;
+ };
- auto m = std::make_shared(mul_node_p,
- "FakeQuantizeMulFusion");
- this->register_matcher(m, callback);
+ auto m = std::make_shared(mul_node_p, matcher_name);
+ this->register_matcher(m, callback);
}
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/fq_reshape_fusion.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/fq_reshape_fusion.cpp
index 0697a7ac4d5..224b3c0d1ad 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/fq_reshape_fusion.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/fq_reshape_fusion.cpp
@@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
+#include "itt.hpp"
#include "transformations/common_optimizations/fq_reshape_fusion.hpp"
#include
@@ -14,6 +15,7 @@
NGRAPH_RTTI_DEFINITION(ngraph::pass::FakeQuantizeReshapeFusion, "FakeQuantizeReshapeFusion", 0);
ngraph::pass::FakeQuantizeReshapeFusion::FakeQuantizeReshapeFusion() {
+ MATCHER_SCOPE(FakeQuantizeReshapeFusion);
const auto fq_node_p = ngraph::pattern::wrap_type(
{ngraph::pattern::wrap_type(), // for weights only
ngraph::pattern::any_input(),
@@ -74,6 +76,6 @@ ngraph::pass::FakeQuantizeReshapeFusion::FakeQuantizeReshapeFusion() {
return true;
};
- auto m = std::make_shared(reshape_node_p, "FakeQuantizeReshapeFusion");
+ auto m = std::make_shared(reshape_node_p, matcher_name);
this->register_matcher(m, callback);
}
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/hsigmoid_fusion.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/hsigmoid_fusion.cpp
index f19c91eeba1..28c11ef5cc7 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/hsigmoid_fusion.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/hsigmoid_fusion.cpp
@@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
+#include "itt.hpp"
#include "transformations/common_optimizations/hsigmoid_fusion.hpp"
#include "transformations/utils/utils.hpp"
@@ -16,6 +17,7 @@ NGRAPH_RTTI_DEFINITION(ngraph::pass::HSigmoidFusion, "HSigmoidFusion", 0);
NGRAPH_RTTI_DEFINITION(ngraph::pass::HSigmoidFusionWithReluDiv, "HSigmoidFusionWithReluDiv", 0);
ngraph::pass::HSigmoidFusionWithReluDiv::HSigmoidFusionWithReluDiv() {
+ MATCHER_SCOPE(HSigmoidFusionWithReluDiv);
// Replaces a sub-graph ((min(Relu(x + 3), 6)) / 6 with a HSigmoid op.
auto input = ngraph::pattern::any_input();
auto add_constant = ngraph::pattern::wrap_type();
@@ -55,13 +57,14 @@ ngraph::pass::HSigmoidFusionWithReluDiv::HSigmoidFusionWithReluDiv() {
return true;
};
- auto m = std::make_shared(div, "HSigmoidWithReluDivFusion");
+ auto m = std::make_shared(div, matcher_name);
register_matcher(m, callback);
}
NGRAPH_RTTI_DEFINITION(ngraph::pass::HSigmoidFusionWithReluMul, "HSigmoidFusionWithReluMul", 0);
ngraph::pass::HSigmoidFusionWithReluMul::HSigmoidFusionWithReluMul() {
+ MATCHER_SCOPE(HSigmoidFusionWithReluMul);
// Replaces a sub-graph ((min(Relu(x + 3), 6)) * const(1/6) with a HSigmoid op.
auto input = ngraph::pattern::any_input();
auto add_constant = ngraph::pattern::wrap_type();
@@ -102,13 +105,14 @@ ngraph::pass::HSigmoidFusionWithReluMul::HSigmoidFusionWithReluMul() {
return true;
};
- auto m = std::make_shared(mul_second, "HSigmoidWithReluMulFusion");
+ auto m = std::make_shared(mul_second, matcher_name);
register_matcher(m, callback);
}
NGRAPH_RTTI_DEFINITION(ngraph::pass::HSigmoidFusionWithoutRelu, "HSigmoidFusionWithoutRelu", 0);
ngraph::pass::HSigmoidFusionWithoutRelu::HSigmoidFusionWithoutRelu() {
+ MATCHER_SCOPE(HSigmoidFusionWithoutRelu);
// Replaces a sub-graph (min(max(x + 3, 0), 6) / 6) with a HSigmoid op.
auto input = ngraph::pattern::any_input();
auto add_constant = ngraph::pattern::wrap_type();
@@ -152,13 +156,14 @@ ngraph::pass::HSigmoidFusionWithoutRelu::HSigmoidFusionWithoutRelu() {
return true;
};
- auto m = std::make_shared(div, "HSigmoidWithoutReluFusion");
+ auto m = std::make_shared(div, matcher_name);
register_matcher(m, callback);
}
NGRAPH_RTTI_DEFINITION(ngraph::pass::HSigmoidFusionWithClamp, "HSigmoidFusionWithClamp", 0);
ngraph::pass::HSigmoidFusionWithClamp::HSigmoidFusionWithClamp() {
+ MATCHER_SCOPE(HSigmoidFusionWithClamp);
// Replaces a sub-graph (Clamp(x + 3, 0, 6) * const(1/6)) with a HSigmoid op.
auto input = ngraph::pattern::any_input();
auto add_constant = ngraph::pattern::wrap_type();
@@ -193,6 +198,6 @@ ngraph::pass::HSigmoidFusionWithClamp::HSigmoidFusionWithClamp() {
return true;
};
- auto m = std::make_shared(mul_first, "HSigmoidWithClampFusion");
+ auto m = std::make_shared(mul_first, matcher_name);
register_matcher(m, callback);
}
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/hswish_fusion.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/hswish_fusion.cpp
index e64909bb594..0bb0815eaf5 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/hswish_fusion.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/hswish_fusion.cpp
@@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
+#include "itt.hpp"
#include "transformations/common_optimizations/hswish_fusion.hpp"
#include "transformations/utils/utils.hpp"
@@ -16,6 +17,7 @@ NGRAPH_RTTI_DEFINITION(ngraph::pass::HSwishFusion, "HSwishFusion", 0);
NGRAPH_RTTI_DEFINITION(ngraph::pass::HSwishFusionWithReluDiv, "HSwishFusionWithReluDiv", 0);
ngraph::pass::HSwishFusionWithReluDiv::HSwishFusionWithReluDiv() {
+ MATCHER_SCOPE(HSwishFusionWithReluDiv);
// Replaces a sub-graph (x * (min(Relu(x + 3), 6)) / 6 with a HSwish op.
auto input = ngraph::pattern::any_input();
auto add_constant = ngraph::pattern::wrap_type();
@@ -60,13 +62,14 @@ ngraph::pass::HSwishFusionWithReluDiv::HSwishFusionWithReluDiv() {
return true;
};
- auto m = std::make_shared(div, "HSwishWithReluDivFusion");
+ auto m = std::make_shared(div, matcher_name);
register_matcher(m, callback);
}
NGRAPH_RTTI_DEFINITION(ngraph::pass::HSwishFusionWithReluMul, "HSwishFusionWithReluMul", 0);
ngraph::pass::HSwishFusionWithReluMul::HSwishFusionWithReluMul() {
+ MATCHER_SCOPE(HSwishFusionWithReluMul);
// Replaces a sub-graph (x * (min(Relu(x + 3), 6)) * const(1/6) with a HSwish op.
auto input = ngraph::pattern::any_input();
auto add_constant = ngraph::pattern::wrap_type();
@@ -111,13 +114,14 @@ ngraph::pass::HSwishFusionWithReluMul::HSwishFusionWithReluMul() {
return true;
};
- auto m = std::make_shared(mul_second, "HSwishWithReluMulFusion");
+ auto m = std::make_shared(mul_second, matcher_name);
register_matcher(m, callback);
}
NGRAPH_RTTI_DEFINITION(ngraph::pass::HSwishFusionWithoutRelu, "HSwishFusionWithoutRelu", 0);
ngraph::pass::HSwishFusionWithoutRelu::HSwishFusionWithoutRelu() {
+ MATCHER_SCOPE(HSwishFusionWithoutRelu);
// Replaces a sub-graph x * (min(max(x + 3, 0), 6) / 6) with a HSwish op.
auto input = ngraph::pattern::any_input();
auto add_constant = ngraph::pattern::wrap_type();
@@ -166,13 +170,14 @@ ngraph::pass::HSwishFusionWithoutRelu::HSwishFusionWithoutRelu() {
return true;
};
- auto m = std::make_shared(mul, "HSwishWithoutReluFusion");
+ auto m = std::make_shared(mul, matcher_name);
register_matcher(m, callback);
}
NGRAPH_RTTI_DEFINITION(ngraph::pass::HSwishFusionWithClamp, "HSwishFusionWithClamp", 0);
ngraph::pass::HSwishFusionWithClamp::HSwishFusionWithClamp() {
+ MATCHER_SCOPE(HSwishFusionWithClamp);
// Replaces a sub-graph x * (Clamp(x + 3, 0, 6) * const(1/6)) with a HSwish op.
auto input = ngraph::pattern::any_input();
auto add_constant = ngraph::pattern::wrap_type();
@@ -211,6 +216,6 @@ ngraph::pass::HSwishFusionWithClamp::HSwishFusionWithClamp() {
return true;
};
- auto m = std::make_shared(mul_second, "HSwishWithClampFusion");
+ auto m = std::make_shared(mul_second, matcher_name);
register_matcher(m, callback);
}
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/lin_op_sequence_fusion.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/lin_op_sequence_fusion.cpp
index 1f2279ddaad..8b2538bd906 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/lin_op_sequence_fusion.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/lin_op_sequence_fusion.cpp
@@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
+#include "itt.hpp"
#include "transformations/common_optimizations/lin_op_sequence_fusion.hpp"
#include
@@ -31,6 +32,7 @@ NGRAPH_RTTI_DEFINITION(ngraph::pass::LinOpSequenceFusion, "LinOpSequenceFusion",
NGRAPH_RTTI_DEFINITION(ngraph::pass::AddMultiplyFusion, "AddMultiplyFusion", 0);
ngraph::pass::AddMultiplyFusion::AddMultiplyFusion() {
+ MATCHER_SCOPE(AddMultiplyFusion);
// Create Add->Multiply pattern where Add has exactly one consumer
auto m_data = ngraph::pattern::any_input();
auto m_add_constant = ngraph::pattern::wrap_type();
@@ -70,13 +72,14 @@ ngraph::pass::AddMultiplyFusion::AddMultiplyFusion() {
return true;
};
- auto m = std::make_shared(m_mul, "AddMultiplyFusion");
+ auto m = std::make_shared(m_mul, matcher_name);
this->register_matcher(m, callback);
}
NGRAPH_RTTI_DEFINITION(ngraph::pass::AddAddFusion, "AddAddFusion", 0);
ngraph::pass::AddAddFusion::AddAddFusion() {
+ MATCHER_SCOPE(AddAddFusion);
// Create Add->Add pattern where first Add has exactly one consumer
auto m_data = ngraph::pattern::any_input();
auto m_add1_constant = ngraph::pattern::wrap_type();
@@ -104,13 +107,14 @@ ngraph::pass::AddAddFusion::AddAddFusion() {
return true;
};
- auto m = std::make_shared(m_add2, "AddAddFusion");
+ auto m = std::make_shared(m_add2, matcher_name);
this->register_matcher(m, callback);
}
NGRAPH_RTTI_DEFINITION(ngraph::pass::MultiplyMultiplyFusion, "MultiplyMultiplyFusion", 0);
ngraph::pass::MultiplyMultiplyFusion::MultiplyMultiplyFusion() {
+ MATCHER_SCOPE(MultiplyMultiplyFusion);
// Create Multiply->Multiply pattern where first Multiply has exactly one consumer
auto m_data = ngraph::pattern::any_input();
auto m_mul1_constant = ngraph::pattern::wrap_type();
@@ -138,6 +142,6 @@ ngraph::pass::MultiplyMultiplyFusion::MultiplyMultiplyFusion() {
return true;
};
- auto m = std::make_shared(m_mul2, "MultiplyMultiplyFusion");
+ auto m = std::make_shared(m_mul2, matcher_name);
this->register_matcher(m, callback);
}
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/mish_fusion.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/mish_fusion.cpp
index bf395d76100..da1b8ab9243 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/mish_fusion.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/mish_fusion.cpp
@@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
+#include "itt.hpp"
#include "transformations/common_optimizations/mish_fusion.hpp"
#include
@@ -14,6 +15,7 @@
NGRAPH_RTTI_DEFINITION(ngraph::pass::MishFusion, "MishFusion", 0);
ngraph::pass::MishFusion::MishFusion() {
+ MATCHER_SCOPE(MishFusion);
auto input = ngraph::pattern::any_input();
auto exp = std::make_shared(input);
auto add = std::make_shared(exp, ngraph::pattern::wrap_type());
@@ -37,6 +39,6 @@ ngraph::pass::MishFusion::MishFusion() {
return true;
};
- auto m = std::make_shared(mul, "MishFusion");
+ auto m = std::make_shared(mul, matcher_name);
register_matcher(m, matcher_pass_callback);
}
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/nop_elimination.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/nop_elimination.cpp
index 3ff7f1e0423..af9386e9cde 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/nop_elimination.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/nop_elimination.cpp
@@ -14,6 +14,7 @@
// limitations under the License.
//*****************************************************************************
+#include "itt.hpp"
#include
#include
#include
@@ -329,6 +330,7 @@ static bool eliminate_squeeze(const std::shared_ptr& node) {
NGRAPH_RTTI_DEFINITION(ngraph::pass::NopElimination, "NopElimination", 0);
bool pass::NopElimination::run_on_function(std::shared_ptr function) {
+ RUN_ON_FUNCTION_SCOPE(NopElimination);
static const std::unordered_map&)>>
dispatcher{{TI(opset3::Pad), &eliminate_nop},
{TI(opset3::Convert), &eliminate_convert},
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/normalize_l2_fusion.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/normalize_l2_fusion.cpp
index e701645bc74..2b2523cf6bc 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/normalize_l2_fusion.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/normalize_l2_fusion.cpp
@@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
+#include "itt.hpp"
#include "transformations/common_optimizations/normalize_l2_fusion.hpp"
#include "transformations/utils/utils.hpp"
@@ -17,6 +18,7 @@ NGRAPH_RTTI_DEFINITION(ngraph::pass::NormalizeL2Fusion, "NormalizeL2Fusion", 0);
NGRAPH_RTTI_DEFINITION(ngraph::pass::NormalizeL2FusionWithMax, "NormalizeL2FusionWithMax", 0);
ngraph::pass::NormalizeL2FusionWithMax::NormalizeL2FusionWithMax() {
+ MATCHER_SCOPE(NormalizeL2FusionWithMax);
auto input = ngraph::pattern::any_input();
auto exp = ngraph::pattern::wrap_type();
@@ -63,13 +65,14 @@ ngraph::pass::NormalizeL2FusionWithMax::NormalizeL2FusionWithMax() {
return true;
};
- auto m = std::make_shared(divide, "NormalizeL2FusionWithMax");
+ auto m = std::make_shared(divide, matcher_name);
register_matcher(m, matcher_pass_callback);
}
NGRAPH_RTTI_DEFINITION(ngraph::pass::NormalizeL2FusionWithAdd, "NormalizeL2FusionWithAdd", 0);
ngraph::pass::NormalizeL2FusionWithAdd::NormalizeL2FusionWithAdd() {
+ MATCHER_SCOPE(NormalizeL2FusionWithAdd);
auto input = ngraph::pattern::any_input();
auto exp = ngraph::pattern::wrap_type();
@@ -116,6 +119,6 @@ ngraph::pass::NormalizeL2FusionWithAdd::NormalizeL2FusionWithAdd() {
return true;
};
- auto m = std::make_shared(divide, "NormalizeL2FusionWithMax");
+ auto m = std::make_shared(divide, matcher_name);
register_matcher(m, callback);
}
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/optimize_strided_slice.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/optimize_strided_slice.cpp
index 9c335cdc925..55521f4f586 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/optimize_strided_slice.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/optimize_strided_slice.cpp
@@ -5,6 +5,7 @@
#include
#include
+#include "itt.hpp"
#include
#include
#include
@@ -15,6 +16,7 @@ NGRAPH_RTTI_DEFINITION(ngraph::pass::StridedSliceOptimization, "StridedSliceOpti
NGRAPH_RTTI_DEFINITION(ngraph::pass::UselessStridedSliceEraser, "UselessStridedSliceEraser", 0);
bool ngraph::pass::UselessStridedSliceEraser::run_on_function(std::shared_ptr f) {
+ RUN_ON_FUNCTION_SCOPE(UselessStridedSliceEraser);
bool rewritten = false;
for (auto & node : f->get_ordered_ops()) {
// Recursively apply transformation for sub-graph based operations
@@ -89,6 +91,7 @@ bool strided_slices_perform_the_same(std::shared_ptr f) {
+ RUN_ON_FUNCTION_SCOPE(SharedStridedSliceEraser);
bool graph_rewritten = false;
std::map, std::vector>> source_to_ss;
@@ -120,6 +123,7 @@ bool ngraph::pass::SharedStridedSliceEraser::run_on_function(std::shared_ptr f) {
+ RUN_ON_FUNCTION_SCOPE(GroupedStridedSliceOptimizer);
bool graph_rewritten = false;
using planned_slice = std::pair, ngraph::SlicePlan>;
@@ -232,6 +236,7 @@ bool ngraph::pass::GroupedStridedSliceOptimizer::run_on_function(std::shared_ptr
}
bool ngraph::pass::StridedSliceOptimization::run_on_function(std::shared_ptr f) {
+ RUN_ON_FUNCTION_SCOPE(StridedSliceOptimization);
bool rewritten = UselessStridedSliceEraser().run_on_function(f);
rewritten |= SharedStridedSliceEraser().run_on_function(f);
rewritten |= GroupedStridedSliceOptimizer().run_on_function(f);
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/pull_transpose_through_fq.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/pull_transpose_through_fq.cpp
index c09d88a6b44..d879518d547 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/pull_transpose_through_fq.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/pull_transpose_through_fq.cpp
@@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
+#include "itt.hpp"
#include "transformations/common_optimizations/pull_transpose_through_fq.hpp"
#include
@@ -14,6 +15,7 @@
NGRAPH_RTTI_DEFINITION(ngraph::pass::PullTransposeThroughFQUp, "PullTransposeThroughFQUp", 0);
ngraph::pass::PullTransposeThroughFQUp::PullTransposeThroughFQUp() {
+ MATCHER_SCOPE(PullTransposeThroughFQUp);
auto m_fq = pattern::wrap_type({pattern::any_input(pattern::has_static_rank()),
pattern::any_input(pattern::has_static_rank()),
pattern::any_input(pattern::has_static_rank()),
@@ -57,6 +59,6 @@ ngraph::pass::PullTransposeThroughFQUp::PullTransposeThroughFQUp() {
return true;
};
- auto m = std::make_shared(m_transpose, "PullTransposeThroughFQUp");
+ auto m = std::make_shared(m_transpose, matcher_name);
this->register_matcher(m, callback);
}
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/remove_filtering_boxes_by_size.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/remove_filtering_boxes_by_size.cpp
index 8edac8f654c..5c59ddd9d1c 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/remove_filtering_boxes_by_size.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/remove_filtering_boxes_by_size.cpp
@@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
+#include "itt.hpp"
#include
#include
@@ -13,6 +14,7 @@
NGRAPH_RTTI_DEFINITION(ngraph::pass::RemoveFilteringBoxesBySize, "RemoveFilteringBoxesBySize", 0);
ngraph::pass::RemoveFilteringBoxesBySize::RemoveFilteringBoxesBySize() {
+ MATCHER_SCOPE(RemoveFilteringBoxesBySize);
// variadic split
auto data = std::make_shared(element::f32, Shape{1000, 4});
auto sizes = opset3::Constant::create(element::i64, Shape{4}, std::vector({1, 1, 1, 1}));
@@ -103,6 +105,6 @@ ngraph::pass::RemoveFilteringBoxesBySize::RemoveFilteringBoxesBySize() {
return true;
};
- auto m = std::make_shared(cast, "RemoveFilteringBoxesBySize");
+ auto m = std::make_shared(cast, matcher_name);
register_matcher(m, callback);
}
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/softplus_fusion.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/softplus_fusion.cpp
index bd57b0a84d7..6c3d3d0bde6 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/softplus_fusion.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/softplus_fusion.cpp
@@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
+#include "itt.hpp"
#include "transformations/common_optimizations/softplus_fusion.hpp"
#include
@@ -14,6 +15,7 @@
NGRAPH_RTTI_DEFINITION(ngraph::pass::SoftPlusFusion, "SoftPlusFusion", 0);
ngraph::pass::SoftPlusFusion::SoftPlusFusion() {
+ MATCHER_SCOPE(SoftPlusFusion);
// fuses ln(exp(x) + 1.0) operations into SoftPlus(x)
auto input = ngraph::pattern::any_input();
auto exp = std::make_shared(input);
@@ -50,6 +52,6 @@ ngraph::pass::SoftPlusFusion::SoftPlusFusion() {
return true;
};
- auto m = std::make_shared(log, "SoftPlusFusion");
+ auto m = std::make_shared(log, matcher_name);
register_matcher(m, callback);
}
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/softplus_to_mish_fusion.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/softplus_to_mish_fusion.cpp
index 798da885021..438f047de7b 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/softplus_to_mish_fusion.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/softplus_to_mish_fusion.cpp
@@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
+#include "itt.hpp"
#include "transformations/common_optimizations/softplus_to_mish_fusion.hpp"
#include
@@ -14,6 +15,7 @@
NGRAPH_RTTI_DEFINITION(ngraph::pass::SoftPlusToMishFusion, "SoftPlusToMishFusion", 0);
ngraph::pass::SoftPlusToMishFusion::SoftPlusToMishFusion() {
+ MATCHER_SCOPE(SoftPlusToMishFusion);
auto input = ngraph::pattern::any_input();
auto softplus = ngraph::pattern::wrap_type({input}, pattern::consumers_count(1));
auto tanh = ngraph::pattern::wrap_type({softplus}, pattern::consumers_count(1));
@@ -33,6 +35,6 @@ ngraph::pass::SoftPlusToMishFusion::SoftPlusToMishFusion() {
return true;
};
- auto m = std::make_shared(mul, "SoftPlusToMishFusion");
+ auto m = std::make_shared