From d6721c395bc57aa6738e7366a3822461d7d3c879 Mon Sep 17 00:00:00 2001
From: Ilya Churaev
Date: Fri, 25 Dec 2020 17:20:48 +0300
Subject: [PATCH] Added CC macros to validate, clone and visit (#3730)

* Added CC macros to validate, clone and visit

* Fixed names

* Fixed code style

* Add exceptions

* Revert "Add exceptions"

This reverts commit 0489ba376f762f1f79b374d79ff63b561662a2c9.

* Update ngraph macros, throw an exception if code is disabled for nGraph op

* Fixed code style

* Simplified NGRAPH_OP_SCOPE

* Changed TYPE_CASE macros

* Fixed compilation

* Fixed code style

* Fixed build
---
 ngraph/core/src/itt.hpp | 11 ++-
 ngraph/core/src/op/abs.cpp | 9 +-
 ngraph/core/src/op/acos.cpp | 9 +-
 ngraph/core/src/op/acosh.cpp | 6 +-
 ngraph/core/src/op/add.cpp | 10 +-
 ngraph/core/src/op/and.cpp | 10 +-
 ngraph/core/src/op/asin.cpp | 9 +-
 ngraph/core/src/op/asinh.cpp | 6 +-
 ngraph/core/src/op/assign.cpp | 4 +
 ngraph/core/src/op/atan.cpp | 9 +-
 ngraph/core/src/op/atanh.cpp | 6 +-
 ngraph/core/src/op/avg_pool.cpp | 4 +
 ngraph/core/src/op/batch_norm.cpp | 7 ++
 ngraph/core/src/op/batch_to_space.cpp | 7 +-
 ngraph/core/src/op/binary_convolution.cpp | 4 +
 ngraph/core/src/op/broadcast.cpp | 17 ++--
 ngraph/core/src/op/bucketize.cpp | 4 +
 ngraph/core/src/op/ceiling.cpp | 8 +-
 ngraph/core/src/op/clamp.cpp | 11 +--
 ngraph/core/src/op/concat.cpp | 15 +--
 ngraph/core/src/op/constant.cpp | 13 ++-
 ngraph/core/src/op/convert.cpp | 16 ++-
 ngraph/core/src/op/convert_like.cpp | 4 +
 ngraph/core/src/op/convolution.cpp | 7 ++
 ngraph/core/src/op/cos.cpp | 9 +-
 ngraph/core/src/op/cosh.cpp | 9 +-
 ngraph/core/src/op/ctc_greedy_decoder.cpp | 4 +
 ngraph/core/src/op/ctc_loss.cpp | 4 +
 ngraph/core/src/op/cum_sum.cpp | 4 +
 ngraph/core/src/op/deformable_convolution.cpp | 20 ++--
 .../core/src/op/deformable_psroi_pooling.cpp | 4 +
 ngraph/core/src/op/depth_to_space.cpp | 7 +-
 ngraph/core/src/op/detection_output.cpp | 4 +
 ngraph/core/src/op/divide.cpp | 10 +-
 ngraph/core/src/op/elu.cpp | 4 +
 ngraph/core/src/op/embedding_segments_sum.cpp | 3 +
 .../core/src/op/embeddingbag_offsets_sum.cpp | 4 +-
 ngraph/core/src/op/embeddingbag_packedsum.cpp | 2 +
 ngraph/core/src/op/equal.cpp | 8 +-
 ngraph/core/src/op/erf.cpp | 9 +-
 ngraph/core/src/op/exp.cpp | 9 +-
 ngraph/core/src/op/extractimagepatches.cpp | 4 +
 ngraph/core/src/op/fake_quantize.cpp | 4 +
 ngraph/core/src/op/floor.cpp | 9 +-
 ngraph/core/src/op/floor_mod.cpp | 9 +-
 ngraph/core/src/op/gather.cpp | 11 ++-
 ngraph/core/src/op/gather_elements.cpp | 4 +
 ngraph/core/src/op/gather_nd.cpp | 7 +-
 ngraph/core/src/op/gather_tree.cpp | 4 +
 ngraph/core/src/op/gelu.cpp | 3 +
 ngraph/core/src/op/greater.cpp | 8 +-
 ngraph/core/src/op/greater_eq.cpp | 9 +-
 ngraph/core/src/op/grn.cpp | 3 +
 ngraph/core/src/op/group_conv.cpp | 6 ++
 ngraph/core/src/op/gru_cell.cpp | 3 +
 ngraph/core/src/op/gru_sequence.cpp | 4 +
 ngraph/core/src/op/hard_sigmoid.cpp | 3 +
 ngraph/core/src/op/hsigmoid.cpp | 9 +-
 ngraph/core/src/op/hswish.cpp | 9 +-
 ngraph/core/src/op/interpolate.cpp | 10 +-
 ngraph/core/src/op/less.cpp | 8 +-
 ngraph/core/src/op/less_eq.cpp | 8 +-
 ngraph/core/src/op/log.cpp | 9 +-
 ngraph/core/src/op/log_softmax.cpp | 3 +
 ngraph/core/src/op/loop.cpp | 21 ++--
 ngraph/core/src/op/lrn.cpp | 4 +
 ngraph/core/src/op/lstm_cell.cpp | 16 ++-
 ngraph/core/src/op/lstm_sequence.cpp | 16 ++-
 ngraph/core/src/op/matmul.cpp | 12 +--
 ngraph/core/src/op/max.cpp | 8 +-
 ngraph/core/src/op/max_pool.cpp | 7 +-
 ngraph/core/src/op/maximum.cpp | 8 +-
 ngraph/core/src/op/min.cpp | 8 +-
 ngraph/core/src/op/minimum.cpp | 8 +-
 ngraph/core/src/op/mish.cpp | 10 +-
 ngraph/core/src/op/mod.cpp | 3 +
 ngraph/core/src/op/multiply.cpp | 16 ++-
 ngraph/core/src/op/mvn.cpp | 7 ++
 ngraph/core/src/op/negative.cpp | 10 +-
 ngraph/core/src/op/non_max_suppression.cpp | 15 ++-
 ngraph/core/src/op/non_zero.cpp | 16 ++-
 ngraph/core/src/op/normalize_l2.cpp | 3 +
 ngraph/core/src/op/not.cpp | 10 +-
 ngraph/core/src/op/not_equal.cpp | 9 +-
 ngraph/core/src/op/one_hot.cpp | 20 ++--
 ngraph/core/src/op/or.cpp | 8 +-
 ngraph/core/src/op/pad.cpp | 41 ++++----
 ngraph/core/src/op/parameter.cpp | 5 +
 ngraph/core/src/op/power.cpp | 8 +-
 ngraph/core/src/op/prelu.cpp | 9 +-
 ngraph/core/src/op/prior_box.cpp | 10 +-
 ngraph/core/src/op/prior_box_clustered.cpp | 11 +--
 ngraph/core/src/op/proposal.cpp | 6 ++
 ngraph/core/src/op/psroi_pooling.cpp | 4 +
 ngraph/core/src/op/quantize.cpp | 3 +
 ngraph/core/src/op/range.cpp | 36 +++----
 ngraph/core/src/op/read_value.cpp | 4 +
 ngraph/core/src/op/reduce_l1.cpp | 9 +-
 ngraph/core/src/op/reduce_l2.cpp | 10 +-
 ngraph/core/src/op/reduce_logical_and.cpp | 14 ++-
 ngraph/core/src/op/reduce_logical_or.cpp | 14 ++-
 ngraph/core/src/op/reduce_mean.cpp | 8 +-
 ngraph/core/src/op/reduce_prod.cpp | 10 +-
 ngraph/core/src/op/reduce_sum.cpp | 9 +-
 ngraph/core/src/op/region_yolo.cpp | 4 +
 ngraph/core/src/op/relu.cpp | 9 +-
 ngraph/core/src/op/reorg_yolo.cpp | 4 +
 ngraph/core/src/op/reshape.cpp | 21 ++--
 ngraph/core/src/op/result.cpp | 18 ++--
 ngraph/core/src/op/reverse.cpp | 14 +--
 ngraph/core/src/op/reverse_sequence.cpp | 4 +
 ngraph/core/src/op/rnn_cell.cpp | 3 +
 ngraph/core/src/op/rnn_sequence.cpp | 4 +
 ngraph/core/src/op/roi_align.cpp | 12 +--
 ngraph/core/src/op/roi_pooling.cpp | 4 +
 ngraph/core/src/op/round.cpp | 12 +--
 .../core/src/op/scatter_elements_update.cpp | 22 ++---
 ngraph/core/src/op/scatter_nd_update.cpp | 2 +
 ngraph/core/src/op/scatter_update.cpp | 11 +--
 ngraph/core/src/op/select.cpp | 13 ++-
 ngraph/core/src/op/selu.cpp | 3 +
 ngraph/core/src/op/shape_of.cpp | 20 ++--
 ngraph/core/src/op/shuffle_channels.cpp | 7 +-
 ngraph/core/src/op/sigmoid.cpp | 8 +-
 ngraph/core/src/op/sign.cpp | 9 +-
 ngraph/core/src/op/sin.cpp | 9 +-
 ngraph/core/src/op/sinh.cpp | 9 +-
 ngraph/core/src/op/softmax.cpp | 12 +--
 ngraph/core/src/op/softplus.cpp | 10 +-
 ngraph/core/src/op/space_to_batch.cpp | 7 +-
 ngraph/core/src/op/space_to_depth.cpp | 7 +-
 ngraph/core/src/op/split.cpp | 14 +--
 ngraph/core/src/op/sqrt.cpp | 9 +-
 ngraph/core/src/op/squared_difference.cpp | 3 +
 ngraph/core/src/op/squeeze.cpp | 9 +-
 ngraph/core/src/op/strided_slice.cpp | 29 +++---
 ngraph/core/src/op/subtract.cpp | 8 +-
 ngraph/core/src/op/swish.cpp | 10 +-
 ngraph/core/src/op/tan.cpp | 9 +-
 ngraph/core/src/op/tanh.cpp | 9 +-
 ngraph/core/src/op/tensor_iterator.cpp | 16 +--
 ngraph/core/src/op/tile.cpp | 7 +-
 ngraph/core/src/op/topk.cpp | 99 +++++++++----------
 ngraph/core/src/op/transpose.cpp | 10 +-
 ngraph/core/src/op/unsqueeze.cpp | 10 +-
 .../core/src/op/util/arithmetic_reduction.cpp | 2 +
 .../util/arithmetic_reductions_keep_dims.cpp | 5 +-
 .../op/util/binary_elementwise_arithmetic.cpp | 3 +
 .../op/util/binary_elementwise_comparison.cpp | 3 +
 .../op/util/binary_elementwise_logical.cpp | 3 +
 ngraph/core/src/op/util/broadcast_base.cpp | 98 +++++++++---------
 .../src/op/util/embeddingbag_offsets_base.cpp | 3 +
 .../src/op/util/embeddingbag_packed_base.cpp | 3 +
 ngraph/core/src/op/util/fused_op.cpp | 2 +
 ngraph/core/src/op/util/index_reduction.cpp | 3 +
 ngraph/core/src/op/util/logical_reduction.cpp | 2 +
 .../op/util/logical_reduction_keep_dims.cpp | 5 +-
 ngraph/core/src/op/util/rnn_cell_base.cpp | 2 +
 ngraph/core/src/op/util/scatter_base.cpp | 3 +
 ngraph/core/src/op/util/scatter_nd_base.cpp | 6 +-
 .../op/util/unary_elementwise_arithmetic.cpp | 3 +
 ngraph/core/src/op/variadic_split.cpp | 7 +-
 ngraph/core/src/op/xor.cpp | 17 ++--
 ngraph/test/CMakeLists.txt | 31 +-----
 .../ngraph_cc_collect.cpp | 58 +++++++++++
 .../conditional_compilation/ngraph_cc_off.cpp | 52 ++++++++++
 .../conditional_compilation/ngraph_cc_on.cpp | 57 +++++++++++
 .../include/openvino/cc/selective_build.h | 2 +-
 168 files changed, 1064 insertions(+), 719 deletions(-)
 create mode 100644 ngraph/test/conditional_compilation/ngraph_cc_collect.cpp
 create mode 100644 ngraph/test/conditional_compilation/ngraph_cc_off.cpp
 create mode 100644 ngraph/test/conditional_compilation/ngraph_cc_on.cpp

diff --git a/ngraph/core/src/itt.hpp b/ngraph/core/src/itt.hpp
index eb49bf07eb9..59eae16bf5c 100644
--- a/ngraph/core/src/itt.hpp
+++ b/ngraph/core/src/itt.hpp
@@ -36,13 +36,18 @@ namespace ngraph
             OV_ITT_DOMAIN(ngraph_op, "nGraph::Op");
         }
     }
-    OV_CC_DOMAINS(ngraph_op);
 }
+OV_CC_DOMAINS(ngraph_op);
 
-#if defined(SELECTIVE_BUILD) || defined(SELECTIVE_BUILD_ANALYZER)
+#if defined(SELECTIVE_BUILD_ANALYZER)
 #define NGRAPH_OP_SCOPE(region) OV_SCOPE(ngraph_op, region)
+#elif defined(SELECTIVE_BUILD)
+#define NGRAPH_OP_SCOPE(region)                                                                    \
+    if (OV_CC_SCOPE_IS_ENABLED(OV_CC_CAT3(ngraph_op, _, region)) == 0)                             \
+        throw ngraph::ngraph_error(std::string(OV_CC_TOSTRING(OV_CC_CAT3(ngraph_op, _, region))) + \
+                                   " is disabled!")
 #else
-#define NGRAPH_OP_SCOPE(region) OV_ITT_SCOPED_TASK(itt::domains::ngraph_op, #region);
+#define NGRAPH_OP_SCOPE(region) OV_ITT_SCOPED_TASK(ngraph::itt::domains::ngraph_op, #region)
 #endif
 
 #define NGRAPH_TYPE_CASE(region, a, ...)                                                           \
diff --git a/ngraph/core/src/op/abs.cpp b/ngraph/core/src/op/abs.cpp
index 1c66c5fe4c9..6b9acfadab3 100644
--- a/ngraph/core/src/op/abs.cpp
+++ b/ngraph/core/src/op/abs.cpp
@@ -36,6 +36,7 @@ op::Abs::Abs(const Output<Node>& arg)
 
 shared_ptr<Node> op::Abs::clone_with_new_inputs(const OutputVector& new_args) const
 {
+    NGRAPH_OP_SCOPE(v0_Abs_clone_with_new_inputs);
     check_new_args_count(this, new_args);
     return make_shared<Abs>(new_args.at(0));
 }
@@ -73,10 +74,6 @@ namespace absop
 
 bool op::Abs::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    bool rc = false;
-    NGRAPH_OP_SCOPE(v0_Abs_evaluate)
-    {
-        rc = absop::evaluate_abs(inputs[0], outputs[0], shape_size(get_output_shape(0)));
-    }
-    return rc;
+    NGRAPH_OP_SCOPE(v0_Abs_evaluate);
+    return absop::evaluate_abs(inputs[0], outputs[0], shape_size(get_output_shape(0)));
 }
diff --git a/ngraph/core/src/op/acos.cpp b/ngraph/core/src/op/acos.cpp
index d8de50d27f9..68796fe24cb 100644
--- a/ngraph/core/src/op/acos.cpp
+++ b/ngraph/core/src/op/acos.cpp
@@ -45,6 +45,7 @@ op::Acos::Acos(const Output<Node>& arg)
 
 shared_ptr<Node> op::Acos::clone_with_new_inputs(const OutputVector& new_args) const
 {
+    NGRAPH_OP_SCOPE(v0_Acos_clone_with_new_inputs);
     check_new_args_count(this, new_args);
     return make_shared<Acos>(new_args.at(0));
 }
@@ -81,10 +82,6 @@ namespace acosop
 
 bool op::Acos::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    bool rc = false;
-    NGRAPH_OP_SCOPE(v0_Acos_evaluate)
-    {
-        rc = acosop::evaluate_acos(inputs[0], outputs[0], shape_size(get_output_shape(0)));
-    }
-    return rc;
+    NGRAPH_OP_SCOPE(v0_Acos_evaluate);
+    return acosop::evaluate_acos(inputs[0], outputs[0], shape_size(get_output_shape(0)));
 }
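
The itt.hpp hunk above is the core of the patch. Under SELECTIVE_BUILD_ANALYZER, NGRAPH_OP_SCOPE records that a region was reached; under SELECTIVE_BUILD, a region that conditional compilation excluded now throws ngraph_error on entry instead of silently skipping a scoped block; in an ordinary build it is only an ITT profiling task. Throwing on entry is what lets every evaluate() below drop its "bool rc" flag and return directly, as in the abs.cpp and acos.cpp hunks. A minimal stand-alone model of the SELECTIVE_BUILD contract follows; region_is_enabled and MODEL_OP_SCOPE are invented stand-ins for OV_CC_SCOPE_IS_ENABLED and NGRAPH_OP_SCOPE, not the real macros from openvino/cc/selective_build.h:

// Minimal model of the SELECTIVE_BUILD branch of NGRAPH_OP_SCOPE.
// region_is_enabled stands in for the OV_CC_SCOPE_IS_ENABLED check;
// the real macro resolves it at build time, not at run time.
#include <iostream>
#include <stdexcept>
#include <string>

static bool region_is_enabled(const std::string& region)
{
    // Pretend the analyzer run only ever reached Abs::evaluate.
    return region == "ngraph_op_v0_Abs_evaluate";
}

#define MODEL_OP_SCOPE(region)                                                 \
    if (!region_is_enabled("ngraph_op_" #region))                             \
    throw std::runtime_error(std::string("ngraph_op_" #region) + " is disabled!")

bool evaluate_abs() // same shape as a patched evaluate(): scope, then return
{
    MODEL_OP_SCOPE(v0_Abs_evaluate);
    return true;
}

bool evaluate_acos() // this region was "compiled out", so entering it throws
{
    MODEL_OP_SCOPE(v0_Acos_evaluate);
    return true;
}

int main()
{
    std::cout << evaluate_abs() << '\n'; // prints 1
    try
    {
        evaluate_acos();
    }
    catch (const std::runtime_error& e)
    {
        std::cout << e.what() << '\n'; // ngraph_op_v0_Acos_evaluate is disabled!
    }
}

Compiled normally, this prints 1 and then the v0_Acos_evaluate diagnostic; the real macros make the enabled/disabled decision per region at compile time, so a disabled region costs nothing until someone tries to execute it.

diff --git a/ngraph/core/src/op/acosh.cpp b/ngraph/core/src/op/acosh.cpp
index b1168dfb1d3..287c8e03726 100644
---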
a/ngraph/core/src/op/acosh.cpp +++ b/ngraph/core/src/op/acosh.cpp @@ -36,6 +36,7 @@ op::v3::Acosh::Acosh(const Output& arg) shared_ptr op::v3::Acosh::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v3_Acosh_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -70,7 +71,6 @@ namespace acoshop bool op::v3::Acosh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - bool rc = false; - NGRAPH_OP_SCOPE(v3_Acosh_evaluate) { rc = acoshop::evaluate_acosh(inputs[0], outputs[0]); } - return rc; + NGRAPH_OP_SCOPE(v3_Acosh_evaluate); + return acoshop::evaluate_acosh(inputs[0], outputs[0]); } diff --git a/ngraph/core/src/op/add.cpp b/ngraph/core/src/op/add.cpp index b6eb9d5bb3a..5bd22dfb0cd 100644 --- a/ngraph/core/src/op/add.cpp +++ b/ngraph/core/src/op/add.cpp @@ -81,22 +81,20 @@ op::v1::Add::Add(const Output& arg0, bool op::v1::Add::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_Add_visit_attributes); BinaryElementwiseArithmetic::visit_attributes(visitor); return true; } shared_ptr op::v1::Add::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_Add_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } bool op::v1::Add::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - bool rc = false; - NGRAPH_OP_SCOPE(v1_Add_evaluate) - { - rc = add::evaluate_add(inputs[0], inputs[1], outputs[0], get_autob()); - } - return rc; + NGRAPH_OP_SCOPE(v1_Add_evaluate); + return add::evaluate_add(inputs[0], inputs[1], outputs[0], get_autob()); } diff --git a/ngraph/core/src/op/and.cpp b/ngraph/core/src/op/and.cpp index 70efe6beb10..6c5377ffa53 100644 --- a/ngraph/core/src/op/and.cpp +++ b/ngraph/core/src/op/and.cpp @@ -34,12 +34,14 @@ op::v1::LogicalAnd::LogicalAnd(const Output& arg0, bool op::v1::LogicalAnd::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_LogicalAnd_visit_attributes); BinaryElementwiseLogical::visit_attributes(visitor); return true; } shared_ptr op::v1::LogicalAnd::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_LogicalAnd_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } @@ -86,10 +88,6 @@ namespace logand bool op::v1::LogicalAnd::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - bool rc = false; - NGRAPH_OP_SCOPE(v1_LogicalAnd_evaluate) - { - rc = logand::evaluate_logand(inputs[0], inputs[1], outputs[0], get_autob()); - } - return rc; + NGRAPH_OP_SCOPE(v1_LogicalAnd_evaluate); + return logand::evaluate_logand(inputs[0], inputs[1], outputs[0], get_autob()); } diff --git a/ngraph/core/src/op/asin.cpp b/ngraph/core/src/op/asin.cpp index ce913916ca7..98f0a6b436b 100644 --- a/ngraph/core/src/op/asin.cpp +++ b/ngraph/core/src/op/asin.cpp @@ -46,6 +46,7 @@ op::Asin::Asin(const Output& arg) shared_ptr op::Asin::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Asin_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -82,10 +83,6 @@ namespace asinop bool op::Asin::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - bool rc = false; - NGRAPH_OP_SCOPE(v0_Asin_evaluate) - { - rc = asinop::evaluate_asin(inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - 
return rc; + NGRAPH_OP_SCOPE(v0_Asin_evaluate); + return asinop::evaluate_asin(inputs[0], outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/asinh.cpp b/ngraph/core/src/op/asinh.cpp index 7b3afbad488..1b65cb24ead 100644 --- a/ngraph/core/src/op/asinh.cpp +++ b/ngraph/core/src/op/asinh.cpp @@ -36,6 +36,7 @@ op::v3::Asinh::Asinh(const Output& arg) shared_ptr op::v3::Asinh::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v3_Asinh_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -70,7 +71,6 @@ namespace asinhop bool op::v3::Asinh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - bool rc = false; - NGRAPH_OP_SCOPE(v3_Asinh_evaluate) { rc = asinhop::evaluate_asinh(inputs[0], outputs[0]); } - return rc; + NGRAPH_OP_SCOPE(v3_Asinh_evaluate); + return asinhop::evaluate_asinh(inputs[0], outputs[0]); } diff --git a/ngraph/core/src/op/assign.cpp b/ngraph/core/src/op/assign.cpp index b0710e5ceb3..04f385bcd71 100644 --- a/ngraph/core/src/op/assign.cpp +++ b/ngraph/core/src/op/assign.cpp @@ -16,6 +16,7 @@ #include "ngraph/op/assign.hpp" #include +#include "itt.hpp" #include "ngraph/op/read_value.hpp" using namespace std; @@ -32,6 +33,7 @@ op::v3::Assign::Assign(const Output& new_value, const std::string& variabl void op::v3::Assign::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v3_Assign_validate_and_infer_types); auto value = input_value(0); auto arg_t = get_input_element_type(0); auto output_shape = get_input_partial_shape(0); @@ -78,12 +80,14 @@ void op::v3::Assign::validate_and_infer_types() shared_ptr op::v3::Assign::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v3_Assign_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), m_variable_id); } bool op::v3::Assign::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v3_Assign_visit_attributes); visitor.on_attribute("variable_id", m_variable_id); return true; } diff --git a/ngraph/core/src/op/atan.cpp b/ngraph/core/src/op/atan.cpp index 41a11d67a46..fceb20da284 100644 --- a/ngraph/core/src/op/atan.cpp +++ b/ngraph/core/src/op/atan.cpp @@ -45,6 +45,7 @@ op::Atan::Atan(const Output& arg) shared_ptr op::Atan::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Atan_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -81,10 +82,6 @@ namespace atanop bool op::Atan::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - bool rc = false; - NGRAPH_OP_SCOPE(v0_Atan_evaluate) - { - rc = atanop::evaluate_atan(inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - return rc; + NGRAPH_OP_SCOPE(v0_Atan_evaluate); + return atanop::evaluate_atan(inputs[0], outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/atanh.cpp b/ngraph/core/src/op/atanh.cpp index ca2eb9d1df5..8497a2c1011 100644 --- a/ngraph/core/src/op/atanh.cpp +++ b/ngraph/core/src/op/atanh.cpp @@ -36,6 +36,7 @@ op::v3::Atanh::Atanh(const Output& arg) shared_ptr op::v3::Atanh::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v3_Atanh_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -70,7 +71,6 @@ namespace atanhop bool op::v3::Atanh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - bool rc = false; - 
NGRAPH_OP_SCOPE(v3_Atanh_evaluate) { rc = atanhop::evaluate_atanh(inputs[0], outputs[0]); } - return rc; + NGRAPH_OP_SCOPE(v3_Atanh_evaluate); + return atanhop::evaluate_atanh(inputs[0], outputs[0]); } diff --git a/ngraph/core/src/op/avg_pool.cpp b/ngraph/core/src/op/avg_pool.cpp index 64bc11acd70..e4b63a41537 100644 --- a/ngraph/core/src/op/avg_pool.cpp +++ b/ngraph/core/src/op/avg_pool.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/avg_pool.hpp" +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/graph_util.hpp" #include "ngraph/validation_util.hpp" @@ -65,6 +66,7 @@ op::v1::AvgPool::AvgPool(const Output& arg, bool op::v1::AvgPool::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_AvgPool_visit_attributes); visitor.on_attribute("kernel", m_kernel); visitor.on_attribute("strides", m_strides); visitor.on_attribute("pads_begin", m_pads_begin); @@ -77,6 +79,7 @@ bool op::v1::AvgPool::visit_attributes(AttributeVisitor& visitor) void op::v1::AvgPool::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_AvgPool_validate_and_infer_types); if (0 == m_strides.size()) { m_strides = Strides(m_kernel.size(), 1); @@ -214,6 +217,7 @@ void op::v1::AvgPool::set_rounding_type(op::RoundingType rounding_type) shared_ptr op::v1::AvgPool::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_AvgPool_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), m_strides, diff --git a/ngraph/core/src/op/batch_norm.cpp b/ngraph/core/src/op/batch_norm.cpp index a778c4c15a6..9dae5ccda88 100644 --- a/ngraph/core/src/op/batch_norm.cpp +++ b/ngraph/core/src/op/batch_norm.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/op/batch_norm.hpp" @@ -39,12 +40,14 @@ op::v0::BatchNormInference::BatchNormInference(const Output& input, bool op::v0::BatchNormInference::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_BatchNormInference_visit_attributes); visitor.on_attribute("epsilon", m_epsilon); return true; } void op::v0::BatchNormInference::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_BatchNormInference_validate_and_infer_types); element::Type result_et; PartialShape result_batch_shape; PartialShape result_channel_shape; // unused here @@ -69,6 +72,7 @@ void op::v0::BatchNormInference::validate_and_infer_types() std::shared_ptr op::v0::BatchNormInference::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_BatchNormInference_clone_with_new_inputs); check_new_args_count(this, new_args); return std::make_shared( new_args.at(2), new_args.at(0), new_args.at(1), new_args.at(3), new_args.at(4), m_epsilon); @@ -90,12 +94,14 @@ op::v5::BatchNormInference::BatchNormInference(const Output& input, bool op::v5::BatchNormInference::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v5_BatchNormInference_visit_attributes); visitor.on_attribute("epsilon", m_epsilon); return true; } void op::v5::BatchNormInference::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v5_BatchNormInference_validate_and_infer_types); element::Type result_et; PartialShape result_batch_shape; PartialShape result_channel_shape; // unused here @@ -120,6 +126,7 @@ void op::v5::BatchNormInference::validate_and_infer_types() std::shared_ptr op::v5::BatchNormInference::clone_with_new_inputs(const 
OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v5_BatchNormInference_clone_with_new_inputs); check_new_args_count(this, new_args); return std::make_shared( new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3), new_args.at(4), m_epsilon); diff --git a/ngraph/core/src/op/batch_to_space.cpp b/ngraph/core/src/op/batch_to_space.cpp index 20dbd8c0b7e..f72b01d6b3a 100644 --- a/ngraph/core/src/op/batch_to_space.cpp +++ b/ngraph/core/src/op/batch_to_space.cpp @@ -46,6 +46,7 @@ ngraph::op::v1::BatchToSpace::BatchToSpace(const ngraph::Output& d void op::v1::BatchToSpace::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_BatchToSpace_validate_and_infer_types); PartialShape data_pshape = get_input_partial_shape(0); const auto& data_type = get_input_element_type(0); @@ -132,6 +133,7 @@ void op::v1::BatchToSpace::validate_and_infer_types() std::shared_ptr ngraph::op::v1::BatchToSpace::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_BatchToSpace_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared( new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3)); @@ -139,6 +141,7 @@ std::shared_ptr bool ngraph::op::v1::BatchToSpace::visit_attributes(ngraph::AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_BatchToSpace_visit_attributes); return true; } @@ -259,6 +262,6 @@ namespace bool ngraph::op::v1::BatchToSpace::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_BatchToSpace) { return batch_to_space_evaluate(outputs, inputs); } - return false; + NGRAPH_OP_SCOPE(v1_BatchToSpace); + return batch_to_space_evaluate(outputs, inputs); } diff --git a/ngraph/core/src/op/binary_convolution.cpp b/ngraph/core/src/op/binary_convolution.cpp index a829a1b4be4..5a91a8edd9f 100644 --- a/ngraph/core/src/op/binary_convolution.cpp +++ b/ngraph/core/src/op/binary_convolution.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/binary_convolution.hpp" +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/axis_vector.hpp" #include "ngraph/coordinate_diff.hpp" @@ -71,6 +72,7 @@ op::v1::BinaryConvolution::BinaryConvolution(const Output& data, void op::v1::BinaryConvolution::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_BinaryConvolution_validate_and_infer_types); const PartialShape& data_batch_shape = get_input_partial_shape(0); element::Type data_batch_et = get_input_element_type(0); const PartialShape& filters_shape = get_input_partial_shape(1); @@ -152,6 +154,7 @@ void op::v1::BinaryConvolution::validate_and_infer_types() shared_ptr op::v1::BinaryConvolution::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_BinaryConvolution_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), @@ -166,6 +169,7 @@ shared_ptr bool op::v1::BinaryConvolution::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_BinaryConvolution_visit_attributes); visitor.on_attribute("strides", m_strides); visitor.on_attribute("pads_begin", m_pads_begin); visitor.on_attribute("pads_end", m_pads_end); diff --git a/ngraph/core/src/op/broadcast.cpp b/ngraph/core/src/op/broadcast.cpp index 93cba402c78..756455f6e26 100644 --- a/ngraph/core/src/op/broadcast.cpp +++ b/ngraph/core/src/op/broadcast.cpp @@ -161,6 +161,7 @@ bool op::v3::Broadcast::broadcast_evaluate(const HostTensorVector& outputs, void op::v3::Broadcast::validate_and_infer_types() 
{ + NGRAPH_OP_SCOPE(v3_Broadcast_validate_and_infer_types); if (m_mode.m_type == BroadcastType::NONE) { NODE_VALIDATION_CHECK(this, @@ -204,6 +205,7 @@ void op::v3::Broadcast::validate_and_infer_types() shared_ptr op::v3::Broadcast::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v3_Broadcast_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 2) { @@ -221,6 +223,7 @@ shared_ptr op::v3::Broadcast::clone_with_new_inputs(const OutputVector& ne bool op::v3::Broadcast::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v3_Broadcast_visit_attributes); visitor.on_attribute("mode", m_mode); return true; } @@ -228,8 +231,8 @@ bool op::v3::Broadcast::visit_attributes(AttributeVisitor& visitor) bool op::v3::Broadcast::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v3_Broadcast_evaluate) { return broadcast_evaluate(outputs, inputs); } - return false; + NGRAPH_OP_SCOPE(v3_Broadcast_evaluate); + return broadcast_evaluate(outputs, inputs); } namespace @@ -275,6 +278,7 @@ op::v1::Broadcast::Broadcast(const Output& arg, void op::v1::Broadcast::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_Broadcast_validate_and_infer_types); // m_type is deduced and not always explicitly stated, for cases where broadcast // has 2 inputs its always NUMPY mode if (m_broadcast_spec.m_type == AutoBroadcastType::NONE && get_input_size() < 3) @@ -304,6 +308,7 @@ void op::v1::Broadcast::validate_and_infer_types() shared_ptr op::v1::Broadcast::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_Broadcast_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared( new_args.at(0), new_args.at(1), new_args.at(2), m_broadcast_spec); @@ -311,6 +316,7 @@ shared_ptr op::v1::Broadcast::clone_with_new_inputs(const OutputVector& ne bool op::v1::Broadcast::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_Broadcast_visit_attributes); visitor.on_attribute("mode", m_broadcast_spec); return true; } @@ -318,9 +324,6 @@ bool op::v1::Broadcast::visit_attributes(AttributeVisitor& visitor) bool op::v1::Broadcast::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_Broadcast_evaluate) - { - return op::util::BroadcastBase::evaluate(outputs, inputs); - } - return false; + NGRAPH_OP_SCOPE(v1_Broadcast_evaluate); + return op::util::BroadcastBase::evaluate(outputs, inputs); } diff --git a/ngraph/core/src/op/bucketize.cpp b/ngraph/core/src/op/bucketize.cpp index fb1bd237fea..c34513c27cb 100644 --- a/ngraph/core/src/op/bucketize.cpp +++ b/ngraph/core/src/op/bucketize.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "bucketize.hpp" +#include "itt.hpp" using namespace ngraph; using namespace std; @@ -34,6 +35,7 @@ op::v3::Bucketize::Bucketize(const Output& data, bool op::v3::Bucketize::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v3_Bucketize_visit_attributes); visitor.on_attribute("output_type", m_output_type); visitor.on_attribute("with_right_bound", m_with_right_bound); return true; @@ -41,6 +43,7 @@ bool op::v3::Bucketize::visit_attributes(AttributeVisitor& visitor) void op::v3::Bucketize::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v3_Bucketize_validate_and_infer_types); const PartialShape& data_pshape = get_input_partial_shape(0); const PartialShape& buckets_pshape = get_input_partial_shape(1); @@ -65,6 +68,7 @@ void 
op::v3::Bucketize::validate_and_infer_types() shared_ptr op::v3::Bucketize::clone_with_new_inputs(const OutputVector& inputs) const { + NGRAPH_OP_SCOPE(v3_Bucketize_clone_with_new_inputs); check_new_args_count(this, inputs); return make_shared( diff --git a/ngraph/core/src/op/ceiling.cpp b/ngraph/core/src/op/ceiling.cpp index 5e6a627750d..fd5fcd71ab6 100644 --- a/ngraph/core/src/op/ceiling.cpp +++ b/ngraph/core/src/op/ceiling.cpp @@ -34,6 +34,7 @@ op::Ceiling::Ceiling(const Output& arg) shared_ptr op::Ceiling::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Ceiling_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -83,9 +84,6 @@ namespace ceiling bool op::Ceiling::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Ceiling_evaluate) - { - return ceiling::evaluate_ceiling(inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v0_Ceiling_evaluate); + return ceiling::evaluate_ceiling(inputs[0], outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/clamp.cpp b/ngraph/core/src/op/clamp.cpp index 179ee2886b7..c786054b7ad 100644 --- a/ngraph/core/src/op/clamp.cpp +++ b/ngraph/core/src/op/clamp.cpp @@ -86,12 +86,9 @@ namespace clamp bool op::v0::Clamp::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Clamp_evaluate) - { - return clamp::evaluate_clamp( - inputs[0], outputs[0], get_min(), get_max(), shape_size(get_input_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v0_Clamp_evaluate); + return clamp::evaluate_clamp( + inputs[0], outputs[0], get_min(), get_max(), shape_size(get_input_shape(0))); } NGRAPH_RTTI_DEFINITION(op::v0::Clamp, "Clamp", 0); @@ -230,6 +227,7 @@ OutputVector op::Clamp::decompose_op() const shared_ptr op::Clamp::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Clamp_clone_with_new_inputs); NODE_VALIDATION_CHECK(this, new_args.size() == 1, "Expected 1 element in new_args for the Clamp op but got ", @@ -240,6 +238,7 @@ shared_ptr op::Clamp::clone_with_new_inputs(const OutputVector& new_args) bool op::Clamp::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Clamp_visit_attributes); visitor.on_attribute("min", m_min); visitor.on_attribute("max", m_max); return true; diff --git a/ngraph/core/src/op/concat.cpp b/ngraph/core/src/op/concat.cpp index cc3cce010c6..f334a010ba6 100644 --- a/ngraph/core/src/op/concat.cpp +++ b/ngraph/core/src/op/concat.cpp @@ -41,12 +41,14 @@ op::Concat::Concat(const NodeVector& args, int64_t axis) bool op::Concat::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Concat_visit_attributes); visitor.on_attribute("axis", m_axis); return true; } void op::Concat::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_Concat_validate_and_infer_types); NODE_VALIDATION_CHECK(this, get_input_size() >= 1, "At least one argument required."); PartialShape inputs_shape_scheme{PartialShape::dynamic()}; @@ -85,7 +87,8 @@ void op::Concat::validate_and_infer_types() NODE_VALIDATION_CHECK( this, PartialShape::merge_into(inputs_shape_scheme, this_input_shape), - "Argument shapes are inconsistent; they must have the same rank, and must have ", + "Argument shapes are inconsistent; they must have the same rank, and must " + "have ", "equal dimension everywhere except on the concatenation axis (axis ", concat_axis, ")."); @@ -110,6 +113,7 @@ void 
op::Concat::validate_and_infer_types() shared_ptr op::Concat::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Concat_clone_with_new_inputs); // TODO(amprocte): Should we check the new_args count here? return make_shared(new_args, m_axis); } @@ -144,10 +148,7 @@ namespace bool op::Concat::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Concat_evaluate) - { - auto concat_axis = get_axis() < 0 ? get_axis() + inputs[0]->get_shape().size() : get_axis(); - return evaluate_concat(inputs, outputs[0], concat_axis); - } - return false; + NGRAPH_OP_SCOPE(v0_Concat_evaluate); + auto concat_axis = get_axis() < 0 ? get_axis() + inputs[0]->get_shape().size() : get_axis(); + return evaluate_concat(inputs, outputs[0], concat_axis); } diff --git a/ngraph/core/src/op/constant.cpp b/ngraph/core/src/op/constant.cpp index 0bdea7370da..863c06d235e 100644 --- a/ngraph/core/src/op/constant.cpp +++ b/ngraph/core/src/op/constant.cpp @@ -549,6 +549,7 @@ void op::Constant::set_data_shape(const Shape& shape) shared_ptr op::Constant::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Constant_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(*this); } @@ -625,6 +626,7 @@ bool op::Constant::are_all_data_elements_bitwise_identical() const bool op::v0::Constant::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Constant_visit_attributes); visitor.on_attribute("element_type", m_element_type); visitor.on_attribute("shape", m_shape); if (m_data == nullptr) @@ -639,13 +641,10 @@ bool op::v0::Constant::visit_attributes(AttributeVisitor& visitor) bool op::v0::Constant::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Constant_evaluate) - { - auto output = outputs[0]; - output->write(get_data_ptr(), output->get_size_in_bytes()); - return true; - } - return false; + NGRAPH_OP_SCOPE(v0_Constant_evaluate); + auto output = outputs[0]; + output->write(get_data_ptr(), output->get_size_in_bytes()); + return true; } // diff --git a/ngraph/core/src/op/convert.cpp b/ngraph/core/src/op/convert.cpp index 94fdaae85f5..bcb2e843f2e 100644 --- a/ngraph/core/src/op/convert.cpp +++ b/ngraph/core/src/op/convert.cpp @@ -34,17 +34,20 @@ op::Convert::Convert(const Output& arg, const element::Type& destination_t void op::Convert::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_Convert_validate_and_infer_types); set_output_type(0, m_destination_type, get_input_partial_shape(0)); } bool op::Convert::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Convert_visit_attributes); visitor.on_attribute("destination_type", m_destination_type); return true; } shared_ptr op::Convert::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Convert_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), m_destination_type); } @@ -66,10 +69,8 @@ namespace convert #define TYPE_OUT_CASE(a, ...) 
\ case element::Type_t::a: \ { \ - NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_covert_out, _, a)) \ - { \ - rc = evaluate(__VA_ARGS__); \ - } \ + NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_covert_out, _, a)); \ + rc = evaluate(__VA_ARGS__); \ } \ break @@ -119,9 +120,6 @@ namespace convert bool op::v0::Convert::evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const { - NGRAPH_OP_SCOPE(v0_Convert_evaluate) - { - return convert::evaluate_convert(input_values[0], output_values[0]); - } - return false; + NGRAPH_OP_SCOPE(v0_Convert_evaluate); + return convert::evaluate_convert(input_values[0], output_values[0]); } diff --git a/ngraph/core/src/op/convert_like.cpp b/ngraph/core/src/op/convert_like.cpp index 66c6be44f93..22515d44cee 100644 --- a/ngraph/core/src/op/convert_like.cpp +++ b/ngraph/core/src/op/convert_like.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include +#include "itt.hpp" #include "ngraph/op/convert_like.hpp" @@ -31,16 +32,19 @@ op::v1::ConvertLike::ConvertLike(const Output& data, const Output& l void op::v1::ConvertLike::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_ConvertLike_validate_and_infer_types); set_output_type(0, get_input_element_type(1), get_input_partial_shape(0)); } bool op::v1::ConvertLike::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_ConvertLike_visit_attributes); return true; } shared_ptr op::v1::ConvertLike::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_ConvertLike_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1)); } diff --git a/ngraph/core/src/op/convolution.cpp b/ngraph/core/src/op/convolution.cpp index 8c5f31a86e5..8842247dd8a 100644 --- a/ngraph/core/src/op/convolution.cpp +++ b/ngraph/core/src/op/convolution.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/convolution.hpp" +#include "itt.hpp" #include "ngraph/axis_vector.hpp" #include "ngraph/coordinate_diff.hpp" #include "ngraph/op/reshape.hpp" @@ -46,6 +47,7 @@ op::v1::Convolution::Convolution(const Output& data_batch, bool op::v1::Convolution::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_Convolution_visit_attributes); visitor.on_attribute("strides", m_strides); visitor.on_attribute("dilations", m_dilations); visitor.on_attribute("pads_begin", m_pads_begin); @@ -56,6 +58,7 @@ bool op::v1::Convolution::visit_attributes(AttributeVisitor& visitor) void op::v1::Convolution::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_Convolution_validate_and_infer_types); const PartialShape& data_batch_shape = get_input_partial_shape(0); element::Type data_batch_et = get_input_element_type(0); const PartialShape& filters_shape = get_input_partial_shape(1); @@ -145,6 +148,7 @@ void op::v1::Convolution::validate_and_infer_types() shared_ptr op::v1::Convolution::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_Convolution_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), @@ -183,6 +187,7 @@ op::v1::ConvolutionBackpropData::ConvolutionBackpropData(const Output& dat bool op::v1::ConvolutionBackpropData::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_ConvolutionBackpropData_visit_attributes); visitor.on_attribute("strides", m_strides); visitor.on_attribute("dilations", m_dilations); visitor.on_attribute("pads_begin", 
m_pads_begin); @@ -291,6 +296,7 @@ void op::v1::ConvolutionBackpropData::infer_conv_backprop_output_spatial_shape( void op::v1::ConvolutionBackpropData::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_ConvolutionBackpropData_validate_and_infer_types); auto data_pshape = get_input_partial_shape(0); element::Type delta_et = get_input_element_type(0); const PartialShape& filters_pshape = get_input_partial_shape(1); @@ -432,6 +438,7 @@ void op::v1::ConvolutionBackpropData::validate_and_infer_types() shared_ptr op::v1::ConvolutionBackpropData::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_ConvolutionBackpropData_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 3) { diff --git a/ngraph/core/src/op/cos.cpp b/ngraph/core/src/op/cos.cpp index ad9020f223d..6d4f5e571bd 100644 --- a/ngraph/core/src/op/cos.cpp +++ b/ngraph/core/src/op/cos.cpp @@ -37,11 +37,13 @@ op::Cos::Cos(const Output& arg) bool op::Cos::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Cos_visit_attributes); return true; } shared_ptr op::Cos::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Cos_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -78,9 +80,6 @@ namespace cosop bool op::Cos::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Cos_evaluate) - { - return cosop::evaluate_cos(inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v0_Cos_evaluate); + return cosop::evaluate_cos(inputs[0], outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/cosh.cpp b/ngraph/core/src/op/cosh.cpp index f1701321d67..5651f84544c 100644 --- a/ngraph/core/src/op/cosh.cpp +++ b/ngraph/core/src/op/cosh.cpp @@ -36,11 +36,13 @@ op::Cosh::Cosh(const Output& arg) bool op::Cosh::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Cosh_visit_attributes); return true; } shared_ptr op::Cosh::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Cosh_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -77,9 +79,6 @@ namespace coshop bool op::Cosh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Cosh_evaluate) - { - return coshop::evaluate_cosh(inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v0_Cosh_evaluate); + return coshop::evaluate_cosh(inputs[0], outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/ctc_greedy_decoder.cpp b/ngraph/core/src/op/ctc_greedy_decoder.cpp index 3c9b9726fe4..c86f9dc0e80 100644 --- a/ngraph/core/src/op/ctc_greedy_decoder.cpp +++ b/ngraph/core/src/op/ctc_greedy_decoder.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/ctc_greedy_decoder.hpp" +#include "itt.hpp" using namespace std; using namespace ngraph; @@ -32,6 +33,7 @@ op::CTCGreedyDecoder::CTCGreedyDecoder(const Output& input, void op::CTCGreedyDecoder::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_CTCGreedyDecoder_validate_and_infer_types); const auto& logits_pshape = get_input_partial_shape(0); const auto& seq_mask_pshape = get_input_partial_shape(1); auto input_et = get_input_element_type(0); @@ -99,12 +101,14 @@ void op::CTCGreedyDecoder::validate_and_infer_types() bool 
op::CTCGreedyDecoder::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_CTCGreedyDecoder_visit_attributes); visitor.on_attribute("ctc_merge_repeated", m_ctc_merge_repeated); return true; } shared_ptr op::CTCGreedyDecoder::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_CTCGreedyDecoder_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), m_ctc_merge_repeated); } diff --git a/ngraph/core/src/op/ctc_loss.cpp b/ngraph/core/src/op/ctc_loss.cpp index d0036639304..b84fd0f626e 100644 --- a/ngraph/core/src/op/ctc_loss.cpp +++ b/ngraph/core/src/op/ctc_loss.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/ctc_loss.hpp" +#include "itt.hpp" using namespace std; using namespace ngraph; @@ -54,6 +55,7 @@ op::v4::CTCLoss::CTCLoss(const Output& logits, void op::v4::CTCLoss::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v4_CTCLoss_validate_and_infer_types); // check types of input tensors const auto& logits_type = get_input_element_type(0); const auto& logit_length_type = get_input_element_type(1); @@ -229,6 +231,7 @@ void op::v4::CTCLoss::validate_and_infer_types() bool op::v4::CTCLoss::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v4_CTCLoss_visit_attributes); visitor.on_attribute("preprocess_collapse_repeated", preprocess_collapse_repeated_); visitor.on_attribute("ctc_merge_repeated", ctc_merge_repeated_); visitor.on_attribute("unique", unique_); @@ -237,6 +240,7 @@ bool op::v4::CTCLoss::visit_attributes(AttributeVisitor& visitor) shared_ptr op::v4::CTCLoss::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v4_CTCLoss_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 4) { diff --git a/ngraph/core/src/op/cum_sum.cpp b/ngraph/core/src/op/cum_sum.cpp index c00b80766e3..9bf01c3a2cb 100644 --- a/ngraph/core/src/op/cum_sum.cpp +++ b/ngraph/core/src/op/cum_sum.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/cum_sum.hpp" +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/graph_util.hpp" #include "ngraph/op/broadcast.hpp" @@ -46,6 +47,7 @@ op::v0::CumSum::CumSum(const Output& arg, const bool exclusive, const bool bool op::v0::CumSum::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_CumSum_visit_attributes); visitor.on_attribute("exclusive", m_exclusive); visitor.on_attribute("reverse", m_reverse); return true; @@ -53,6 +55,7 @@ bool op::v0::CumSum::visit_attributes(AttributeVisitor& visitor) void op::v0::CumSum::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_CumSum_validate_and_infer_types); element::Type arg_type = get_input_element_type(0); PartialShape arg_shape = get_input_partial_shape(0); set_output_type(0, arg_type, arg_shape); @@ -73,6 +76,7 @@ void op::v0::CumSum::validate_and_infer_types() shared_ptr op::v0::CumSum::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_CumSum_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), m_exclusive, m_reverse); } diff --git a/ngraph/core/src/op/deformable_convolution.cpp b/ngraph/core/src/op/deformable_convolution.cpp index 8853ee88d10..5963551c9dc 100644 --- a/ngraph/core/src/op/deformable_convolution.cpp +++ b/ngraph/core/src/op/deformable_convolution.cpp @@ -15,6 +15,7 @@ 
//***************************************************************************** #include "ngraph/op/deformable_convolution.hpp" +#include "itt.hpp" #include "ngraph/axis_vector.hpp" #include "ngraph/coordinate_diff.hpp" #include "ngraph/op/reshape.hpp" @@ -50,6 +51,7 @@ op::v1::DeformableConvolution::DeformableConvolution(const Output& arg, bool op::v1::DeformableConvolution::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_DeformableConvolution_visit_attributes); visitor.on_attribute("strides", m_strides); visitor.on_attribute("dilations", m_dilations); visitor.on_attribute("pads_begin", m_pads_begin); @@ -62,6 +64,7 @@ bool op::v1::DeformableConvolution::visit_attributes(AttributeVisitor& visitor) void op::v1::DeformableConvolution::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_DeformableConvolution_validate_and_infer_types); const PartialShape& data_batch_shape = get_input_partial_shape(0); const PartialShape& deformable_values_shape = get_input_partial_shape(1); const PartialShape& filters_shape = get_input_partial_shape(2); @@ -101,14 +104,14 @@ void op::v1::DeformableConvolution::validate_and_infer_types() if (m_deformable_group > 1 && deformable_values_shape[1].is_static()) { - NODE_VALIDATION_CHECK( - this, - deformable_values_shape[1].get_length() % m_deformable_group == 0, - "The deformable values input must be evenly divisible by the 'deformable group' value " - "along the channels axis. Current input shape: ", - deformable_values_shape, - ", 'deformable group' attribute value: ", - m_deformable_group); + NODE_VALIDATION_CHECK(this, + deformable_values_shape[1].get_length() % m_deformable_group == 0, + "The deformable values input must be evenly divisible by the " + "'deformable group' value " + "along the channels axis. 
Current input shape: ", + deformable_values_shape, + ", 'deformable group' attribute value: ", + m_deformable_group); } element::Type result_et; @@ -197,6 +200,7 @@ void op::v1::DeformableConvolution::validate_and_infer_types() shared_ptr op::v1::DeformableConvolution::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_DeformableConvolution_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), diff --git a/ngraph/core/src/op/deformable_psroi_pooling.cpp b/ngraph/core/src/op/deformable_psroi_pooling.cpp index 8eb31565e78..05b48e2ebd3 100644 --- a/ngraph/core/src/op/deformable_psroi_pooling.cpp +++ b/ngraph/core/src/op/deformable_psroi_pooling.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "deformable_psroi_pooling.hpp" +#include "itt.hpp" using namespace std; using namespace ngraph; @@ -70,6 +71,7 @@ op::v1::DeformablePSROIPooling::DeformablePSROIPooling(const Output& input bool op::v1::DeformablePSROIPooling::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_DeformablePSROIPooling_visit_attributes); visitor.on_attribute("output_dim", m_output_dim); visitor.on_attribute("spatial_scale", m_spatial_scale); visitor.on_attribute("group_size", m_group_size); @@ -83,6 +85,7 @@ bool op::v1::DeformablePSROIPooling::visit_attributes(AttributeVisitor& visitor) void op::v1::DeformablePSROIPooling::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_DeformablePSROIPooling_validate_and_infer_types); const auto& input_et = get_input_element_type(0); const auto& input_pshape = get_input_partial_shape(0); @@ -128,6 +131,7 @@ void op::v1::DeformablePSROIPooling::validate_and_infer_types() shared_ptr op::v1::DeformablePSROIPooling::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_DeformablePSROIPooling_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 3) { diff --git a/ngraph/core/src/op/depth_to_space.cpp b/ngraph/core/src/op/depth_to_space.cpp index 616d3620af7..8de75a57092 100644 --- a/ngraph/core/src/op/depth_to_space.cpp +++ b/ngraph/core/src/op/depth_to_space.cpp @@ -54,6 +54,7 @@ op::DepthToSpace::DepthToSpace(const Output& data, bool op::DepthToSpace::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_DepthToSpace_visit_attributes); visitor.on_attribute("block_size", m_blocksize); visitor.on_attribute("mode", m_mode); return true; @@ -61,6 +62,7 @@ bool op::DepthToSpace::visit_attributes(AttributeVisitor& visitor) shared_ptr op::DepthToSpace::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_DepthToSpace_clone_with_new_inputs); if (new_args.size() != 1) { throw ngraph_error("Incorrect number of new arguments"); @@ -70,6 +72,7 @@ shared_ptr op::DepthToSpace::clone_with_new_inputs(const OutputVector& new void op::DepthToSpace::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_DepthToSpace_validate_and_infer_types); PartialShape data_pshape = get_input_partial_shape(0); const auto& data_type = get_input_element_type(0); @@ -243,8 +246,8 @@ bool op::DepthToSpace::evaluate_depth_to_space(const HostTensorVector& outputs, bool op::DepthToSpace::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_DepthToSpace_evaluate) { return evaluate_depth_to_space(outputs, inputs); } - return false; + NGRAPH_OP_SCOPE(v0_DepthToSpace_evaluate); + return evaluate_depth_to_space(outputs, inputs); 
} namespace ngraph { diff --git a/ngraph/core/src/op/detection_output.cpp b/ngraph/core/src/op/detection_output.cpp index e0471495bb0..21d2119bcbf 100644 --- a/ngraph/core/src/op/detection_output.cpp +++ b/ngraph/core/src/op/detection_output.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/detection_output.hpp" +#include "itt.hpp" using namespace std; using namespace ngraph; @@ -45,6 +46,7 @@ op::DetectionOutput::DetectionOutput(const Output& box_logits, void op::DetectionOutput::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_DetectionOutput_validate_and_infer_types); NODE_VALIDATION_CHECK( this, m_attrs.num_classes > 0, "Number of classes must be greater than zero"); @@ -266,6 +268,7 @@ void op::DetectionOutput::validate_and_infer_types() shared_ptr op::DetectionOutput::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_DetectionOutput_clone_with_new_inputs); check_new_args_count(this, new_args); auto num_args = new_args.size(); @@ -291,6 +294,7 @@ shared_ptr op::DetectionOutput::clone_with_new_inputs(const OutputVector& bool op::DetectionOutput::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_DetectionOutput_visit_attributes); visitor.on_attribute("num_classes", m_attrs.num_classes); visitor.on_attribute("background_label_id", m_attrs.background_label_id); visitor.on_attribute("top_k", m_attrs.top_k); diff --git a/ngraph/core/src/op/divide.cpp b/ngraph/core/src/op/divide.cpp index 03625897db5..83f914e0246 100644 --- a/ngraph/core/src/op/divide.cpp +++ b/ngraph/core/src/op/divide.cpp @@ -92,6 +92,7 @@ op::v1::Divide::Divide(const Output& arg0, bool op::v1::Divide::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_Divide_visit_attributes); BinaryElementwiseArithmetic::visit_attributes(visitor); visitor.on_attribute("m_pythondiv", m_pythondiv); return true; @@ -99,6 +100,7 @@ bool op::v1::Divide::visit_attributes(AttributeVisitor& visitor) shared_ptr op::v1::Divide::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_Divide_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared( new_args.at(0), new_args.at(1), this->is_pythondiv(), this->get_autob()); @@ -106,10 +108,6 @@ shared_ptr op::v1::Divide::clone_with_new_inputs(const OutputVector& new_a bool op::v1::Divide::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_Divide_evaluate) - { - return divide::evaluate_divide( - inputs[0], inputs[1], outputs[0], get_autob(), is_pythondiv()); - } - return false; + NGRAPH_OP_SCOPE(v1_Divide_evaluate); + return divide::evaluate_divide(inputs[0], inputs[1], outputs[0], get_autob(), is_pythondiv()); } diff --git a/ngraph/core/src/op/elu.cpp b/ngraph/core/src/op/elu.cpp index 569d6f31d8b..19b4cab5847 100644 --- a/ngraph/core/src/op/elu.cpp +++ b/ngraph/core/src/op/elu.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/elu.hpp" +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/builder/autobroadcast.hpp" #include "ngraph/op/constant.hpp" @@ -33,18 +34,21 @@ op::Elu::Elu(const Output& data, const double alpha) bool ngraph::op::v0::Elu::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Elu_visit_attributes); visitor.on_attribute("alpha", m_alpha); return true; } void op::v0::Elu::validate_and_infer_types() { + 
NGRAPH_OP_SCOPE(v0_Elu_validate_and_infer_types); set_output_size(1); set_output_type(0, get_input_element_type(0), get_input_partial_shape(0)); } shared_ptr op::Elu::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Elu_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), m_alpha); } diff --git a/ngraph/core/src/op/embedding_segments_sum.cpp b/ngraph/core/src/op/embedding_segments_sum.cpp index 6a2eca7a92b..33a4d5aed04 100644 --- a/ngraph/core/src/op/embedding_segments_sum.cpp +++ b/ngraph/core/src/op/embedding_segments_sum.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/embedding_segments_sum.hpp" +#include "itt.hpp" #include "ngraph/op/constant.hpp" #include "ngraph/opsets/opset3.hpp" @@ -55,6 +56,7 @@ op::v3::EmbeddingSegmentsSum::EmbeddingSegmentsSum(const Output& emb_table void op::v3::EmbeddingSegmentsSum::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v3_EmbeddingSegmentsSum_validate_and_infer_types); NODE_VALIDATION_CHECK(this, get_input_element_type(SEGMENT_IDS) == element::i64 || get_input_element_type(SEGMENT_IDS) == element::i32, @@ -182,6 +184,7 @@ void op::v3::EmbeddingSegmentsSum::validate_and_infer_types() shared_ptr op::v3::EmbeddingSegmentsSum::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v3_EmbeddingSegmentsSum_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 4) { diff --git a/ngraph/core/src/op/embeddingbag_offsets_sum.cpp b/ngraph/core/src/op/embeddingbag_offsets_sum.cpp index 93ad5087f17..c55c296fbd0 100644 --- a/ngraph/core/src/op/embeddingbag_offsets_sum.cpp +++ b/ngraph/core/src/op/embeddingbag_offsets_sum.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/embeddingbag_offsets_sum.hpp" +#include "itt.hpp" #include "ngraph/op/constant.hpp" using namespace std; @@ -49,6 +50,7 @@ op::v3::EmbeddingBagOffsetsSum::EmbeddingBagOffsetsSum(const Output& emb_t shared_ptr op::v3::EmbeddingBagOffsetsSum::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v3_EmbeddingBagOffsetsSum_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 3) { @@ -69,4 +71,4 @@ shared_ptr { throw ngraph_error("Incorrect number of arguments"); } -} \ No newline at end of file +} diff --git a/ngraph/core/src/op/embeddingbag_packedsum.cpp b/ngraph/core/src/op/embeddingbag_packedsum.cpp index 8f9ccd3c467..687e887ed7b 100644 --- a/ngraph/core/src/op/embeddingbag_packedsum.cpp +++ b/ngraph/core/src/op/embeddingbag_packedsum.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/embeddingbag_packedsum.hpp" +#include "itt.hpp" #include "ngraph/op/constant.hpp" using namespace std; @@ -38,6 +39,7 @@ op::v3::EmbeddingBagPackedSum::EmbeddingBagPackedSum(const Output& emb_tab shared_ptr op::v3::EmbeddingBagPackedSum::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v3_EmbeddingBagPackedSum_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 2) { diff --git a/ngraph/core/src/op/equal.cpp b/ngraph/core/src/op/equal.cpp index 9ddd14c9e81..39ae4809229 100644 --- a/ngraph/core/src/op/equal.cpp +++ b/ngraph/core/src/op/equal.cpp @@ -77,15 +77,13 @@ op::v1::Equal::Equal(const Output& arg0, shared_ptr op::v1::Equal::clone_with_new_inputs(const 
OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_Equal_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } bool op::v1::Equal::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_Equal_evaluate) - { - return equal::evaluate_equal(inputs[0], inputs[1], outputs[0], get_autob()); - } - return false; + NGRAPH_OP_SCOPE(v1_Equal_evaluate); + return equal::evaluate_equal(inputs[0], inputs[1], outputs[0], get_autob()); } diff --git a/ngraph/core/src/op/erf.cpp b/ngraph/core/src/op/erf.cpp index 96a168adbf7..179895f1a87 100644 --- a/ngraph/core/src/op/erf.cpp +++ b/ngraph/core/src/op/erf.cpp @@ -29,11 +29,13 @@ constexpr NodeTypeInfo op::Erf::type_info; bool ngraph::op::v0::Erf::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Erf_visit_attributes); return true; } shared_ptr op::Erf::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Erf_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -76,9 +78,6 @@ namespace erfop bool op::Erf::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Erf_evaluate) - { - return erfop::evaluate_erf(inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v0_Erf_evaluate); + return erfop::evaluate_erf(inputs[0], outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/exp.cpp b/ngraph/core/src/op/exp.cpp index f63626e96e2..313c73729ab 100644 --- a/ngraph/core/src/op/exp.cpp +++ b/ngraph/core/src/op/exp.cpp @@ -35,11 +35,13 @@ op::Exp::Exp(const Output& arg) bool ngraph::op::v0::Exp::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Exp_visit_attributes); return true; } shared_ptr op::Exp::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Exp_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -76,9 +78,6 @@ namespace expop bool op::Exp::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Exp_evaluate) - { - return expop::evaluate_exp(inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v0_Exp_evaluate); + return expop::evaluate_exp(inputs[0], outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/extractimagepatches.cpp b/ngraph/core/src/op/extractimagepatches.cpp index 4c91a4f709c..5c8e790ebba 100644 --- a/ngraph/core/src/op/extractimagepatches.cpp +++ b/ngraph/core/src/op/extractimagepatches.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/extractimagepatches.hpp" +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" using namespace std; @@ -40,6 +41,7 @@ op::v3::ExtractImagePatches::ExtractImagePatches(const Output& image, void op::v3::ExtractImagePatches::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v3_ExtractImagePatches_validate_and_infer_types); const PartialShape input_Pshape = get_input_partial_shape(0); NODE_VALIDATION_CHECK(this, input_Pshape.rank() == 4, "input tensor must be 4D tensor."); @@ -148,6 +150,7 @@ void op::v3::ExtractImagePatches::validate_and_infer_types() bool op::v3::ExtractImagePatches::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v3_ExtractImagePatches_visit_attributes); 
visitor.on_attribute("sizes", m_patch_sizes); visitor.on_attribute("strides", m_patch_movement_strides); visitor.on_attribute("rates", m_patch_selection_rates); @@ -158,6 +161,7 @@ bool op::v3::ExtractImagePatches::visit_attributes(AttributeVisitor& visitor) shared_ptr op::v3::ExtractImagePatches::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v3_ExtractImagePatches_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), m_patch_sizes, diff --git a/ngraph/core/src/op/fake_quantize.cpp b/ngraph/core/src/op/fake_quantize.cpp index 8228684aab4..b534b0e6f46 100644 --- a/ngraph/core/src/op/fake_quantize.cpp +++ b/ngraph/core/src/op/fake_quantize.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include +#include "itt.hpp" #include "fake_quantize.hpp" #include "ngraph/attribute_visitor.hpp" @@ -56,6 +57,7 @@ op::FakeQuantize::FakeQuantize(const Output& data, void op::FakeQuantize::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_FakeQuantize_validate_and_infer_types); PartialShape data_pshape = get_input_partial_shape(0); for (auto i = 1; i <= 4; i++) @@ -84,6 +86,7 @@ void op::FakeQuantize::validate_and_infer_types() bool ngraph::op::v0::FakeQuantize::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_FakeQuantize_visit_attributes); visitor.on_attribute("levels", m_levels); visitor.on_attribute("auto_broadcast", m_auto_broadcast); return true; @@ -165,6 +168,7 @@ OutputVector op::FakeQuantize::decompose_op() const shared_ptr op::FakeQuantize::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_FakeQuantize_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), // X new_args.at(1), // input_low diff --git a/ngraph/core/src/op/floor.cpp b/ngraph/core/src/op/floor.cpp index ed8e44c83af..226483a574c 100644 --- a/ngraph/core/src/op/floor.cpp +++ b/ngraph/core/src/op/floor.cpp @@ -34,11 +34,13 @@ op::Floor::Floor(const Output& arg) bool ngraph::op::v0::Floor::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Floor_visit_attributes); return true; } shared_ptr op::Floor::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Floor_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -88,9 +90,6 @@ namespace floorop bool op::Floor::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Floor_evaluate) - { - return floorop::evaluate_floor(inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v0_Floor_evaluate); + return floorop::evaluate_floor(inputs[0], outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/floor_mod.cpp b/ngraph/core/src/op/floor_mod.cpp index 8795ac35936..603b2af54f4 100644 --- a/ngraph/core/src/op/floor_mod.cpp +++ b/ngraph/core/src/op/floor_mod.cpp @@ -34,6 +34,7 @@ op::v1::FloorMod::FloorMod(const Output& arg0, shared_ptr op::v1::FloorMod::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_FloorMod_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } @@ -82,14 +83,12 @@ namespace floor_mod bool op::v1::FloorMod::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_FloorMod_evaluate) - { - return 
floor_mod::evaluate_floor_mod(inputs[0], inputs[1], outputs[0], get_autob()); - } - return false; + NGRAPH_OP_SCOPE(v1_FloorMod_evaluate); + return floor_mod::evaluate_floor_mod(inputs[0], inputs[1], outputs[0], get_autob()); } bool op::v1::FloorMod::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_FloorMod_visit_attributes); return true; } diff --git a/ngraph/core/src/op/gather.cpp b/ngraph/core/src/op/gather.cpp index 97cad74c101..6c536573f19 100644 --- a/ngraph/core/src/op/gather.cpp +++ b/ngraph/core/src/op/gather.cpp @@ -47,11 +47,15 @@ op::v1::Gather::Gather(const Output& params, bool ngraph::op::v1::Gather::visit_attributes(AttributeVisitor& visitor) { - return true; + NGRAPH_OP_SCOPE(v1_Gather_visit_attributes); + { + return true; + } } void op::v1::Gather::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_Gather_validate_and_infer_types); const auto& input_rank = get_input_partial_shape(PARAMS).rank(); const auto& axis_shape = get_input_partial_shape(AXIS); const auto& axis_rank = axis_shape.rank(); @@ -135,6 +139,7 @@ int64_t op::v1::Gather::get_axis() const shared_ptr op::v1::Gather::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_Gather_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(PARAMS), new_args.at(INDICES), new_args.at(AXIS)); } @@ -313,8 +318,8 @@ bool op::v1::Gather::evaluate_gather(const HostTensorVector& outputs, bool op::v1::Gather::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_Gather_evaluate) { return evaluate_gather(outputs, inputs); } - return false; + NGRAPH_OP_SCOPE(v1_Gather_evaluate); + return evaluate_gather(outputs, inputs); } bool op::v1::Gather::constant_fold(OutputVector& output_values, const OutputVector& input_values) diff --git a/ngraph/core/src/op/gather_elements.cpp b/ngraph/core/src/op/gather_elements.cpp index 894a5a139d6..b68bff6bd85 100644 --- a/ngraph/core/src/op/gather_elements.cpp +++ b/ngraph/core/src/op/gather_elements.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/gather_elements.hpp" +#include "itt.hpp" #include "ngraph/shape.hpp" using namespace std; @@ -35,6 +36,7 @@ op::v6::GatherElements::GatherElements(const Output& data, void op::v6::GatherElements::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v6_GatherElements_validate_and_infer_types); const auto& data_type = get_input_element_type(0); const auto& indices_type = get_input_element_type(1); @@ -120,12 +122,14 @@ void op::v6::GatherElements::validate_and_infer_types() bool op::v6::GatherElements::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v6_GatherElements_visit_attributes); visitor.on_attribute("axis", m_axis); return true; } shared_ptr op::v6::GatherElements::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v6_GatherElements_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), m_axis); } diff --git a/ngraph/core/src/op/gather_nd.cpp b/ngraph/core/src/op/gather_nd.cpp index 9ad86145abb..55b7e7ad9e6 100644 --- a/ngraph/core/src/op/gather_nd.cpp +++ b/ngraph/core/src/op/gather_nd.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/gather_nd.hpp" +#include "itt.hpp" #include "ngraph/shape.hpp" using namespace std; @@ -35,6 +36,7 @@ op::v5::GatherND::GatherND(const Output& data, void 
op::v5::GatherND::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v5_GatherND_validate_and_infer_types); // check types of input tensors const auto& data_type = get_input_element_type(0); const auto& indices_type = get_input_element_type(1); @@ -88,7 +90,8 @@ void op::v5::GatherND::validate_and_infer_types() this, (indices_pshape[indices_pshape.rank().get_length() - 1].get_length() + m_batch_dims) <= data_pshape.rank().get_length(), - "Length of a tuple with indices must not exceed a rank of data tensor excluding " + "Length of a tuple with indices must not exceed a rank of data tensor " + "excluding " "batch dimensions."); } } @@ -148,12 +151,14 @@ void op::v5::GatherND::validate_and_infer_types() bool op::v5::GatherND::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v5_GatherND_visit_attributes); visitor.on_attribute("batch_dims", m_batch_dims); return true; } shared_ptr op::v5::GatherND::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v5_GatherND_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), m_batch_dims); } diff --git a/ngraph/core/src/op/gather_tree.cpp b/ngraph/core/src/op/gather_tree.cpp index 81730edd410..60ad5a34459 100644 --- a/ngraph/core/src/op/gather_tree.cpp +++ b/ngraph/core/src/op/gather_tree.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/gather_tree.hpp" +#include "itt.hpp" #include "ngraph/shape.hpp" using namespace std; @@ -33,6 +34,7 @@ op::v1::GatherTree::GatherTree(const Output& step_ids, shared_ptr op::v1::GatherTree::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_GatherTree_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared( new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3)); @@ -40,11 +42,13 @@ shared_ptr op::v1::GatherTree::clone_with_new_inputs(const OutputVector& n bool ngraph::op::v1::GatherTree::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_GatherTree_visit_attributes); return true; } void op::v1::GatherTree::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_GatherTree_validate_and_infer_types); const auto& step_ids_rank = get_input_partial_shape(0); const auto& parent_idx_rank = get_input_partial_shape(1); const auto& max_seq_len_rank = get_input_partial_shape(2); diff --git a/ngraph/core/src/op/gelu.cpp b/ngraph/core/src/op/gelu.cpp index 1f9a628c841..e1a229fc768 100644 --- a/ngraph/core/src/op/gelu.cpp +++ b/ngraph/core/src/op/gelu.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include +#include "itt.hpp" #include "ngraph/builder/make_constant.hpp" #include "ngraph/op/add.hpp" @@ -41,6 +42,7 @@ op::Gelu::Gelu(const Output& data) bool ngraph::op::v0::Gelu::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Gelu_visit_attributes); return true; } @@ -67,6 +69,7 @@ OutputVector op::Gelu::decompose_op() const shared_ptr op::Gelu::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Gelu_clone_with_new_inputs); if (new_args.size() != 1) { throw ngraph_error("Incorrect number of new arguments"); diff --git a/ngraph/core/src/op/greater.cpp b/ngraph/core/src/op/greater.cpp index b8b1cf36cd5..b5b869fd687 100644 --- a/ngraph/core/src/op/greater.cpp +++ b/ngraph/core/src/op/greater.cpp @@ -77,6 +77,7 @@ op::v1::Greater::Greater(const Output& arg0, shared_ptr 
op::v1::Greater::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_Greater_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } @@ -84,9 +85,6 @@ shared_ptr op::v1::Greater::clone_with_new_inputs(const OutputVector& new_ bool op::v1::Greater::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_Greater_evaluate) - { - return greaterop::evaluate_greater(inputs[0], inputs[1], outputs[0], get_autob()); - } - return false; + NGRAPH_OP_SCOPE(v1_Greater_evaluate); + return greaterop::evaluate_greater(inputs[0], inputs[1], outputs[0], get_autob()); } diff --git a/ngraph/core/src/op/greater_eq.cpp b/ngraph/core/src/op/greater_eq.cpp index 6767802e1bd..0dc8651d795 100644 --- a/ngraph/core/src/op/greater_eq.cpp +++ b/ngraph/core/src/op/greater_eq.cpp @@ -77,6 +77,7 @@ op::v1::GreaterEqual::GreaterEqual(const Output& arg0, shared_ptr op::v1::GreaterEqual::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_GreaterEqual_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } @@ -84,10 +85,6 @@ shared_ptr op::v1::GreaterEqual::clone_with_new_inputs(const OutputVector& bool op::v1::GreaterEqual::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_GreaterEqual_evaluate) - { - return greater_equalop::evaluate_greater_equal( - inputs[0], inputs[1], outputs[0], get_autob()); - } - return false; + NGRAPH_OP_SCOPE(v1_GreaterEqual_evaluate); + return greater_equalop::evaluate_greater_equal(inputs[0], inputs[1], outputs[0], get_autob()); } diff --git a/ngraph/core/src/op/grn.cpp b/ngraph/core/src/op/grn.cpp index 3668d227238..fb5f0de0fd3 100644 --- a/ngraph/core/src/op/grn.cpp +++ b/ngraph/core/src/op/grn.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include #include +#include "itt.hpp" #include "grn.hpp" #include "ngraph/attribute_visitor.hpp" @@ -43,6 +44,7 @@ op::GRN::GRN(const Output& data, float bias) bool ngraph::op::v0::GRN::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_GRN_visit_attributes); visitor.on_attribute("bias", m_bias); return true; } @@ -96,6 +98,7 @@ OutputVector op::GRN::decompose_op() const shared_ptr op::GRN::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_GRN_clone_with_new_inputs); if (new_args.size() != 1) { throw ngraph_error("Incorrect number of new arguments"); diff --git a/ngraph/core/src/op/group_conv.cpp b/ngraph/core/src/op/group_conv.cpp index 0ae3c2212fe..54c41204953 100644 --- a/ngraph/core/src/op/group_conv.cpp +++ b/ngraph/core/src/op/group_conv.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/builder/reshape.hpp" @@ -60,6 +61,7 @@ op::v1::GroupConvolution::GroupConvolution(const Output& data_batch, bool ngraph::op::v1::GroupConvolution::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_GroupConvolution_visit_attributes); visitor.on_attribute("strides", m_strides); visitor.on_attribute("pads_begin", m_pads_begin); visitor.on_attribute("pads_end", m_pads_end); @@ -70,6 +72,7 @@ bool ngraph::op::v1::GroupConvolution::visit_attributes(AttributeVisitor& visito void 
op::v1::GroupConvolution::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_GroupConvolution_validate_and_infer_types); PartialShape data_batch_shape = get_input_partial_shape(0); PartialShape filters_shape = get_input_partial_shape(1); element::Type data_batch_et = get_input_element_type(0); @@ -175,6 +178,7 @@ void op::v1::GroupConvolution::validate_and_infer_types() shared_ptr op::v1::GroupConvolution::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_GroupConvolution_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), @@ -254,6 +258,7 @@ op::v1::GroupConvolutionBackpropData::GroupConvolutionBackpropData( bool ngraph::op::v1::GroupConvolutionBackpropData::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_GroupConvolutionBackpropData_visit_attributes); visitor.on_attribute("strides", m_strides); visitor.on_attribute("pads_begin", m_pads_begin); visitor.on_attribute("pads_end", m_pads_end); @@ -552,6 +557,7 @@ OutputVector op::v1::GroupConvolutionBackpropData::decompose_op() const shared_ptr op::v1::GroupConvolutionBackpropData::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_GroupConvolutionBackpropData_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 3) { diff --git a/ngraph/core/src/op/gru_cell.cpp b/ngraph/core/src/op/gru_cell.cpp index f84c4dee2ae..b57346bb612 100644 --- a/ngraph/core/src/op/gru_cell.cpp +++ b/ngraph/core/src/op/gru_cell.cpp @@ -103,12 +103,14 @@ op::v3::GRUCell::GRUCell(const Output& X, bool op::v3::GRUCell::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v3_GRUCell_visit_attributes); visitor.on_attribute("linear_before_reset", m_linear_before_reset); return op::util::RNNCellBase::visit_attributes(visitor); } void op::v3::GRUCell::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v3_GRUCell_validate_and_infer_types); for (const auto& input : inputs()) { if (input.get_partial_shape().rank().is_dynamic()) @@ -216,6 +218,7 @@ void op::v3::GRUCell::add_default_bias_input() shared_ptr op::v3::GRUCell::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v3_GRUCell_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 4) { diff --git a/ngraph/core/src/op/gru_sequence.cpp b/ngraph/core/src/op/gru_sequence.cpp index fc7cb620d3d..d7dd680b928 100644 --- a/ngraph/core/src/op/gru_sequence.cpp +++ b/ngraph/core/src/op/gru_sequence.cpp @@ -17,6 +17,7 @@ #include #include #include +#include "itt.hpp" #include "ngraph/op/gru_sequence.hpp" #include "ngraph/op/util/recurrent_sequence.hpp" @@ -60,6 +61,7 @@ op::v5::GRUSequence::GRUSequence(const Output& X, void op::v5::GRUSequence::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v5_GRUSequence_validate_and_infer_types); for (const auto& input : inputs()) { if (input.get_partial_shape().rank().is_dynamic()) @@ -175,6 +177,7 @@ void op::v5::GRUSequence::validate_and_infer_types() bool op::v5::GRUSequence::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v5_GRUSequence_visit_attributes); visitor.on_attribute("direction", m_direction); visitor.on_attribute("linear_before_reset", m_linear_before_reset); return op::util::RNNCellBase::visit_attributes(visitor); @@ -182,6 +185,7 @@ bool op::v5::GRUSequence::visit_attributes(AttributeVisitor& visitor) shared_ptr op::v5::GRUSequence::clone_with_new_inputs(const OutputVector& new_args) const { + 
NGRAPH_OP_SCOPE(v5_GRUSequence_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), diff --git a/ngraph/core/src/op/hard_sigmoid.cpp b/ngraph/core/src/op/hard_sigmoid.cpp index 89d3e30a9cb..71d189206bb 100644 --- a/ngraph/core/src/op/hard_sigmoid.cpp +++ b/ngraph/core/src/op/hard_sigmoid.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include +#include "itt.hpp" #include "ngraph/op/add.hpp" #include "ngraph/op/constant.hpp" @@ -41,6 +42,7 @@ op::HardSigmoid::HardSigmoid(const Output& data, bool ngraph::op::v0::HardSigmoid::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_HardSigmoid_visit_attributes); return true; } @@ -102,6 +104,7 @@ OutputVector op::HardSigmoid::decompose_op() const shared_ptr op::HardSigmoid::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_HardSigmoid_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), new_args.at(2)); diff --git a/ngraph/core/src/op/hsigmoid.cpp b/ngraph/core/src/op/hsigmoid.cpp index 8ea589979bc..b3341aca0b3 100644 --- a/ngraph/core/src/op/hsigmoid.cpp +++ b/ngraph/core/src/op/hsigmoid.cpp @@ -35,11 +35,13 @@ op::v5::HSigmoid::HSigmoid(const Output& arg) bool op::v5::HSigmoid::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v5_HSigmoid_visit_attributes); return true; } shared_ptr op::v5::HSigmoid::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v5_HSigmoid_clone_with_new_inputs); return make_shared(new_args.at(0)); } @@ -73,9 +75,6 @@ namespace bool op::v5::HSigmoid::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v5_HSigmoid_evaluate) - { - return evaluate_hsigmoid(inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v5_HSigmoid_evaluate); + return evaluate_hsigmoid(inputs[0], outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/hswish.cpp b/ngraph/core/src/op/hswish.cpp index 566043bec61..d165f70ea78 100644 --- a/ngraph/core/src/op/hswish.cpp +++ b/ngraph/core/src/op/hswish.cpp @@ -35,11 +35,13 @@ op::v4::HSwish::HSwish(const Output& arg) bool op::v4::HSwish::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v4_HSwish_visit_attributes); return true; } shared_ptr op::v4::HSwish::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v4_HSwish_clone_with_new_inputs); return make_shared(new_args.at(0)); } @@ -72,9 +74,6 @@ namespace hswish bool op::v4::HSwish::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v4_HSwish_evaluate) - { - return hswish::evaluate_hswish(inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v4_HSwish_evaluate); + return hswish::evaluate_hswish(inputs[0], outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/interpolate.cpp b/ngraph/core/src/op/interpolate.cpp index cda5142bc90..2c535b6c416 100644 --- a/ngraph/core/src/op/interpolate.cpp +++ b/ngraph/core/src/op/interpolate.cpp @@ -39,6 +39,7 @@ op::v0::Interpolate::Interpolate(const Output& image, bool op::v0::Interpolate::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Interpolate_visit_attributes); visitor.on_attribute("align_corners", m_attrs.align_corners); visitor.on_attribute("antialias", m_attrs.antialias); 
visitor.on_attribute("axes", m_attrs.axes); @@ -50,6 +51,7 @@ bool op::v0::Interpolate::visit_attributes(AttributeVisitor& visitor) void op::v0::Interpolate::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_Interpolate_validate_and_infer_types); NODE_VALIDATION_CHECK(this, get_input_element_type(1).is_integral_number(), "output shape must be an integral number."); @@ -79,6 +81,7 @@ void op::v0::Interpolate::validate_and_infer_types() shared_ptr op::v0::Interpolate::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Interpolate_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), m_attrs); } @@ -133,6 +136,7 @@ op::v4::Interpolate::Interpolate(const Output& image, bool op::v4::Interpolate::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v4_Interpolate_visit_attributes); visitor.on_attribute("mode", m_attrs.mode); visitor.on_attribute("shape_calculation_mode", m_attrs.shape_calculation_mode); visitor.on_attribute("coordinate_transformation_mode", m_attrs.coordinate_transformation_mode); @@ -220,6 +224,7 @@ PartialShape op::v4::Interpolate::get_padded_input_shape(const PartialShape& inp void op::v4::Interpolate::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v4_Interpolate_validate_and_infer_types); element::Type input_et = get_input_element_type(0); NODE_VALIDATION_CHECK(this, input_et == element::f32 || input_et == element::f16 || @@ -274,6 +279,7 @@ void op::v4::Interpolate::validate_and_infer_types() shared_ptr op::v4::Interpolate::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v4_Interpolate_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() <= 3) { @@ -497,8 +503,8 @@ bool op::v4::Interpolate::evaluate_interpolate(const HostTensorVector& outputs, bool op::v4::Interpolate::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v4_Interpolate_evaluate) { return evaluate_interpolate(outputs, inputs); } - return false; + NGRAPH_OP_SCOPE(v4_Interpolate_evaluate); + return evaluate_interpolate(outputs, inputs); } namespace ngraph diff --git a/ngraph/core/src/op/less.cpp b/ngraph/core/src/op/less.cpp index 09513d6b97f..ecebeeea8ff 100644 --- a/ngraph/core/src/op/less.cpp +++ b/ngraph/core/src/op/less.cpp @@ -77,15 +77,13 @@ op::v1::Less::Less(const Output& arg0, shared_ptr op::v1::Less::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_Less_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } bool op::v1::Less::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_Less_evaluate) - { - return lessop::evaluate_less(inputs[0], inputs[1], outputs[0], get_autob()); - } - return false; + NGRAPH_OP_SCOPE(v1_Less_evaluate); + return lessop::evaluate_less(inputs[0], inputs[1], outputs[0], get_autob()); } diff --git a/ngraph/core/src/op/less_eq.cpp b/ngraph/core/src/op/less_eq.cpp index 8d886c88fe2..5efc045e108 100644 --- a/ngraph/core/src/op/less_eq.cpp +++ b/ngraph/core/src/op/less_eq.cpp @@ -38,6 +38,7 @@ op::v1::LessEqual::LessEqual(const Output& arg0, shared_ptr op::v1::LessEqual::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_LessEqual_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } @@ -84,9 +85,6 @@ namespace 
less_equalop bool op::v1::LessEqual::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_LessEqual_evaluate) - { - return less_equalop::evaluate_less_equal(inputs[0], inputs[1], outputs[0], get_autob()); - } - return false; + NGRAPH_OP_SCOPE(v1_LessEqual_evaluate); + return less_equalop::evaluate_less_equal(inputs[0], inputs[1], outputs[0], get_autob()); } diff --git a/ngraph/core/src/op/log.cpp b/ngraph/core/src/op/log.cpp index 4923977c810..d431a5979f3 100644 --- a/ngraph/core/src/op/log.cpp +++ b/ngraph/core/src/op/log.cpp @@ -35,11 +35,13 @@ op::Log::Log(const Output& arg) bool ngraph::op::v0::Log::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Log_visit_attributes); return true; } shared_ptr op::Log::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Log_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -76,9 +78,6 @@ namespace logop bool op::Log::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Log_evaluate) - { - return logop::evaluate_log(inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v0_Log_evaluate); + return logop::evaluate_log(inputs[0], outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/log_softmax.cpp b/ngraph/core/src/op/log_softmax.cpp index 9fac2338527..ac1b8de1a55 100644 --- a/ngraph/core/src/op/log_softmax.cpp +++ b/ngraph/core/src/op/log_softmax.cpp @@ -33,12 +33,14 @@ op::v5::LogSoftmax::LogSoftmax(const Output& arg, const int64_t axis) bool op::v5::LogSoftmax::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v5_LogSoftmax_visit_attributes); visitor.on_attribute("axis", m_axis); return true; } void op::v5::LogSoftmax::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v5_LogSoftmax_validate_and_infer_types); const PartialShape& input_shape = get_input_partial_shape(0); if (input_shape.rank().is_static()) NODE_VALIDATION_CHECK(this, @@ -55,6 +57,7 @@ void op::v5::LogSoftmax::validate_and_infer_types() shared_ptr op::v5::LogSoftmax::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v5_LogSoftmax_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), m_axis); } diff --git a/ngraph/core/src/op/loop.cpp b/ngraph/core/src/op/loop.cpp index 91468214a94..0d4c02287c6 100644 --- a/ngraph/core/src/op/loop.cpp +++ b/ngraph/core/src/op/loop.cpp @@ -37,6 +37,7 @@ op::v5::Loop::Loop(const Output& trip_count, const Output& execution bool op::v5::Loop::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v5_Loop_visit_attributes); visitor.on_attribute("body", m_body); visitor.on_attribute("input_descriptions", m_input_descriptions); visitor.on_attribute("output_descriptions", m_output_descriptions); @@ -46,6 +47,7 @@ bool op::v5::Loop::visit_attributes(AttributeVisitor& visitor) void op::v5::Loop::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v5_Loop_validate_and_infer_types); if (m_special_body_ports.current_iteration_input_idx >= 0) { const auto& cur_iter_rank = m_body->get_parameters() @@ -299,7 +301,9 @@ void op::v5::Loop::validate_and_infer_types() std::shared_ptr op::v5::Loop::clone_with_new_inputs(const OutputVector& new_args) const { - // 0 - trip_count, 1 - execution condition, these inputs are not connected to the body params + NGRAPH_OP_SCOPE(v5_Loop_clone_with_new_inputs); + // 0 - trip_count, 1 - 
execution condition, these inputs are not connected to the body + // params OutputVector body_params_args(new_args.begin() + 2, new_args.end()); auto op = make_shared(new_args[0], new_args[1]); for (int idx = 2; idx < new_args.size(); ++idx) @@ -390,15 +394,8 @@ Output op::v5::Loop::get_concatenated_slices(const Output& value, bool op::v5::Loop::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v5_Loop_evaluate) - { - runtime::reference::loop(m_body, - m_output_descriptions, - m_input_descriptions, - m_special_body_ports, - outputs, - inputs); - return true; - } - return false; + NGRAPH_OP_SCOPE(v5_Loop_evaluate); + runtime::reference::loop( + m_body, m_output_descriptions, m_input_descriptions, m_special_body_ports, outputs, inputs); + return true; } diff --git a/ngraph/core/src/op/lrn.cpp b/ngraph/core/src/op/lrn.cpp index 0ebe097acde..7774a1fde0d 100644 --- a/ngraph/core/src/op/lrn.cpp +++ b/ngraph/core/src/op/lrn.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/lrn.hpp" +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/op/constant.hpp" #include "ngraph/op/multiply.hpp" @@ -58,6 +59,7 @@ AxisSet op::LRN::get_reduction_axes() const void op::LRN::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_LRN_validate_and_infer_types); element::Type arg_type = get_input_element_type(0); PartialShape arg_shape = get_input_partial_shape(0); set_output_type(0, arg_type, arg_shape); @@ -114,6 +116,7 @@ void op::LRN::validate_and_infer_types() bool ngraph::op::v0::LRN::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_LRN_visit_attributes); visitor.on_attribute("alpha", m_alpha); visitor.on_attribute("beta", m_beta); visitor.on_attribute("bias", m_bias); @@ -123,6 +126,7 @@ bool ngraph::op::v0::LRN::visit_attributes(AttributeVisitor& visitor) shared_ptr op::LRN::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_LRN_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), m_alpha, m_beta, m_bias, m_size); } diff --git a/ngraph/core/src/op/lstm_cell.cpp b/ngraph/core/src/op/lstm_cell.cpp index 0d2b24d53ea..6d30ee9f1e2 100644 --- a/ngraph/core/src/op/lstm_cell.cpp +++ b/ngraph/core/src/op/lstm_cell.cpp @@ -16,6 +16,7 @@ #include #include +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/op/concat.hpp" @@ -129,6 +130,7 @@ op::v0::LSTMCell::LSTMCell(const Output& X, bool ngraph::op::v0::LSTMCell::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_LSTMCell_visit_attributes); visitor.on_attribute("hidden_size", m_hidden_size); visitor.on_attribute("activations", m_activations); visitor.on_attribute("activations_alpha", m_activations_alpha); @@ -142,6 +144,7 @@ bool ngraph::op::v0::LSTMCell::visit_attributes(AttributeVisitor& visitor) void op::v0::LSTMCell::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_LSTMCell_validate_and_infer_types); for (const auto& input : inputs()) { if (input.get_partial_shape().rank().is_dynamic()) @@ -158,7 +161,8 @@ void op::v0::LSTMCell::validate_and_infer_types() auto merged_hidden_size = Dimension::dynamic(); auto result_et = element::dynamic; - // Copy all inputs without peephole (7th input) and initial_cell_state (2nd input) information + // Copy all inputs without peephole (7th input) and initial_cell_state (2nd input) + // information // for further validation for (size_t i = 0; i 
< get_input_size() - 1; i++) { @@ -206,7 +210,8 @@ void op::v0::LSTMCell::validate_and_infer_types() element::Type::merge(result_et, result_et, get_input_element_type(3)) && element::Type::merge(result_et, result_et, get_input_element_type(4)) && element::Type::merge(result_et, result_et, get_input_element_type(5)), - "Element types for X, initial_hidden_state, initial_cell_state, W, R and B do not match."); + "Element types for X, initial_hidden_state, initial_cell_state, W, R and B do not " + "match."); // Merge batch_size dimension across all inputs to evaluate output[0] dimension NODE_VALIDATION_CHECK( @@ -305,6 +310,7 @@ Output op::v0::LSTMCell::get_default_peepholes_input() const shared_ptr op::v0::LSTMCell::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_LSTMCell_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 5) { @@ -441,11 +447,13 @@ op::v4::LSTMCell::LSTMCell(const Output& X, bool ngraph::op::v4::LSTMCell::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v4_LSTMCell_visit_attributes); return op::util::RNNCellBase::visit_attributes(visitor); } void op::v4::LSTMCell::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v4_LSTMCell_validate_and_infer_types); for (const auto& input : inputs()) { if (input.get_partial_shape().rank().is_dynamic()) @@ -482,7 +490,8 @@ void op::v4::LSTMCell::validate_and_infer_types() element::Type::merge(result_et, result_et, get_input_element_type(3)) && element::Type::merge(result_et, result_et, get_input_element_type(4)) && element::Type::merge(result_et, result_et, get_input_element_type(5)), - "Element types for X, initial_hidden_state, initial_cell_state, W, R and B do not match."); + "Element types for X, initial_hidden_state, initial_cell_state, W, R and B do not " + "match."); // Merge batch_size dimension across all inputs to evaluate output[0] dimension NODE_VALIDATION_CHECK( @@ -562,6 +571,7 @@ Output op::v4::LSTMCell::get_default_bias_input() const shared_ptr op::v4::LSTMCell::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v4_LSTMCell_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 5) { diff --git a/ngraph/core/src/op/lstm_sequence.cpp b/ngraph/core/src/op/lstm_sequence.cpp index ab3607c425e..6b78dadd011 100644 --- a/ngraph/core/src/op/lstm_sequence.cpp +++ b/ngraph/core/src/op/lstm_sequence.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/lstm_sequence.hpp" +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/builder/autobroadcast.hpp" @@ -34,6 +35,7 @@ NGRAPH_RTTI_DEFINITION(op::v5::LSTMSequence, "LSTMSequence", 5); bool ngraph::op::v0::LSTMSequence::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_LSTMSequence_visit_attributes); visitor.on_attribute("hidden_size", m_hidden_size); visitor.on_attribute("activations", m_activations); visitor.on_attribute("activations_alpha", m_activations_alpha); @@ -71,6 +73,7 @@ OutputVector op::v0::LSTMSequence::decompose_op() const shared_ptr op::v0::LSTMSequence::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_LSTMSequence_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 8) { @@ -263,6 +266,7 @@ shared_ptr op::v0::LSTMSequence::prepare_input(Output node, void op::v0::LSTMSequence::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_LSTMSequence_validate_and_infer_types); 
std::vector input_param{}; auto lstm_seq_gates_count = 4; @@ -272,7 +276,8 @@ void op::v0::LSTMSequence::validate_and_infer_types() auto merged_num_directions = Dimension::dynamic(); auto result_et = element::dynamic; - // Copy all inputs without peephole and initial_cell_state information for further validation + // Copy all inputs without peephole and initial_cell_state information for further + // validation for (size_t i = 0; i < get_input_size() - 1; i++) { // exclude initial_cell_state from the loop @@ -320,7 +325,8 @@ void op::v0::LSTMSequence::validate_and_infer_types() element::Type::merge(result_et, result_et, get_input_element_type(4)) && element::Type::merge(result_et, result_et, get_input_element_type(5)) && element::Type::merge(result_et, result_et, get_input_element_type(6)), - "Element types for X, initial_hidden_state, initial_cell_state, W, R and B inputs do not " + "Element types for X, initial_hidden_state, initial_cell_state, W, R and B inputs do " + "not " "match."); // Merge batch_size dimension across all inputs to evaluate output[0] dimension @@ -421,12 +427,14 @@ void op::v0::LSTMSequence::validate_and_infer_types() bool ngraph::op::v5::LSTMSequence::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v5_LSTMSequence_visit_attributes); visitor.on_attribute("direction", m_direction); return op::util::RNNCellBase::visit_attributes(visitor); } shared_ptr op::v5::LSTMSequence::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v5_LSTMSequence_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 7) { @@ -452,6 +460,7 @@ shared_ptr op::v5::LSTMSequence::clone_with_new_inputs(const OutputVector& void op::v5::LSTMSequence::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v5_LSTMSequence_validate_and_infer_types); for (const auto& input : inputs()) { if (input.get_partial_shape().rank().is_dynamic()) @@ -505,7 +514,8 @@ void op::v5::LSTMSequence::validate_and_infer_types() element::Type::merge(result_et, result_et, get_input_element_type(4)) && element::Type::merge(result_et, result_et, get_input_element_type(5)) && element::Type::merge(result_et, result_et, get_input_element_type(6)), - "Element types for X, initial_hidden_state, initial_cell_state, W, R and B inputs do not " + "Element types for X, initial_hidden_state, initial_cell_state, W, R and B inputs do " + "not " "match."); // Merge batch_size dimension across all inputs to evaluate output[0] dimension diff --git a/ngraph/core/src/op/matmul.cpp b/ngraph/core/src/op/matmul.cpp index 70dab5cbaa9..8370a942886 100644 --- a/ngraph/core/src/op/matmul.cpp +++ b/ngraph/core/src/op/matmul.cpp @@ -40,6 +40,7 @@ op::MatMul::MatMul(const Output& A, bool ngraph::op::v0::MatMul::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_MatMul_visit_attributes); visitor.on_attribute("transpose_a", m_transpose_a); visitor.on_attribute("transpose_b", m_transpose_b); return true; @@ -47,6 +48,7 @@ bool ngraph::op::v0::MatMul::visit_attributes(AttributeVisitor& visitor) shared_ptr op::MatMul::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_MatMul_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), m_transpose_a, m_transpose_b); } @@ -259,16 +261,14 @@ namespace matmul bool op::MatMul::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_MatMul_evaluate) - { - return matmul::evaluate_matmul( - inputs[0], inputs[1], 
outputs[0], get_transpose_a(), get_transpose_b()); - } - return false; + NGRAPH_OP_SCOPE(v0_MatMul_evaluate); + return matmul::evaluate_matmul( + inputs[0], inputs[1], outputs[0], get_transpose_a(), get_transpose_b()); } void ngraph::op::v0::MatMul::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_MatMul_validate_and_infer_types); element::Type result_et; NODE_VALIDATION_CHECK( diff --git a/ngraph/core/src/op/max.cpp b/ngraph/core/src/op/max.cpp index 3c06ac54e1d..162630385b6 100644 --- a/ngraph/core/src/op/max.cpp +++ b/ngraph/core/src/op/max.cpp @@ -70,6 +70,7 @@ op::v1::ReduceMax::ReduceMax(const Output& arg, shared_ptr op::v1::ReduceMax::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_ReduceMax_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), get_keep_dims()); } @@ -77,9 +78,6 @@ shared_ptr op::v1::ReduceMax::clone_with_new_inputs(const OutputVector& ne bool op::v1::ReduceMax::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_ReduceMax_evaluate) - { - return maxop::evaluate_max(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()); - } - return false; + NGRAPH_OP_SCOPE(v1_ReduceMax_evaluate); + return maxop::evaluate_max(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()); } diff --git a/ngraph/core/src/op/max_pool.cpp b/ngraph/core/src/op/max_pool.cpp index fa273305ad3..eea27c5e5d9 100644 --- a/ngraph/core/src/op/max_pool.cpp +++ b/ngraph/core/src/op/max_pool.cpp @@ -70,6 +70,7 @@ op::v1::MaxPool::MaxPool(const Output& arg, bool ngraph::op::v1::MaxPool::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_MaxPool_visit_attributes); visitor.on_attribute("strides", m_strides); visitor.on_attribute("pads_begin", m_pads_begin); visitor.on_attribute("pads_end", m_pads_end); @@ -81,6 +82,7 @@ bool ngraph::op::v1::MaxPool::visit_attributes(AttributeVisitor& visitor) void op::v1::MaxPool::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_MaxPool_validate_and_infer_types); if (0 == m_strides.size()) { m_strides = Strides(m_kernel.size(), 1); @@ -135,6 +137,7 @@ void op::v1::MaxPool::validate_and_infer_types() shared_ptr op::v1::MaxPool::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_MaxPool_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared( new_args.at(0), m_strides, m_pads_begin, m_pads_end, m_kernel, m_rounding_type, m_auto_pad); @@ -229,6 +232,6 @@ bool op::v1::MaxPool::evaluate_maxpool(const HostTensorVector& outputs, bool op::v1::MaxPool::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_MaxPool_evaluate) { return evaluate_maxpool(outputs, inputs); } - return false; + NGRAPH_OP_SCOPE(v1_MaxPool_evaluate); + return evaluate_maxpool(outputs, inputs); } diff --git a/ngraph/core/src/op/maximum.cpp b/ngraph/core/src/op/maximum.cpp index f5846c4ab0a..77b1cc518ed 100644 --- a/ngraph/core/src/op/maximum.cpp +++ b/ngraph/core/src/op/maximum.cpp @@ -84,6 +84,7 @@ op::v1::Maximum::Maximum(const Output& arg0, shared_ptr op::v1::Maximum::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_Maximum_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } @@ -91,9 +92,6 @@ shared_ptr op::v1::Maximum::clone_with_new_inputs(const OutputVector& new_ bool op::v1::Maximum::evaluate(const HostTensorVector& 
outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_Maximum_evaluate) - { - return maximumop::evaluate_maximum(inputs[0], inputs[1], outputs[0], get_autob()); - } - return false; + NGRAPH_OP_SCOPE(v1_Maximum_evaluate); + return maximumop::evaluate_maximum(inputs[0], inputs[1], outputs[0], get_autob()); } diff --git a/ngraph/core/src/op/min.cpp b/ngraph/core/src/op/min.cpp index 2a06b6e8821..9f7fd618dc1 100644 --- a/ngraph/core/src/op/min.cpp +++ b/ngraph/core/src/op/min.cpp @@ -72,6 +72,7 @@ op::v1::ReduceMin::ReduceMin(const Output& arg, shared_ptr op::v1::ReduceMin::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_ReduceMin_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), get_keep_dims()); } @@ -79,9 +80,6 @@ shared_ptr op::v1::ReduceMin::clone_with_new_inputs(const OutputVector& ne bool op::v1::ReduceMin::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_ReduceMin_evaluate) - { - return minop::evaluate_min(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()); - } - return false; + NGRAPH_OP_SCOPE(v1_ReduceMin_evaluate); + return minop::evaluate_min(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()); } diff --git a/ngraph/core/src/op/minimum.cpp b/ngraph/core/src/op/minimum.cpp index cc120a5f257..bd21a103eb1 100644 --- a/ngraph/core/src/op/minimum.cpp +++ b/ngraph/core/src/op/minimum.cpp @@ -82,6 +82,7 @@ op::v1::Minimum::Minimum(const Output& arg0, shared_ptr op::v1::Minimum::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_Minimum_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } @@ -89,9 +90,6 @@ shared_ptr op::v1::Minimum::clone_with_new_inputs(const OutputVector& new_ bool op::v1::Minimum::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_Minimum_evaluate) - { - return minimumop::evaluate_minimum(inputs[0], inputs[1], outputs[0], get_autob()); - } - return false; + NGRAPH_OP_SCOPE(v1_Minimum_evaluate); + return minimumop::evaluate_minimum(inputs[0], inputs[1], outputs[0], get_autob()); } diff --git a/ngraph/core/src/op/mish.cpp b/ngraph/core/src/op/mish.cpp index 831a4412f30..9a0e1202dd4 100644 --- a/ngraph/core/src/op/mish.cpp +++ b/ngraph/core/src/op/mish.cpp @@ -35,17 +35,20 @@ op::v4::Mish::Mish(const Output& arg) bool op::v4::Mish::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v4_Mish_visit_attributes); return true; } void op::v4::Mish::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v4_Mish_validate_and_infer_types); set_output_size(1); set_output_type(0, get_input_element_type(0), get_input_partial_shape(0)); } shared_ptr op::v4::Mish::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v4_Mish_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -77,9 +80,6 @@ namespace mish bool op::v4::Mish::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v4_Mish_evaluate) - { - return mish::evaluate_mish(inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v4_Mish_evaluate); + return mish::evaluate_mish(inputs[0], outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/mod.cpp b/ngraph/core/src/op/mod.cpp index 
30284534137..2543c474ab1 100644 --- a/ngraph/core/src/op/mod.cpp +++ b/ngraph/core/src/op/mod.cpp @@ -14,6 +14,7 @@ // limitations under the License. //***************************************************************************** #include "ngraph/op/mod.hpp" +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/builder/make_constant.hpp" #include "ngraph/op/abs.hpp" @@ -40,6 +41,7 @@ op::v1::Mod::Mod(const Output& A, bool ngraph::op::v1::Mod::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_Mod_visit_attributes); visitor.on_attribute("auto_broadcast", m_auto_broadcast); return true; } @@ -66,5 +68,6 @@ OutputVector op::v1::Mod::decompose_op() const shared_ptr op::v1::Mod::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_Mod_clone_with_new_inputs); return make_shared(new_args.at(0), new_args.at(1), m_auto_broadcast); } diff --git a/ngraph/core/src/op/multiply.cpp b/ngraph/core/src/op/multiply.cpp index ea65cd028a6..8a413d3b734 100644 --- a/ngraph/core/src/op/multiply.cpp +++ b/ngraph/core/src/op/multiply.cpp @@ -77,6 +77,7 @@ op::v0::Multiply::Multiply(const Output& arg0, shared_ptr op::v0::Multiply::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Multiply_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } @@ -84,11 +85,8 @@ shared_ptr op::v0::Multiply::clone_with_new_inputs(const OutputVector& new bool op::v0::Multiply::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Multiply_evaluate) - { - return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob()); - } - return false; + NGRAPH_OP_SCOPE(v0_Multiply_evaluate); + return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob()); } // ------------------------------------ v1 ------------------------------------- @@ -105,6 +103,7 @@ op::v1::Multiply::Multiply(const Output& arg0, shared_ptr op::v1::Multiply::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_Multiply_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } @@ -112,9 +111,6 @@ shared_ptr op::v1::Multiply::clone_with_new_inputs(const OutputVector& new bool op::v1::Multiply::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_Multiply_evaluate) - { - return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob()); - } - return false; + NGRAPH_OP_SCOPE(v1_Multiply_evaluate); + return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob()); } diff --git a/ngraph/core/src/op/mvn.cpp b/ngraph/core/src/op/mvn.cpp index d76d5ada662..285d62829e1 100644 --- a/ngraph/core/src/op/mvn.cpp +++ b/ngraph/core/src/op/mvn.cpp @@ -14,6 +14,7 @@ // limitations under the License. //***************************************************************************** #include +#include "itt.hpp" #include "mvn.hpp" #include "ngraph/builder/autobroadcast.hpp" @@ -60,6 +61,7 @@ op::MVN::MVN(const Output& data, AxisSet reduction_axes, bool normalize_va // instead of relying on op decomposition. 
void op::MVN::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_MVN_validate_and_infer_types); // if m_across_channels is true we should calculate mean and variance per batch // else we calculate these per channel if (m_reduction_axes.empty() && input_value(0).get_partial_shape().rank().is_static()) @@ -106,6 +108,7 @@ OutputVector op::MVN::decompose_op() const shared_ptr op::MVN::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_MVN_clone_with_new_inputs); NODE_VALIDATION_CHECK(this, new_args.size() == 1, "Expected 1 element in new_args for the MVN op but got ", @@ -115,6 +118,7 @@ shared_ptr op::MVN::clone_with_new_inputs(const OutputVector& new_args) co bool op::MVN::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_MVN_visit_attributes); visitor.on_attribute("eps", m_eps); visitor.on_attribute("across_channels", m_across_channels); visitor.on_attribute("normalize_variance", m_normalize_variance); @@ -161,6 +165,7 @@ op::v6::MVN::MVN(const Output& data, void op::v6::MVN::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v6_MVN_validate_and_infer_types); const auto data = get_input_partial_shape(0); const auto axes = get_input_partial_shape(1); @@ -183,6 +188,7 @@ void op::v6::MVN::validate_and_infer_types() shared_ptr op::v6::MVN::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v6_MVN_clone_with_new_inputs); NODE_VALIDATION_CHECK(this, new_args.size() == 2, "Expected 2 element in new_args for the MVN op but got ", @@ -193,6 +199,7 @@ shared_ptr op::v6::MVN::clone_with_new_inputs(const OutputVector& new_args bool op::v6::MVN::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v6_MVN_visit_attributes); visitor.on_attribute("eps", m_eps); visitor.on_attribute("normalize_variance", m_normalize_variance); visitor.on_attribute("eps_mode", m_eps_mode); diff --git a/ngraph/core/src/op/negative.cpp b/ngraph/core/src/op/negative.cpp index 0664b8bd1e0..5e673c178fc 100644 --- a/ngraph/core/src/op/negative.cpp +++ b/ngraph/core/src/op/negative.cpp @@ -32,11 +32,13 @@ op::Negative::Negative(const Output& arg) bool ngraph::op::v0::Negative::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Negative_visit_attributes); return true; } shared_ptr op::Negative::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Negative_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -73,12 +75,8 @@ namespace negativeop bool op::Negative::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Negative_evaluate) - { - return negativeop::evaluate_negative( - inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v0_Negative_evaluate); + return negativeop::evaluate_negative(inputs[0], outputs[0], shape_size(get_output_shape(0))); } shared_ptr ngraph::operator-(const Output& arg0) diff --git a/ngraph/core/src/op/non_max_suppression.cpp b/ngraph/core/src/op/non_max_suppression.cpp index d5e715b6865..1b600c906bf 100644 --- a/ngraph/core/src/op/non_max_suppression.cpp +++ b/ngraph/core/src/op/non_max_suppression.cpp @@ -16,6 +16,7 @@ #include "ngraph/op/non_max_suppression.hpp" #include +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/op/constant.hpp" #include "ngraph/op/util/op_types.hpp" @@ -64,6 +65,7 @@ op::v1::NonMaxSuppression::NonMaxSuppression( std::shared_ptr op::v1::NonMaxSuppression::clone_with_new_inputs(const 
OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_NonMaxSuppression_clone_with_new_inputs); check_new_args_count(this, new_args); NODE_VALIDATION_CHECK(this, new_args.size() >= 2 && new_args.size() <= 5, @@ -85,6 +87,7 @@ std::shared_ptr bool ngraph::op::v1::NonMaxSuppression::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_NonMaxSuppression_visit_attributes); visitor.on_attribute("box_encoding", m_box_encoding); visitor.on_attribute("sort_result_descending", m_sort_result_descending); return true; @@ -92,11 +95,13 @@ bool ngraph::op::v1::NonMaxSuppression::visit_attributes(AttributeVisitor& visit void op::v1::NonMaxSuppression::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_NonMaxSuppression_validate_and_infer_types); const auto boxes_ps = get_input_partial_shape(0); const auto scores_ps = get_input_partial_shape(1); // the spec doesn't say what exact type should be used for the output of this op - // that's why we're setting it to 64-bit integer to provide the maximum range of values support + // that's why we're setting it to 64-bit integer to provide the maximum range of values + // support // this will be changed (configurable) in the next version of this op const auto& output_element_type = element::i64; @@ -262,6 +267,7 @@ op::v3::NonMaxSuppression::NonMaxSuppression( std::shared_ptr op::v3::NonMaxSuppression::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v3_NonMaxSuppression_clone_with_new_inputs); check_new_args_count(this, new_args); NODE_VALIDATION_CHECK(this, new_args.size() >= 2 && new_args.size() <= 5, @@ -289,6 +295,7 @@ std::shared_ptr bool ngraph::op::v3::NonMaxSuppression::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v3_NonMaxSuppression_visit_attributes); visitor.on_attribute("box_encoding", m_box_encoding); visitor.on_attribute("sort_result_descending", m_sort_result_descending); visitor.on_attribute("output_type", m_output_type); @@ -375,6 +382,7 @@ void op::v3::NonMaxSuppression::validate() void op::v3::NonMaxSuppression::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v3_NonMaxSuppression_validate_and_infer_types); const auto boxes_ps = get_input_partial_shape(0); const auto scores_ps = get_input_partial_shape(1); @@ -481,6 +489,7 @@ op::v4::NonMaxSuppression::NonMaxSuppression( std::shared_ptr op::v4::NonMaxSuppression::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v4_NonMaxSuppression_clone_with_new_inputs); check_new_args_count(this, new_args); NODE_VALIDATION_CHECK(this, new_args.size() >= 2 && new_args.size() <= 5, @@ -508,6 +517,7 @@ std::shared_ptr void op::v4::NonMaxSuppression::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v4_NonMaxSuppression_validate_and_infer_types); const auto boxes_ps = get_input_partial_shape(0); const auto scores_ps = get_input_partial_shape(1); @@ -627,6 +637,7 @@ op::v5::NonMaxSuppression::NonMaxSuppression( std::shared_ptr op::v5::NonMaxSuppression::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v5_NonMaxSuppression_clone_with_new_inputs); check_new_args_count(this, new_args); NODE_VALIDATION_CHECK(this, new_args.size() >= 2 && new_args.size() <= 6, @@ -885,6 +896,7 @@ float op::v5::NonMaxSuppression::soft_nms_sigma_from_input() const bool ngraph::op::v5::NonMaxSuppression::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v5_NonMaxSuppression_visit_attributes); visitor.on_attribute("box_encoding", m_box_encoding); visitor.on_attribute("sort_result_descending", 
m_sort_result_descending); visitor.on_attribute("output_type", m_output_type); @@ -893,6 +905,7 @@ bool ngraph::op::v5::NonMaxSuppression::visit_attributes(AttributeVisitor& visit void op::v5::NonMaxSuppression::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v5_NonMaxSuppression_validate_and_infer_types); const auto boxes_ps = get_input_partial_shape(0); const auto scores_ps = get_input_partial_shape(1); diff --git a/ngraph/core/src/op/non_zero.cpp b/ngraph/core/src/op/non_zero.cpp index c51506f955e..83a7d9a5019 100644 --- a/ngraph/core/src/op/non_zero.cpp +++ b/ngraph/core/src/op/non_zero.cpp @@ -48,12 +48,14 @@ op::v3::NonZero::NonZero(const Output& arg, const element::Type& output_ty bool ngraph::op::v3::NonZero::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v3_NonZero_visit_attributes); visitor.on_attribute("output_type", m_output_type); return true; } void op::v3::NonZero::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v3_NonZero_validate_and_infer_types); const PartialShape& input_shape = get_input_partial_shape(0); const auto input_et = get_input_element_type(0); @@ -80,6 +82,7 @@ void op::v3::NonZero::validate_and_infer_types() shared_ptr op::v3::NonZero::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v3_NonZero_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), m_output_type); } @@ -118,10 +121,8 @@ namespace nonzero #define TYPE_OUT_CASE(a, ...) \ case element::Type_t::a: \ { \ - NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_nonzero_out, _, a)) \ - { \ - rc = evaluate_nonzero_execute(__VA_ARGS__); \ - } \ + NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_nonzero_out, _, a)); \ + rc = evaluate_nonzero_execute(__VA_ARGS__); \ } \ break @@ -161,9 +162,6 @@ namespace nonzero bool op::v3::NonZero::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v3_NonZero_evaluate) - { - return nonzero::evaluate_nonzero(inputs[0], outputs[0]); - } - return false; + NGRAPH_OP_SCOPE(v3_NonZero_evaluate); + return nonzero::evaluate_nonzero(inputs[0], outputs[0]); } diff --git a/ngraph/core/src/op/normalize_l2.cpp b/ngraph/core/src/op/normalize_l2.cpp index bf0d6abf850..41cda52422a 100644 --- a/ngraph/core/src/op/normalize_l2.cpp +++ b/ngraph/core/src/op/normalize_l2.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include #include +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/builder/norm.hpp" @@ -45,6 +46,7 @@ op::NormalizeL2::NormalizeL2(const Output& data, bool ngraph::op::v0::NormalizeL2::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_NormalizeL2_visit_attributes); visitor.on_attribute("eps", m_eps); visitor.on_attribute("eps_mode", m_eps_mode); return true; @@ -116,6 +118,7 @@ OutputVector op::NormalizeL2::decompose_op() const shared_ptr op::NormalizeL2::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_NormalizeL2_clone_with_new_inputs); if (new_args.size() != 2) { throw ngraph_error("Incorrect number of new arguments"); diff --git a/ngraph/core/src/op/not.cpp b/ngraph/core/src/op/not.cpp index d6d403c90ca..75358765390 100644 --- a/ngraph/core/src/op/not.cpp +++ b/ngraph/core/src/op/not.cpp @@ -38,12 +38,14 @@ op::v1::LogicalNot::LogicalNot(const Output& arg) bool ngraph::op::v1::LogicalNot::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_LogicalNot_visit_attributes); return true; } // TODO(amprocte): Update this to allow 
only boolean, for consistency with logical binops. void op::v1::LogicalNot::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_LogicalNot_validate_and_infer_types); auto args_et_pshape = op::util::validate_and_infer_elementwise_args(this); element::Type& args_et = std::get<0>(args_et_pshape); PartialShape& args_pshape = std::get<1>(args_et_pshape); @@ -53,6 +55,7 @@ void op::v1::LogicalNot::validate_and_infer_types() shared_ptr op::v1::LogicalNot::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_LogicalNot_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -91,9 +94,6 @@ namespace notop bool op::v1::LogicalNot::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_LogicalNot_evaluate) - { - return notop::evaluate_not(inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v1_LogicalNot_evaluate); + return notop::evaluate_not(inputs[0], outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/not_equal.cpp b/ngraph/core/src/op/not_equal.cpp index 74453fe4794..4e12047bd70 100644 --- a/ngraph/core/src/op/not_equal.cpp +++ b/ngraph/core/src/op/not_equal.cpp @@ -77,6 +77,7 @@ op::v1::NotEqual::NotEqual(const Output& arg0, shared_ptr op::v1::NotEqual::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_NotEqual_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } @@ -84,14 +85,12 @@ shared_ptr op::v1::NotEqual::clone_with_new_inputs(const OutputVector& new bool op::v1::NotEqual::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_NotEqual_evaluate) - { - return not_equalop::evaluate_not_equal(inputs[0], inputs[1], outputs[0], get_autob()); - } - return false; + NGRAPH_OP_SCOPE(v1_NotEqual_evaluate); + return not_equalop::evaluate_not_equal(inputs[0], inputs[1], outputs[0], get_autob()); } bool op::v1::NotEqual::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_NotEqual_visit_attributes); return true; } diff --git a/ngraph/core/src/op/one_hot.cpp b/ngraph/core/src/op/one_hot.cpp index 6c5d2d8ecaa..4305f5ecdfd 100644 --- a/ngraph/core/src/op/one_hot.cpp +++ b/ngraph/core/src/op/one_hot.cpp @@ -39,6 +39,7 @@ op::v1::OneHot::OneHot(const Output& indices, void op::v1::OneHot::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_OneHot_validate_and_infer_types); const auto& indices_et = get_input_element_type(0); const auto& depth_et = get_input_element_type(1); const auto& on_value_et = get_input_element_type(2); @@ -121,12 +122,14 @@ void op::v1::OneHot::validate_and_infer_types() bool ngraph::op::v1::OneHot::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_OneHot_visit_attributes); visitor.on_attribute("axis", m_axis); return true; } shared_ptr op::v1::OneHot::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_OneHot_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared( new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3), m_axis); @@ -159,12 +162,10 @@ namespace detail #define TYPE_OUT_CASE(a, ...) 
\ case element::Type_t::a: \ { \ - NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_one_hot_out, _, a)) \ - { \ - using IT = typename element_type_traits::value_type; \ - using OT = typename element_type_traits::value_type; \ - rc = evaluate(__VA_ARGS__); \ - } \ + NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_one_hot_out, _, a)); \ + using IT = typename element_type_traits::value_type; \ + using OT = typename element_type_traits::value_type; \ + rc = evaluate(__VA_ARGS__); \ } \ break @@ -208,9 +209,6 @@ namespace detail bool op::v1::OneHot::evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const { - NGRAPH_OP_SCOPE(v1_OneHot_evaluate) - { - return detail::evaluate_onehot(output_values, input_values, get_axis()); - } - return false; + NGRAPH_OP_SCOPE(v1_OneHot_evaluate); + return detail::evaluate_onehot(output_values, input_values, get_axis()); } diff --git a/ngraph/core/src/op/or.cpp b/ngraph/core/src/op/or.cpp index 3ac03a90750..28ff696b472 100644 --- a/ngraph/core/src/op/or.cpp +++ b/ngraph/core/src/op/or.cpp @@ -36,6 +36,7 @@ op::v1::LogicalOr::LogicalOr(const Output& arg0, shared_ptr op::v1::LogicalOr::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_LogicalOr_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } @@ -82,9 +83,6 @@ namespace logor bool op::v1::LogicalOr::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_LogicalOr_evaluate) - { - return logor::evaluate_logor(inputs[0], inputs[1], outputs[0], get_autob()); - } - return false; + NGRAPH_OP_SCOPE(v1_LogicalOr_evaluate); + return logor::evaluate_logor(inputs[0], inputs[1], outputs[0], get_autob()); } diff --git a/ngraph/core/src/op/pad.cpp b/ngraph/core/src/op/pad.cpp index 95d2c6ad793..fc3ab3127be 100644 --- a/ngraph/core/src/op/pad.cpp +++ b/ngraph/core/src/op/pad.cpp @@ -75,12 +75,14 @@ CoordinateDiff op::v1::Pad::get_pads_end() const bool ngraph::op::v1::Pad::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_Pad_visit_attributes); visitor.on_attribute("pad_mode", m_pad_mode); return true; } void op::v1::Pad::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_Pad_validate_and_infer_types); element::Type result_et; const auto& arg_element_type = get_input_element_type(0); @@ -140,12 +142,12 @@ void op::v1::Pad::validate_and_infer_types() const auto& arg_shape_rank = arg_shape.rank(); if (arg_shape_rank.is_static() && pads_begin_shape.is_static()) { - NODE_VALIDATION_CHECK( - this, - pads_begin_shape[0].get_length() <= arg_shape_rank.get_length(), - "Number of elements of pads_begin must be >= 0 and <= arg rank (pads_begin_shape[0]: ", - pads_begin_shape[0], - ")."); + NODE_VALIDATION_CHECK(this, + pads_begin_shape[0].get_length() <= arg_shape_rank.get_length(), + "Number of elements of pads_begin must be >= 0 and <= arg rank " + "(pads_begin_shape[0]: ", + pads_begin_shape[0], + ")."); } if (arg_shape_rank.is_static() && pads_end_shape.is_static()) { @@ -175,16 +177,18 @@ void op::v1::Pad::validate_and_infer_types() result_dims[i] = static_cast(result_dim); if (i > 1) { - NODE_VALIDATION_CHECK( - this, - m_pad_mode != op::PadMode::EDGE || arg_shape[i].get_length() >= 1, - "EDGE padding mode requires an input of dimension of at least 1 at each " - "spatial axis."); - NODE_VALIDATION_CHECK( - this, - m_pad_mode != op::PadMode::REFLECT || arg_shape[i].get_length() >= 2, - "REFLECT padding mode requires an input of dimension of at 
least 2 at each " - "spatial axis."); + NODE_VALIDATION_CHECK(this, + m_pad_mode != op::PadMode::EDGE || + arg_shape[i].get_length() >= 1, + "EDGE padding mode requires an input of dimension of " + "at least 1 at each " + "spatial axis."); + NODE_VALIDATION_CHECK(this, + m_pad_mode != op::PadMode::REFLECT || + arg_shape[i].get_length() >= 2, + "REFLECT padding mode requires an input of dimension " + "of at least 2 at each " + "spatial axis."); } } } @@ -198,6 +202,7 @@ void op::v1::Pad::validate_and_infer_types() shared_ptr op::v1::Pad::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_Pad_clone_with_new_inputs); check_new_args_count(this, new_args); if (get_input_size() == 4) { @@ -243,6 +248,6 @@ bool op::v1::Pad::evaluate_pad(const HostTensorVector& outputs, bool op::v1::Pad::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_Pad_evaluate) { return evaluate_pad(outputs, inputs); } - return false; + NGRAPH_OP_SCOPE(v1_Pad_evaluate); + return evaluate_pad(outputs, inputs); } diff --git a/ngraph/core/src/op/parameter.cpp b/ngraph/core/src/op/parameter.cpp index 0842b1e20ce..46e12cfc60f 100644 --- a/ngraph/core/src/op/parameter.cpp +++ b/ngraph/core/src/op/parameter.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/op/parameter.hpp" @@ -34,6 +35,7 @@ op::Parameter::Parameter(const element::Type& element_type, const PartialShape& bool op::Parameter::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Parameter_visit_attributes); visitor.on_attribute("shape", m_partial_shape); visitor.on_attribute("element_type", m_element_type); return true; @@ -41,12 +43,14 @@ bool op::Parameter::visit_attributes(AttributeVisitor& visitor) void op::Parameter::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_Parameter_validate_and_infer_types); Op::validate_and_infer_types(); set_output_type(0, m_element_type, m_partial_shape); } shared_ptr op::Parameter::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Parameter_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(m_element_type, m_partial_shape); } diff --git a/ngraph/core/src/op/power.cpp b/ngraph/core/src/op/power.cpp index ad73b69aa79..86ace5db63d 100644 --- a/ngraph/core/src/op/power.cpp +++ b/ngraph/core/src/op/power.cpp @@ -80,15 +80,13 @@ op::v1::Power::Power(const Output& arg0, shared_ptr op::v1::Power::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_Power_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } bool op::v1::Power::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_Power_evaluate) - { - return power::evaluate_power(inputs[0], inputs[1], outputs[0], get_autob()); - } - return false; + NGRAPH_OP_SCOPE(v1_Power_evaluate); + return power::evaluate_power(inputs[0], inputs[1], outputs[0], get_autob()); } diff --git a/ngraph/core/src/op/prelu.cpp b/ngraph/core/src/op/prelu.cpp index 4dbf9c0cd60..f83cf28282d 100644 --- a/ngraph/core/src/op/prelu.cpp +++ b/ngraph/core/src/op/prelu.cpp @@ -42,6 +42,7 @@ op::PRelu::PRelu(const Output& data, const Output& slope) bool ngraph::op::v0::PRelu::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_PRelu_visit_attributes); return 
true; } @@ -88,6 +89,7 @@ OutputVector op::PRelu::decompose_op() const shared_ptr op::PRelu::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_PRelu_clone_with_new_inputs); if (new_args.size() != 2) { throw ngraph_error("Incorrect number of new arguments"); @@ -127,9 +129,6 @@ namespace prelu bool op::PRelu::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_PRelu_evaluate) - { - return prelu::evaluate_prelu(inputs[0], inputs[1], outputs[0]); - } - return false; + NGRAPH_OP_SCOPE(v0_PRelu_evaluate); + return prelu::evaluate_prelu(inputs[0], inputs[1], outputs[0]); } diff --git a/ngraph/core/src/op/prior_box.cpp b/ngraph/core/src/op/prior_box.cpp index b1d70094c60..0e8ae5d9dce 100644 --- a/ngraph/core/src/op/prior_box.cpp +++ b/ngraph/core/src/op/prior_box.cpp @@ -38,6 +38,7 @@ op::PriorBox::PriorBox(const Output& layer_shape, void op::PriorBox::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_PriorBox_validate_and_infer_types); // shape node should have integer data type. For now we only allow i64 auto layer_shape_et = get_input_element_type(0); NODE_VALIDATION_CHECK(this, @@ -85,6 +86,7 @@ void op::PriorBox::validate_and_infer_types() shared_ptr op::PriorBox::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_PriorBox_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), m_attrs); } @@ -136,6 +138,7 @@ std::vector op::PriorBox::normalized_aspect_ratio(const std::vector& layer_shape, void op::PriorBoxClustered::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_PriorBoxClustered_validate_and_infer_types); // shape node should have integer data type. For now we only allow i64 auto layer_shape_et = get_input_element_type(0); NODE_VALIDATION_CHECK(this, @@ -90,12 +91,14 @@ void op::PriorBoxClustered::validate_and_infer_types() shared_ptr op::PriorBoxClustered::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_PriorBoxClustered_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), m_attrs); } bool op::PriorBoxClustered::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_PriorBoxClustered_visit_attributes); float step = 0; float step_w_tmp = m_attrs.step_widths; float step_h_tmp = m_attrs.step_heights; @@ -165,10 +168,6 @@ namespace prior_box_clustered bool op::v0::PriorBoxClustered::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_PriorBoxClustered_evaluate) - { - return prior_box_clustered::evaluate_prior_box( - inputs[0], inputs[1], outputs[0], get_attrs()); - } - return false; + NGRAPH_OP_SCOPE(v0_PriorBoxClustered_evaluate); + return prior_box_clustered::evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs()); } diff --git a/ngraph/core/src/op/proposal.cpp b/ngraph/core/src/op/proposal.cpp index caccf7695be..62cabece919 100644 --- a/ngraph/core/src/op/proposal.cpp +++ b/ngraph/core/src/op/proposal.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/proposal.hpp" +#include "itt.hpp" #include "ngraph/op/constant.hpp" @@ -35,6 +36,7 @@ op::v0::Proposal::Proposal(const Output& class_probs, void op::v0::Proposal::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_Proposal_validate_and_infer_types); const auto& class_probs_pshape = get_input_partial_shape(0); const auto& 
class_bbox_deltas_pshape = get_input_partial_shape(1); const auto& image_shape_pshape = get_input_partial_shape(2); @@ -84,12 +86,14 @@ void op::v0::Proposal::validate_and_infer_types() shared_ptr op::v0::Proposal::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Proposal_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_attrs); } bool op::v0::Proposal::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Proposal_visit_attributes); visitor.on_attribute("base_size", m_attrs.base_size); visitor.on_attribute("pre_nms_topn", m_attrs.pre_nms_topn); visitor.on_attribute("post_nms_topn", m_attrs.post_nms_topn); @@ -120,6 +124,7 @@ op::v4::Proposal::Proposal(const Output& class_probs, void op::v4::Proposal::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v4_Proposal_validate_and_infer_types); v0::Proposal::validate_and_infer_types(); const auto& class_probs_pshape = get_input_partial_shape(0); @@ -136,6 +141,7 @@ void op::v4::Proposal::validate_and_infer_types() std::shared_ptr op::v4::Proposal::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v4_Proposal_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_attrs); } diff --git a/ngraph/core/src/op/psroi_pooling.cpp b/ngraph/core/src/op/psroi_pooling.cpp index b6217438e81..05bc6a08668 100644 --- a/ngraph/core/src/op/psroi_pooling.cpp +++ b/ngraph/core/src/op/psroi_pooling.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/psroi_pooling.hpp" +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" using namespace std; @@ -43,6 +44,7 @@ op::PSROIPooling::PSROIPooling(const Output& input, bool ngraph::op::v0::PSROIPooling::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_PSROIPooling_visit_attributes); visitor.on_attribute("output_dim", m_output_dim); visitor.on_attribute("group_size", m_group_size); visitor.on_attribute("spatial_scale", m_spatial_scale); @@ -54,6 +56,7 @@ bool ngraph::op::v0::PSROIPooling::visit_attributes(AttributeVisitor& visitor) void op::PSROIPooling::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_PSROIPooling_validate_and_infer_types); auto feat_maps_et = get_input_element_type(0); auto coords_et = get_input_element_type(1); NODE_VALIDATION_CHECK(this, @@ -134,6 +137,7 @@ void op::PSROIPooling::validate_and_infer_types() shared_ptr op::PSROIPooling::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_PSROIPooling_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), diff --git a/ngraph/core/src/op/quantize.cpp b/ngraph/core/src/op/quantize.cpp index bad307f159f..891ab9e14a0 100644 --- a/ngraph/core/src/op/quantize.cpp +++ b/ngraph/core/src/op/quantize.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/quantize.hpp" +#include "itt.hpp" #include "ngraph/runtime/host_tensor.hpp" #include "ngraph/runtime/reference/quantize.hpp" #include "ngraph/shape_util.hpp" @@ -43,6 +44,7 @@ op::Quantize::Quantize(const Output& input, void op::Quantize::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_Quantize_validate_and_infer_types); enum { INPUT, @@ -159,6 +161,7 @@ void op::Quantize::validate_and_infer_types() shared_ptr 
op::Quantize::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Quantize_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared( new_args.at(0), new_args.at(1), new_args.at(2), m_type, m_axes, m_round_mode); diff --git a/ngraph/core/src/op/range.cpp b/ngraph/core/src/op/range.cpp index 0abaf448d8c..f934994aa33 100644 --- a/ngraph/core/src/op/range.cpp +++ b/ngraph/core/src/op/range.cpp @@ -70,12 +70,14 @@ op::v4::Range::Range(const Output& start, bool ngraph::op::v4::Range::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v4_Range_visit_attributes); visitor.on_attribute("output_type", m_output_type); return true; } void op::v4::Range::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v4_Range_validate_and_infer_types); NODE_VALIDATION_CHECK(this, m_output_type.is_integral_number() || m_output_type.is_real(), "output tensor type should be a numeric type. Got: ", @@ -182,6 +184,7 @@ void op::v4::Range::validate_and_infer_types() shared_ptr op::v4::Range::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v4_Range_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_output_type); } @@ -300,15 +303,12 @@ namespace rangeop bool op::v4::Range::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v4_Range_evaluate) - { - HostTensorPtr out = outputs[0]; - HostTensorPtr start = inputs[0]; - HostTensorPtr stop = inputs[1]; - HostTensorPtr step = inputs[2]; - return rangeop::evaluate_power(out, start, stop, step, m_output_type, 4); - } - return false; + NGRAPH_OP_SCOPE(v4_Range_evaluate); + HostTensorPtr out = outputs[0]; + HostTensorPtr start = inputs[0]; + HostTensorPtr stop = inputs[1]; + HostTensorPtr step = inputs[2]; + return rangeop::evaluate_power(out, start, stop, step, m_output_type, 4); } constexpr NodeTypeInfo op::v0::Range::type_info; @@ -421,11 +421,13 @@ static PartialShape infer_output_shape(const op::v0::Range* node, const element: bool ngraph::op::v0::Range::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Range_visit_attributes); return true; } void op::v0::Range::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_Range_validate_and_infer_types); set_input_is_relevant_to_shape(0); set_input_is_relevant_to_shape(1); set_input_is_relevant_to_shape(2); @@ -488,6 +490,7 @@ void op::v0::Range::validate_and_infer_types() shared_ptr op::v0::Range::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Range_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), new_args.at(2)); } @@ -499,13 +502,10 @@ void positive_range(T start_val, T stop_val, T step_val) bool op::v0::Range::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(op_v0_Range_evaluate) - { - HostTensorPtr out = outputs[0]; - HostTensorPtr start = inputs[0]; - HostTensorPtr stop = inputs[1]; - HostTensorPtr step = inputs[2]; - return rangeop::evaluate_power(out, start, stop, step, start->get_element_type(), 0); - } - return false; + NGRAPH_OP_SCOPE(v0_Range_evaluate); + HostTensorPtr out = outputs[0]; + HostTensorPtr start = inputs[0]; + HostTensorPtr stop = inputs[1]; + HostTensorPtr step = inputs[2]; + return rangeop::evaluate_power(out, start, stop, step, start->get_element_type(), 0); } diff --git a/ngraph/core/src/op/read_value.cpp 
b/ngraph/core/src/op/read_value.cpp index 8394b9160eb..e48198c66e3 100644 --- a/ngraph/core/src/op/read_value.cpp +++ b/ngraph/core/src/op/read_value.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/read_value.hpp" +#include "itt.hpp" using namespace std; using namespace ngraph; @@ -30,6 +31,7 @@ op::ReadValue::ReadValue(const Output& init_value, const std::string& vari void op::ReadValue::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v3_ReadValue_validate_and_infer_types); auto arg_t = get_input_element_type(0); auto output_shape = get_input_partial_shape(0); @@ -43,12 +45,14 @@ void op::ReadValue::validate_and_infer_types() shared_ptr op::ReadValue::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v3_ReadValue_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), m_variable_id); } bool op::v3::ReadValue::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v3_ReadValue_visit_attributes); visitor.on_attribute("variable_id", m_variable_id); return true; } diff --git a/ngraph/core/src/op/reduce_l1.cpp b/ngraph/core/src/op/reduce_l1.cpp index 3a43774b666..cd659ab2158 100644 --- a/ngraph/core/src/op/reduce_l1.cpp +++ b/ngraph/core/src/op/reduce_l1.cpp @@ -41,6 +41,7 @@ shared_ptr op::v4::ReduceL1::get_default_value() const shared_ptr op::v4::ReduceL1::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v4_ReduceL1_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), get_keep_dims()); } @@ -81,10 +82,6 @@ namespace reduce_l1 bool op::v4::ReduceL1::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v4_ReduceL1_evaluate) - { - return reduce_l1::evaluate_sum( - inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()); - } - return false; + NGRAPH_OP_SCOPE(v4_ReduceL1_evaluate); + return reduce_l1::evaluate_sum(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()); } diff --git a/ngraph/core/src/op/reduce_l2.cpp b/ngraph/core/src/op/reduce_l2.cpp index 442c3aecd96..da94f8fd729 100644 --- a/ngraph/core/src/op/reduce_l2.cpp +++ b/ngraph/core/src/op/reduce_l2.cpp @@ -41,6 +41,7 @@ shared_ptr op::v4::ReduceL2::get_default_value() const shared_ptr op::v4::ReduceL2::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v4_ReduceL2_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), get_keep_dims()); } @@ -79,10 +80,7 @@ namespace reduce_l2 bool op::v4::ReduceL2::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v4_ReduceL2_evaluate) - { - return reduce_l2::evaluate_reduce_l2( - inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()); - } - return false; + NGRAPH_OP_SCOPE(v4_ReduceL2_evaluate); + return reduce_l2::evaluate_reduce_l2( + inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()); } diff --git a/ngraph/core/src/op/reduce_logical_and.cpp b/ngraph/core/src/op/reduce_logical_and.cpp index c7b2695e29e..2519b679b4e 100644 --- a/ngraph/core/src/op/reduce_logical_and.cpp +++ b/ngraph/core/src/op/reduce_logical_and.cpp @@ -36,6 +36,7 @@ op::v1::ReduceLogicalAnd::ReduceLogicalAnd(const Output& data, shared_ptr op::v1::ReduceLogicalAnd::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_ReduceLogicalAnd_clone_with_new_inputs); 
check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), get_keep_dims()); } @@ -75,12 +76,9 @@ namespace bool op::v1::ReduceLogicalAnd::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_ReduceLogicalAnd_evaluate) - { - const auto& data = inputs[0]; - const auto& axes = inputs[1]; - const auto& out = outputs[0]; - return evaluate_reduce_logical_and(data, axes, out, get_keep_dims()); - } - return false; + NGRAPH_OP_SCOPE(v1_ReduceLogicalAnd_evaluate); + const auto& data = inputs[0]; + const auto& axes = inputs[1]; + const auto& out = outputs[0]; + return evaluate_reduce_logical_and(data, axes, out, get_keep_dims()); } diff --git a/ngraph/core/src/op/reduce_logical_or.cpp b/ngraph/core/src/op/reduce_logical_or.cpp index 602fe18d094..c3e99c7682e 100644 --- a/ngraph/core/src/op/reduce_logical_or.cpp +++ b/ngraph/core/src/op/reduce_logical_or.cpp @@ -36,6 +36,7 @@ op::v1::ReduceLogicalOr::ReduceLogicalOr(const Output& data, shared_ptr op::v1::ReduceLogicalOr::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_ReduceLogicalOr_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), get_keep_dims()); } @@ -75,12 +76,9 @@ namespace bool op::v1::ReduceLogicalOr::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_ReduceLogicalOr_evaluate) - { - const auto& data = inputs[0]; - const auto& axes = inputs[1]; - const auto& out = outputs[0]; - return evaluate_reduce_logical_or(data, axes, out, get_keep_dims()); - } - return false; + NGRAPH_OP_SCOPE(v1_ReduceLogicalOr_evaluate); + const auto& data = inputs[0]; + const auto& axes = inputs[1]; + const auto& out = outputs[0]; + return evaluate_reduce_logical_or(data, axes, out, get_keep_dims()); } diff --git a/ngraph/core/src/op/reduce_mean.cpp b/ngraph/core/src/op/reduce_mean.cpp index d92e0881219..769e9a57af8 100644 --- a/ngraph/core/src/op/reduce_mean.cpp +++ b/ngraph/core/src/op/reduce_mean.cpp @@ -37,6 +37,7 @@ op::v1::ReduceMean::ReduceMean(const Output& arg, shared_ptr op::v1::ReduceMean::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_ReduceMean_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), get_keep_dims()); } @@ -78,9 +79,6 @@ namespace mean bool op::v1::ReduceMean::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_ReduceMean_evaluate) - { - return mean::evaluate_mean(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()); - } - return false; + NGRAPH_OP_SCOPE(v1_ReduceMean_evaluate); + return mean::evaluate_mean(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()); } diff --git a/ngraph/core/src/op/reduce_prod.cpp b/ngraph/core/src/op/reduce_prod.cpp index 3b78b20d2e4..cb3f45ca782 100644 --- a/ngraph/core/src/op/reduce_prod.cpp +++ b/ngraph/core/src/op/reduce_prod.cpp @@ -41,6 +41,7 @@ shared_ptr op::v1::ReduceProd::get_default_value() const shared_ptr op::v1::ReduceProd::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_ReduceProd_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), get_keep_dims()); } @@ -82,10 +83,7 @@ namespace reduce_prod bool op::v1::ReduceProd::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - 
NGRAPH_OP_SCOPE(v1_ReduceProd_evaluate) - { - return reduce_prod::evaluate_product( - inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()); - } - return false; + NGRAPH_OP_SCOPE(v1_ReduceProd_evaluate); + return reduce_prod::evaluate_product( + inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()); } diff --git a/ngraph/core/src/op/reduce_sum.cpp b/ngraph/core/src/op/reduce_sum.cpp index b878b00fdf1..bfe2691e86b 100644 --- a/ngraph/core/src/op/reduce_sum.cpp +++ b/ngraph/core/src/op/reduce_sum.cpp @@ -42,6 +42,7 @@ shared_ptr op::v1::ReduceSum::get_default_value() const shared_ptr op::v1::ReduceSum::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_ReduceSum_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), get_keep_dims()); } @@ -83,10 +84,6 @@ namespace reduce_sum bool op::v1::ReduceSum::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_ReduceSum_evaluate) - { - return reduce_sum::evaluate_sum( - inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()); - } - return false; + NGRAPH_OP_SCOPE(v1_ReduceSum_evaluate); + return reduce_sum::evaluate_sum(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()); } diff --git a/ngraph/core/src/op/region_yolo.cpp b/ngraph/core/src/op/region_yolo.cpp index 4eed7f59904..0cf77658b7b 100644 --- a/ngraph/core/src/op/region_yolo.cpp +++ b/ngraph/core/src/op/region_yolo.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/region_yolo.hpp" +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" using namespace std; @@ -46,6 +47,7 @@ op::RegionYolo::RegionYolo(const Output& input, bool ngraph::op::v0::RegionYolo::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_RegionYolo_visit_attributes); visitor.on_attribute("anchors", m_anchors); visitor.on_attribute("axis", m_axis); visitor.on_attribute("coords", m_num_coords); @@ -59,6 +61,7 @@ bool ngraph::op::v0::RegionYolo::visit_attributes(AttributeVisitor& visitor) void op::RegionYolo::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_RegionYolo_validate_and_infer_types); auto input_et = get_input_element_type(0); NODE_VALIDATION_CHECK(this, @@ -110,6 +113,7 @@ void op::RegionYolo::validate_and_infer_types() shared_ptr op::RegionYolo::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_RegionYolo_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), m_num_coords, diff --git a/ngraph/core/src/op/relu.cpp b/ngraph/core/src/op/relu.cpp index 71912929975..c4672fca2d2 100644 --- a/ngraph/core/src/op/relu.cpp +++ b/ngraph/core/src/op/relu.cpp @@ -35,6 +35,7 @@ op::Relu::Relu(const Output& arg) shared_ptr op::Relu::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Relu_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -71,14 +72,12 @@ namespace relu bool op::Relu::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Relu_evaluate) - { - return relu::evaluate_relu(inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v0_Relu_evaluate); + return relu::evaluate_relu(inputs[0], outputs[0], shape_size(get_output_shape(0))); } bool op::Relu::visit_attributes(AttributeVisitor& visitor) { + 
NGRAPH_OP_SCOPE(v0_Relu_visit_attributes); return true; } diff --git a/ngraph/core/src/op/reorg_yolo.cpp b/ngraph/core/src/op/reorg_yolo.cpp index d9ede137e59..6ee8cd7daca 100644 --- a/ngraph/core/src/op/reorg_yolo.cpp +++ b/ngraph/core/src/op/reorg_yolo.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/reorg_yolo.hpp" +#include "itt.hpp" #include "ngraph/runtime/reference/reorg_yolo.hpp" using namespace std; @@ -38,6 +39,7 @@ op::ReorgYolo::ReorgYolo(const Output& input, const size_t stride) void op::ReorgYolo::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_ReorgYolo_validate_and_infer_types); NODE_VALIDATION_CHECK(this, !m_strides.empty(), "Stride attribute is required."); auto input_et = get_input_element_type(0); @@ -75,12 +77,14 @@ void op::ReorgYolo::validate_and_infer_types() shared_ptr op::ReorgYolo::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_ReorgYolo_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), m_strides); } bool op::ReorgYolo::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_ReorgYolo_visit_attributes); visitor.on_attribute("stride", m_strides); return true; } diff --git a/ngraph/core/src/op/reshape.cpp b/ngraph/core/src/op/reshape.cpp index 30e8c1fbbb3..e9cd2254f85 100644 --- a/ngraph/core/src/op/reshape.cpp +++ b/ngraph/core/src/op/reshape.cpp @@ -67,12 +67,14 @@ op::v1::Reshape::Reshape(const Output& arg, const Output& shape_patt bool op::v1::Reshape::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_Reshape_visit_attributes); visitor.on_attribute("special_zero", m_special_zero); return true; } void op::v1::Reshape::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_Reshape_validate_and_infer_types); auto shape_pattern_et = get_input_element_type(1); // check data types NODE_VALIDATION_CHECK( @@ -196,10 +198,10 @@ void op::v1::Reshape::validate_and_infer_types() } else { - NODE_VALIDATION_CHECK( - this, - input_elements % output_elements == 0, - "Non-'-1' output dimensions do not evenly divide the input dimensions"); + NODE_VALIDATION_CHECK(this, + input_elements % output_elements == 0, + "Non-'-1' output dimensions do not evenly divide " + "the input dimensions"); partial_shape[negative_dim] = Dimension(input_elements / output_elements); } } @@ -223,6 +225,7 @@ void op::v1::Reshape::validate_and_infer_types() shared_ptr op::v1::Reshape::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_Reshape_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), m_special_zero); } @@ -230,10 +233,8 @@ shared_ptr op::v1::Reshape::clone_with_new_inputs(const OutputVector& new_ #define COMPUTE_OUT_SHAPE_CASE(a, ...) 
\ case element::Type_t::a: \ { \ - NGRAPH_OP_SCOPE(OV_CC_CAT3(compute_reshape_out_shape, _, a)) \ - { \ - reshapeop::compute_output_shape(__VA_ARGS__); \ - } \ + NGRAPH_OP_SCOPE(OV_CC_CAT3(compute_reshape_out_shape, _, a)); \ + reshapeop::compute_output_shape(__VA_ARGS__); \ } \ break; @@ -345,8 +346,8 @@ bool op::v1::Reshape::evaluate_reshape(const HostTensorVector& outputs, bool op::v1::Reshape::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_Reshape_evaluate) { return evaluate_reshape(outputs, inputs); } - return false; + NGRAPH_OP_SCOPE(v1_Reshape_evaluate); + return evaluate_reshape(outputs, inputs); } bool op::v1::Reshape::constant_fold(OutputVector& output_values, const OutputVector& inputs_values) diff --git a/ngraph/core/src/op/result.cpp b/ngraph/core/src/op/result.cpp index 51c29b0bdeb..0af343c88ab 100644 --- a/ngraph/core/src/op/result.cpp +++ b/ngraph/core/src/op/result.cpp @@ -37,11 +37,13 @@ op::Result::Result(const Output& arg, bool needs_default_layout) bool ngraph::op::v0::Result::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Result_visit_attributes); return true; } void op::Result::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_Result_validate_and_infer_types); NODE_VALIDATION_CHECK( this, get_input_size() == 1, "Argument has ", get_input_size(), " outputs (1 expected)."); @@ -50,6 +52,7 @@ void op::Result::validate_and_infer_types() shared_ptr op::Result::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Result_clone_with_new_inputs); check_new_args_count(this, new_args); auto res = make_shared(new_args.at(0), m_needs_default_layout); @@ -58,15 +61,12 @@ shared_ptr op::Result::clone_with_new_inputs(const OutputVector& new_args) bool op::Result::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(Result_evaluate) - { - outputs[0]->set_unary(inputs[0]); - void* output = outputs[0]->get_data_ptr(); - void* input = inputs[0]->get_data_ptr(); - memcpy(output, input, outputs[0]->get_size_in_bytes()); - return true; - } - return false; + NGRAPH_OP_SCOPE(v0_Result_evaluate); + outputs[0]->set_unary(inputs[0]); + void* output = outputs[0]->get_data_ptr(); + void* input = inputs[0]->get_data_ptr(); + memcpy(output, input, outputs[0]->get_size_in_bytes()); + return true; } bool op::Result::constant_fold(OutputVector& output_values, const OutputVector& inputs_values) diff --git a/ngraph/core/src/op/reverse.cpp b/ngraph/core/src/op/reverse.cpp index 10e1264b82e..dd59e7a5b5a 100644 --- a/ngraph/core/src/op/reverse.cpp +++ b/ngraph/core/src/op/reverse.cpp @@ -51,12 +52,14 @@ op::v1::Reverse::Reverse(const Output& data, bool ngraph::op::v1::Reverse::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_Reverse_visit_attributes); visitor.on_attribute("mode", m_mode); return true; } void op::v1::Reverse::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_Reverse_validate_and_infer_types); if (m_mode == Mode::MASK) { NODE_VALIDATION_CHECK(this, @@ -135,6 +138,7 @@ void op::v1::Reverse::validate_and_infer_types() shared_ptr op::v1::Reverse::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_Reverse_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), m_mode); }
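
Editor's note: the GET_AXES hunk just below belongs to the same family as TYPE_OUT_CASE and COMPUTE_OUT_SHAPE_CASE earlier in the patch. Each macro expands to one case label of a switch over element::Type_t, and after this change NGRAPH_OP_SCOPE is an ordinary first statement of the case body instead of a block header. The braces around each case body survive the rewrite because they are still required whenever the body declares locals, as the IT/OT aliases in one_hot's TYPE_OUT_CASE do. A self-contained illustration of that C++ rule (toy enum and values, nothing from the patch):

    #include <cstdio>

    enum class Type_t { i32, i64 }; // stand-in for element::Type_t

    int main()
    {
        Type_t t = Type_t::i32;
        switch (t)
        {
        case Type_t::i32:
        {
            using T = int; // a per-case alias needs its own scope, hence the braces
            T value = 32;
            std::printf("%d\n", value);
        }
        break;
        case Type_t::i64:
        {
            using T = long;
            T value = 64;
            std::printf("%ld\n", value);
        }
        break;
        }
        return 0;
    }

@@ -163,10 +167,8 @@ namespace 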
reverseop #define GET_AXES(a, ...) \ case element::Type_t::a: \ { \ - NGRAPH_OP_SCOPE(OV_CC_CAT3(get_reverse_axes, _, a)) \ - { \ - reverseop::get_axes(__VA_ARGS__); \ - } \ + NGRAPH_OP_SCOPE(OV_CC_CAT3(get_reverse_axes, _, a)); \ + reverseop::get_axes(__VA_ARGS__); \ } \ break; @@ -213,8 +215,8 @@ bool op::v1::Reverse::evaluate_reverse(const HostTensorVector& outputs, bool op::v1::Reverse::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_Reverse_evaluate) { return evaluate_reverse(outputs, inputs); } - return false; + NGRAPH_OP_SCOPE(v1_Reverse_evaluate); + return evaluate_reverse(outputs, inputs); } namespace ngraph diff --git a/ngraph/core/src/op/reverse_sequence.cpp b/ngraph/core/src/op/reverse_sequence.cpp index b3d0b92eb2c..d0775295e51 100644 --- a/ngraph/core/src/op/reverse_sequence.cpp +++ b/ngraph/core/src/op/reverse_sequence.cpp @@ -16,6 +16,7 @@ #include #include +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/node.hpp" @@ -42,6 +43,7 @@ op::ReverseSequence::ReverseSequence(const Output& arg, bool ngraph::op::v0::ReverseSequence::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_ReverseSequence_visit_attributes); visitor.on_attribute("batch_axis", m_batch_axis); visitor.on_attribute("seq_axis", m_seq_axis); return true; @@ -49,6 +51,7 @@ bool ngraph::op::v0::ReverseSequence::visit_attributes(AttributeVisitor& visitor void op::ReverseSequence::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_ReverseSequence_validate_and_infer_types); auto input_shape = get_input_partial_shape(0); auto input_rank = input_shape.rank(); @@ -93,6 +96,7 @@ void op::ReverseSequence::validate_and_infer_types() shared_ptr op::ReverseSequence::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_ReverseSequence_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), m_batch_axis, m_seq_axis); } diff --git a/ngraph/core/src/op/rnn_cell.cpp b/ngraph/core/src/op/rnn_cell.cpp index 80dba75a894..1e7e01b65ad 100644 --- a/ngraph/core/src/op/rnn_cell.cpp +++ b/ngraph/core/src/op/rnn_cell.cpp @@ -77,11 +77,13 @@ op::v0::RNNCell::RNNCell(const Output& X, bool op::v0::RNNCell::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_RNNCell_visit_attributes); return op::util::RNNCellBase::visit_attributes(visitor); } void op::v0::RNNCell::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_RNNCell_validate_and_infer_types); for (const auto& input : inputs()) { if (input.get_partial_shape().rank().is_dynamic()) @@ -186,6 +188,7 @@ Output op::v0::RNNCell::get_default_bias_input() const shared_ptr op::v0::RNNCell::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_RNNCell_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 4) { diff --git a/ngraph/core/src/op/rnn_sequence.cpp b/ngraph/core/src/op/rnn_sequence.cpp index 5087b631d1e..bca6ab8947a 100644 --- a/ngraph/core/src/op/rnn_sequence.cpp +++ b/ngraph/core/src/op/rnn_sequence.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/rnn_sequence.hpp" +#include "itt.hpp" #include "ngraph/op/util/recurrent_sequence.hpp" #include "ngraph/opsets/opset4.hpp" @@ -57,6 +58,7 @@ op::v5::RNNSequence::RNNSequence(const Output& X, void op::v5::RNNSequence::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v5_RNNSequence_validate_and_infer_types); for (const auto& 
input : inputs()) { if (input.get_partial_shape().rank().is_dynamic()) @@ -169,6 +171,7 @@ void op::v5::RNNSequence::validate_and_infer_types() bool op::v5::RNNSequence::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v5_RNNSequence_visit_attributes); visitor.on_attribute("direction", m_direction); return op::util::RNNCellBase::visit_attributes(visitor); } @@ -176,6 +179,7 @@ bool op::v5::RNNSequence::visit_attributes(AttributeVisitor& visitor) shared_ptr op::v5::RNNSequence::clone_with_new_inputs(const ngraph::OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v5_RNNSequence_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), diff --git a/ngraph/core/src/op/roi_align.cpp b/ngraph/core/src/op/roi_align.cpp index 62abaf26bba..1ec0b991a41 100644 --- a/ngraph/core/src/op/roi_align.cpp +++ b/ngraph/core/src/op/roi_align.cpp @@ -64,6 +64,7 @@ op::v3::ROIAlign::ROIAlign(const Output& input, void op::v3::ROIAlign::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v3_ROIAlign_validate_and_infer_types); NODE_VALIDATION_CHECK( this, get_input_element_type(0).is_real() && get_input_element_type(1).is_real(), @@ -163,6 +164,7 @@ void op::v3::ROIAlign::validate_and_infer_types() bool op::v3::ROIAlign::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v3_ROIAlign_visit_attributes); visitor.on_attribute("pooled_h", m_pooled_h); visitor.on_attribute("pooled_w", m_pooled_w); visitor.on_attribute("sampling_ratio", m_sampling_ratio); @@ -174,6 +176,7 @@ bool op::v3::ROIAlign::visit_attributes(AttributeVisitor& visitor) shared_ptr op::v3::ROIAlign::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v3_ROIAlign_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), @@ -299,10 +302,7 @@ namespace roi_alinop bool op::v3::ROIAlign::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v3_ROIAlign_evaluate) - { - return roi_alinop::evaluate_roi_align( - inputs, outputs[0], m_pooled_h, m_pooled_w, m_sampling_ratio, m_spatial_scale, m_mode); - } - return false; + NGRAPH_OP_SCOPE(v3_ROIAlign_evaluate); + return roi_alinop::evaluate_roi_align( + inputs, outputs[0], m_pooled_h, m_pooled_w, m_sampling_ratio, m_spatial_scale, m_mode); } diff --git a/ngraph/core/src/op/roi_pooling.cpp b/ngraph/core/src/op/roi_pooling.cpp index 2002dc3654a..6a141734b03 100644 --- a/ngraph/core/src/op/roi_pooling.cpp +++ b/ngraph/core/src/op/roi_pooling.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/roi_pooling.hpp" +#include "itt.hpp" using namespace std; using namespace ngraph; @@ -36,6 +37,7 @@ op::ROIPooling::ROIPooling(const Output& input, void op::ROIPooling::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_ROIPooling_validate_and_infer_types); auto feat_maps_et = get_input_element_type(0); auto coords_et = get_input_element_type(1); NODE_VALIDATION_CHECK( @@ -139,6 +141,7 @@ void op::ROIPooling::validate_and_infer_types() shared_ptr op::ROIPooling::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_ROIPooling_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared( new_args.at(0), new_args.at(1), m_output_size, m_spatial_scale, m_method); @@ -146,6 +149,7 @@ shared_ptr op::ROIPooling::clone_with_new_inputs(const OutputVector& new_a bool op::ROIPooling::visit_attributes(AttributeVisitor& 
visitor) { + NGRAPH_OP_SCOPE(v0_ROIPooling_visit_attributes); visitor.on_attribute("output_size", m_output_size); visitor.on_attribute("pooled_h", m_output_size[0]); visitor.on_attribute("pooled_w", m_output_size[1]); diff --git a/ngraph/core/src/op/round.cpp b/ngraph/core/src/op/round.cpp index 00b1001bf01..ae55b5f1916 100644 --- a/ngraph/core/src/op/round.cpp +++ b/ngraph/core/src/op/round.cpp @@ -87,30 +87,30 @@ op::v5::Round::Round(const Output& arg, RoundMode mode) bool ngraph::op::v5::Round::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v5_Round_visit_attributes); visitor.on_attribute("mode", m_mode); return true; } void op::v5::Round::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v5_Round_validate_and_infer_types); set_output_size(1); set_output_type(0, get_input_element_type(0), get_input_partial_shape(0)); } shared_ptr op::v5::Round::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v5_Round_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), m_mode); } bool op::v5::Round::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v5_Round_evaluate) - { - return roundop::evaluate_round( - inputs[0], outputs[0], shape_size(get_output_shape(0)), get_mode()); - } - return false; + NGRAPH_OP_SCOPE(v5_Round_evaluate); + return roundop::evaluate_round( + inputs[0], outputs[0], shape_size(get_output_shape(0)), get_mode()); } namespace ngraph diff --git a/ngraph/core/src/op/scatter_elements_update.cpp b/ngraph/core/src/op/scatter_elements_update.cpp index a27ce2fb5bc..b4576334488 100644 --- a/ngraph/core/src/op/scatter_elements_update.cpp +++ b/ngraph/core/src/op/scatter_elements_update.cpp @@ -37,11 +37,13 @@ op::v3::ScatterElementsUpdate::ScatterElementsUpdate(const Output& data, bool op::v3::ScatterElementsUpdate::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v3_ScatterElementsUpdate_visit_attributes); return true; } void op::v3::ScatterElementsUpdate::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v3_ScatterElementsUpdate_validate_and_infer_types); element::Type data_et = get_input_element_type(0); element::Type indices_et = get_input_element_type(1); element::Type updates_et = get_input_element_type(2); @@ -125,6 +127,7 @@ void op::v3::ScatterElementsUpdate::validate_and_infer_types() shared_ptr op::v3::ScatterElementsUpdate::clone_with_new_inputs(const OutputVector& inputs) const { + NGRAPH_OP_SCOPE(v3_ScatterElementsUpdate_clone_with_new_inputs); NODE_VALIDATION_CHECK(this, inputs.size() == get_input_size(), "clone_with_new_inputs() required inputs size: ", @@ -165,10 +168,8 @@ namespace scatter_element_update #define TYPE_AXS_CASE(a, ...) \ case element::Type_t::a: \ { \ - NGRAPH_OP_SCOPE(OV_CC_CAT3(scatter_element_update_axs, _, a)) \ - { \ - rc = evaluate(__VA_ARGS__); \ - } \ + NGRAPH_OP_SCOPE(OV_CC_CAT3(scatter_element_update_axs, _, a)); \ + rc = evaluate(__VA_ARGS__); \ } \ break; @@ -203,10 +204,8 @@ namespace scatter_element_update #define TYPE_IND_CASE(a, ...) 
\ case element::Type_t::a: \ { \ - NGRAPH_OP_SCOPE(OV_CC_CAT3(scatter_element_update_ind, _, a)) \ - { \ - rc = evaluate(__VA_ARGS__); \ - } \ + NGRAPH_OP_SCOPE(OV_CC_CAT3(scatter_element_update_ind, _, a)); \ + rc = evaluate(__VA_ARGS__); \ } \ break; @@ -299,9 +298,6 @@ bool op::v3::ScatterElementsUpdate::evaluate_scatter_element_update( bool op::v3::ScatterElementsUpdate::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v3_ScatterElementsUpdate_evaluate) - { - return evaluate_scatter_element_update(outputs, inputs); - } - return false; + NGRAPH_OP_SCOPE(v3_ScatterElementsUpdate_evaluate); + return evaluate_scatter_element_update(outputs, inputs); } diff --git a/ngraph/core/src/op/scatter_nd_update.cpp b/ngraph/core/src/op/scatter_nd_update.cpp index 54a67426cd4..c520cd67faa 100644 --- a/ngraph/core/src/op/scatter_nd_update.cpp +++ b/ngraph/core/src/op/scatter_nd_update.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/scatter_nd_update.hpp" +#include "itt.hpp" using namespace std; using namespace ngraph; @@ -23,6 +24,7 @@ constexpr NodeTypeInfo op::v3::ScatterNDUpdate::type_info; shared_ptr op::v3::ScatterNDUpdate::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v3_ScatterNDUpdate_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(op::util::ScatterNDBase::INPUTS), new_args.at(op::util::ScatterNDBase::INDICES), diff --git a/ngraph/core/src/op/scatter_update.cpp b/ngraph/core/src/op/scatter_update.cpp index 5201e81eefc..5e680480941 100644 --- a/ngraph/core/src/op/scatter_update.cpp +++ b/ngraph/core/src/op/scatter_update.cpp @@ -37,6 +37,7 @@ op::v3::ScatterUpdate::ScatterUpdate(const Output& data, shared_ptr op::v3::ScatterUpdate::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v3_ScatterUpdate_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared( new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3)); @@ -55,10 +56,8 @@ namespace scatter_update #define GET_INDICES(a, ...) 
\ case element::Type_t::a: \ { \ - NGRAPH_OP_SCOPE(OV_CC_CAT3(get_scatter_update_indices, _, a)) \ - { \ - indices_casted_vector = scatter_update::get_indices(__VA_ARGS__); \ - } \ + NGRAPH_OP_SCOPE(OV_CC_CAT3(get_scatter_update_indices, _, a)); \ + indices_casted_vector = scatter_update::get_indices(__VA_ARGS__); \ } \ break; @@ -114,6 +113,6 @@ bool op::v3::ScatterUpdate::evaluate_scatter_update(const HostTensorVector& outp bool op::v3::ScatterUpdate::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v3_ScatterUpdate_evaluate) { return evaluate_scatter_update(outputs, inputs); } - return false; + NGRAPH_OP_SCOPE(v3_ScatterUpdate_evaluate); + return evaluate_scatter_update(outputs, inputs); } diff --git a/ngraph/core/src/op/select.cpp b/ngraph/core/src/op/select.cpp index f9b2077b796..b8b19636521 100644 --- a/ngraph/core/src/op/select.cpp +++ b/ngraph/core/src/op/select.cpp @@ -44,6 +44,7 @@ op::v1::Select::Select(const Output& arg0, void op::v1::Select::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_Select_validate_and_infer_types); // Condition element type check NODE_VALIDATION_CHECK(this, get_input_element_type(0).is_dynamic() || @@ -88,6 +89,7 @@ void op::v1::Select::validate_and_infer_types() shared_ptr op::v1::Select::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_Select_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared( new_args.at(0), new_args.at(1), new_args.at(2), m_auto_broadcast); @@ -95,6 +97,7 @@ shared_ptr op::v1::Select::clone_with_new_inputs(const OutputVector& new_a bool op::v1::Select::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_Select_visit_attributes); visitor.on_attribute("auto_broadcast", m_auto_broadcast); return true; } @@ -156,11 +159,7 @@ namespace detail bool op::v1::Select::evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const { - NGRAPH_OP_SCOPE(v1_Select_evaluate) - { - const auto autob = get_auto_broadcast(); - return detail::evaluate_select( - output_values, input_values, autob, get_output_element_type(0)); - } - return false; + NGRAPH_OP_SCOPE(v1_Select_evaluate); + const auto autob = get_auto_broadcast(); + return detail::evaluate_select(output_values, input_values, autob, get_output_element_type(0)); } diff --git a/ngraph/core/src/op/selu.cpp b/ngraph/core/src/op/selu.cpp index 2b8eddc8547..6545b5e3bb4 100644 --- a/ngraph/core/src/op/selu.cpp +++ b/ngraph/core/src/op/selu.cpp @@ -14,6 +14,7 @@ // limitations under the License. 
//***************************************************************************** #include "ngraph/op/selu.hpp" +#include "itt.hpp" #include "ngraph/op/add.hpp" #include "ngraph/op/constant.hpp" @@ -38,6 +39,7 @@ op::v0::Selu::Selu(const Output& data, const Output& alpha, const Ou bool ngraph::op::v0::Selu::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Selu_visit_attributes); return true; } @@ -62,6 +64,7 @@ OutputVector op::v0::Selu::decompose_op() const shared_ptr op::v0::Selu::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Selu_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), new_args.at(2)); } diff --git a/ngraph/core/src/op/shape_of.cpp b/ngraph/core/src/op/shape_of.cpp index 4bd475fbcc9..98adff10d32 100644 --- a/ngraph/core/src/op/shape_of.cpp +++ b/ngraph/core/src/op/shape_of.cpp @@ -41,6 +41,7 @@ op::v3::ShapeOf::ShapeOf(const Output& arg, element::Type output_type) void op::v3::ShapeOf::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v3_ShapeOf_validate_and_infer_types); NODE_VALIDATION_CHECK(this, m_output_type == element::i64 || m_output_type == element::i32, "Output type must be i32 or i64"); @@ -50,12 +51,14 @@ void op::v3::ShapeOf::validate_and_infer_types() bool ngraph::op::v3::ShapeOf::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v3_ShapeOf_visit_attributes); visitor.on_attribute("output_type", m_output_type); return true; } shared_ptr op::v3::ShapeOf::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v3_ShapeOf_clone_with_new_inputs); check_new_args_count(this, new_args); auto new_shape_of = make_shared(new_args.at(0), m_output_type); new_shape_of->set_is_foldable(m_is_foldable); @@ -154,11 +157,8 @@ namespace shape_of bool op::v3::ShapeOf::evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const { - NGRAPH_OP_SCOPE(v3_ShapeOf_evaluate) - { - return shape_of::evaluate_shape_of(output_values[0], input_values[0]); - } - return false; + NGRAPH_OP_SCOPE(v3_ShapeOf_evaluate); + return shape_of::evaluate_shape_of(output_values[0], input_values[0]); } bool op::v3::ShapeOf::constant_fold(OutputVector& output_values, const OutputVector& input_values) @@ -180,17 +180,20 @@ op::v0::ShapeOf::ShapeOf(const Output& arg) void op::v0::ShapeOf::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_ShapeOf_validate_and_infer_types); set_input_is_relevant_to_value(0, false); set_output_type(0, element::i64, PartialShape{get_input_partial_shape(0).rank()}); } bool ngraph::op::v0::ShapeOf::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_ShapeOf_visit_attributes); return true; } shared_ptr op::v0::ShapeOf::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_ShapeOf_clone_with_new_inputs); check_new_args_count(this, new_args); auto new_shape_of = make_shared(new_args.at(0)); NGRAPH_CHECK(new_shape_of.get(), @@ -206,11 +209,8 @@ shared_ptr op::v0::ShapeOf::clone_with_new_inputs(const OutputVector& new_ bool op::v0::ShapeOf::evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const { - NGRAPH_OP_SCOPE(v0_ShapeOf_evaluate) - { - return shape_of::evaluate_shape_of(output_values[0], input_values[0]); - } - return false; + NGRAPH_OP_SCOPE(v0_ShapeOf_evaluate); + return shape_of::evaluate_shape_of(output_values[0], input_values[0]); } bool op::v0::ShapeOf::constant_fold(OutputVector& output_values, const OutputVector& input_values) 
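
Editor's note: for orientation before the next file, a hedged usage sketch of the ShapeOf ops whose diff ends above. The two constructor forms are the ones visible in this patch, and validate_and_infer_types restricts the v3 output type to i32 or i64; the umbrella header and the folded values are assumptions for illustration:

    #include <memory>
    #include "ngraph/ngraph.hpp" // assumed umbrella header for the ngraph API

    using namespace ngraph;

    int main()
    {
        auto data = std::make_shared<op::Parameter>(element::f32, Shape{2, 4, 8});
        // Default i64 shape output, matching the v0 behavior.
        auto s64 = std::make_shared<op::v3::ShapeOf>(data, element::i64);
        // The other element type validate_and_infer_types accepts.
        auto s32 = std::make_shared<op::v3::ShapeOf>(data, element::i32);
        // With a static input shape, constant_fold can replace either node
        // with a Constant holding {2, 4, 8}.
        return 0;
    }
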
diff --git a/ngraph/core/src/op/shuffle_channels.cpp b/ngraph/core/src/op/shuffle_channels.cpp index 2bca88d5508..54a62db35d3 100644 --- a/ngraph/core/src/op/shuffle_channels.cpp +++ b/ngraph/core/src/op/shuffle_channels.cpp @@ -43,6 +43,7 @@ op::ShuffleChannels::ShuffleChannels(const Output& data, bool ngraph::op::v0::ShuffleChannels::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_ShuffleChannels_visit_attributes); visitor.on_attribute("axis", m_axis); visitor.on_attribute("group", m_group); return true; @@ -69,6 +70,7 @@ size_t op::ShuffleChannels::get_zero_based_axis() const void op::ShuffleChannels::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_ShuffleChannels_validate_and_infer_types); const auto& data_type = get_input_element_type(0); if (get_input_partial_shape(0).is_static()) { @@ -102,6 +104,7 @@ void op::ShuffleChannels::validate_and_infer_types() shared_ptr op::ShuffleChannels::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_ShuffleChannels_clone_with_new_inputs); if (new_args.size() != 1) { throw ngraph_error("Expected 1 element in new_args for the ShuffleChannels op but got " + @@ -187,6 +190,6 @@ bool op::ShuffleChannels::evaluate_shuffle_channels(const HostTensorVector& outp bool op::ShuffleChannels::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(ShuffleChannels_evaluate) { return evaluate_shuffle_channels(outputs, inputs); } - return false; + NGRAPH_OP_SCOPE(v0_ShuffleChannels_evaluate); + return evaluate_shuffle_channels(outputs, inputs); } diff --git a/ngraph/core/src/op/sigmoid.cpp b/ngraph/core/src/op/sigmoid.cpp index f32c22323ce..f6115192e80 100644 --- a/ngraph/core/src/op/sigmoid.cpp +++ b/ngraph/core/src/op/sigmoid.cpp @@ -30,6 +30,7 @@ constexpr NodeTypeInfo op::Sigmoid::type_info; shared_ptr op::Sigmoid::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Sigmoid_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -72,9 +73,6 @@ namespace sigmoid bool op::Sigmoid::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Sigmoid_evaluate) - { - return sigmoid::evaluate_sigmoid(inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v0_Sigmoid_evaluate); + return sigmoid::evaluate_sigmoid(inputs[0], outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/sign.cpp b/ngraph/core/src/op/sign.cpp index d917734f1fc..2bd12b4880f 100644 --- a/ngraph/core/src/op/sign.cpp +++ b/ngraph/core/src/op/sign.cpp @@ -34,11 +34,13 @@ op::Sign::Sign(const Output& arg) bool ngraph::op::v0::Sign::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Sign_visit_attributes); return true; } shared_ptr op::Sign::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Sign_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -75,9 +77,6 @@ namespace signop bool op::Sign::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Sign_evaluate) - { - return signop::evaluate_sign(inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v0_Sign_evaluate); + return signop::evaluate_sign(inputs[0], outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/sin.cpp b/ngraph/core/src/op/sin.cpp index 
373d968b4fc..6aaa8dcea5a 100644 --- a/ngraph/core/src/op/sin.cpp +++ b/ngraph/core/src/op/sin.cpp @@ -36,11 +36,13 @@ op::Sin::Sin(const Output& arg) bool ngraph::op::v0::Sin::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Sin_visit_attributes); return true; } shared_ptr op::Sin::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Sin_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -77,9 +79,6 @@ namespace sinop bool op::Sin::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Sin_evaluate) - { - return sinop::evaluate_sin(inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v0_Sin_evaluate); + return sinop::evaluate_sin(inputs[0], outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/sinh.cpp b/ngraph/core/src/op/sinh.cpp index e7267a0bd66..4d28bdc9e1f 100644 --- a/ngraph/core/src/op/sinh.cpp +++ b/ngraph/core/src/op/sinh.cpp @@ -36,11 +36,13 @@ op::Sinh::Sinh(const Output& arg) bool ngraph::op::v0::Sinh::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Sinh_visit_attributes); return true; } shared_ptr op::Sinh::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Sinh_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -77,9 +79,6 @@ namespace sinhop bool op::Sinh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Sinh_evaluate) - { - return sinhop::evaluate_sinh(inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v0_Sinh_evaluate); + return sinhop::evaluate_sinh(inputs[0], outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/softmax.cpp b/ngraph/core/src/op/softmax.cpp index da57b2891a6..413d618b320 100644 --- a/ngraph/core/src/op/softmax.cpp +++ b/ngraph/core/src/op/softmax.cpp @@ -73,12 +73,14 @@ op::v1::Softmax::Softmax(const Output& arg, const size_t axis) bool ngraph::op::v1::Softmax::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_Softmax_visit_attributes); visitor.on_attribute("axis", m_axis); return true; } void op::v1::Softmax::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_Softmax_validate_and_infer_types); const PartialShape& input_shape = get_input_partial_shape(0); if (input_shape.rank().is_static()) NODE_VALIDATION_CHECK(this, @@ -94,6 +96,7 @@ void op::v1::Softmax::validate_and_infer_types() shared_ptr op::v1::Softmax::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_Softmax_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), m_axis); } @@ -101,10 +104,7 @@ shared_ptr op::v1::Softmax::clone_with_new_inputs(const OutputVector& new_ bool op::v1::Softmax::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_Softmax_evaluate) - { - outputs[0]->set_unary(inputs[0]); - return evaluate_softmax(inputs[0], outputs[0], AxisSet{m_axis}); - } - return false; + NGRAPH_OP_SCOPE(v1_Softmax_evaluate); + outputs[0]->set_unary(inputs[0]); + return evaluate_softmax(inputs[0], outputs[0], AxisSet{m_axis}); } diff --git a/ngraph/core/src/op/softplus.cpp b/ngraph/core/src/op/softplus.cpp index f87f94c358d..042cafbffec 100644 --- a/ngraph/core/src/op/softplus.cpp +++ b/ngraph/core/src/op/softplus.cpp @@ -33,17 
+33,20 @@ op::v4::SoftPlus::SoftPlus(const Output& arg) bool op::v4::SoftPlus::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v4_SoftPlus_visit_attributes); return true; } void op::v4::SoftPlus::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v4_SoftPlus_validate_and_infer_types); set_output_size(1); set_output_type(0, get_input_element_type(0), get_input_partial_shape(0)); } shared_ptr op::v4::SoftPlus::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v4_SoftPlus_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -77,9 +80,6 @@ namespace softplus bool op::v4::SoftPlus::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v4_SoftPlus_evaluate) - { - return softplus::evaluate_softplus(inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v4_SoftPlus_evaluate); + return softplus::evaluate_softplus(inputs[0], outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/space_to_batch.cpp b/ngraph/core/src/op/space_to_batch.cpp index c8e67b2c193..99c14e2f5cd 100644 --- a/ngraph/core/src/op/space_to_batch.cpp +++ b/ngraph/core/src/op/space_to_batch.cpp @@ -44,6 +44,7 @@ ngraph::op::v1::SpaceToBatch::SpaceToBatch(const ngraph::Output& d void op::v1::SpaceToBatch::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_SpaceToBatch_validate_and_infer_types); PartialShape data_pshape = get_input_partial_shape(0); const auto& data_type = get_input_element_type(0); const auto& block_shape_type = get_input_element_type(1); @@ -131,6 +132,7 @@ void op::v1::SpaceToBatch::validate_and_infer_types() std::shared_ptr ngraph::op::v1::SpaceToBatch::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_SpaceToBatch_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared( new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3)); @@ -138,6 +140,7 @@ std::shared_ptr bool ngraph::op::v1::SpaceToBatch::visit_attributes(ngraph::AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_SpaceToBatch_visit_attributes); return true; } @@ -273,6 +276,6 @@ bool ngraph::op::v1::SpaceToBatch::evaluate_space_to_batch(const HostTensorVecto bool ngraph::op::v1::SpaceToBatch::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_SpaceToBatch) { return evaluate_space_to_batch(outputs, inputs); } - return false; + NGRAPH_OP_SCOPE(v1_SpaceToBatch_evaluate); + return evaluate_space_to_batch(outputs, inputs); } diff --git a/ngraph/core/src/op/space_to_depth.cpp b/ngraph/core/src/op/space_to_depth.cpp index 683d97b64c9..aac3070fdb9 100644 --- a/ngraph/core/src/op/space_to_depth.cpp +++ b/ngraph/core/src/op/space_to_depth.cpp @@ -50,6 +50,7 @@ op::SpaceToDepth::SpaceToDepth(const Output& data, const std::string& mode bool ngraph::op::v0::SpaceToDepth::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_SpaceToDepth_visit_attributes); visitor.on_attribute("block_size", m_blocksize); visitor.on_attribute("mode", m_mode); return true; @@ -57,6 +58,7 @@ bool ngraph::op::v0::SpaceToDepth::visit_attributes(AttributeVisitor& visitor) shared_ptr op::SpaceToDepth::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_SpaceToDepth_clone_with_new_inputs); if (new_args.size() != 1) { throw ngraph_error("Incorrect number of new arguments"); @@ -66,6 +68,7 @@ shared_ptr op::SpaceToDepth::clone_with_new_inputs(const
OutputVector& new void ngraph::op::v0::SpaceToDepth::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_SpaceToDepth_validate_and_infer_types); PartialShape data_pshape = get_input_partial_shape(0); const auto& data_type = get_input_element_type(0); @@ -228,8 +231,8 @@ bool ngraph::op::v0::SpaceToDepth::evaluate_space_to_depth(const HostTensorVecto bool ngraph::op::v0::SpaceToDepth::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_SpaceToDepth_evaluate) { return evaluate_space_to_depth(outputs, inputs); } - return false; + NGRAPH_OP_SCOPE(v0_SpaceToDepth_evaluate); + return evaluate_space_to_depth(outputs, inputs); } namespace ngraph diff --git a/ngraph/core/src/op/split.cpp b/ngraph/core/src/op/split.cpp index 83a13332b6b..f0ed5bcfb22 100644 --- a/ngraph/core/src/op/split.cpp +++ b/ngraph/core/src/op/split.cpp @@ -39,12 +39,14 @@ op::v1::Split::Split(const Output& data, const Output& axis, const s bool ngraph::op::v1::Split::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_Split_visit_attributes); visitor.on_attribute("num_splits", m_num_splits); return true; } void op::v1::Split::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_Split_validate_and_infer_types); const auto data_ps = input_value(0).get_partial_shape(); const auto axis_ps = input_value(1).get_partial_shape(); const auto axis_et = input_value(1).get_element_type(); @@ -102,6 +104,7 @@ void op::v1::Split::validate_and_infer_types() shared_ptr op::v1::Split::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_Split_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), m_num_splits); } @@ -149,11 +152,8 @@ namespace split bool op::v1::Split::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_Split_evaluate) - { - const auto& data = inputs[0]; - const auto& axis = inputs[1]; - return split::evaluate_split(data, axis, outputs, m_num_splits, this); - } - return false; + NGRAPH_OP_SCOPE(v1_Split_evaluate); + const auto& data = inputs[0]; + const auto& axis = inputs[1]; + return split::evaluate_split(data, axis, outputs, m_num_splits, this); } diff --git a/ngraph/core/src/op/sqrt.cpp b/ngraph/core/src/op/sqrt.cpp index ba64e442222..c24bd0c2160 100644 --- a/ngraph/core/src/op/sqrt.cpp +++ b/ngraph/core/src/op/sqrt.cpp @@ -36,11 +36,13 @@ op::Sqrt::Sqrt(const Output& arg) bool ngraph::op::v0::Sqrt::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Sqrt_visit_attributes); return true; } shared_ptr op::Sqrt::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Sqrt_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -75,9 +77,6 @@ namespace sqrtop bool op::Sqrt::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Sqrt_evaluate) - { - return sqrtop::evaluate_sqrt(inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v0_Sqrt_evaluate); + return sqrtop::evaluate_sqrt(inputs[0], outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/squared_difference.cpp b/ngraph/core/src/op/squared_difference.cpp index c90ffb828b1..9d4069eedb3 100644 --- a/ngraph/core/src/op/squared_difference.cpp +++ b/ngraph/core/src/op/squared_difference.cpp @@ -15,6 +15,7 @@
//***************************************************************************** #include "ngraph/op/squared_difference.hpp" +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/node.hpp" #include "ngraph/op/multiply.hpp" @@ -39,6 +40,7 @@ op::SquaredDifference::SquaredDifference(const Output& x1, bool ngraph::op::v0::SquaredDifference::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_SquaredDifference_visit_attributes); visitor.on_attribute("auto_broadcast", m_autobroadcast); return true; } @@ -55,6 +57,7 @@ OutputVector op::SquaredDifference::decompose_op() const shared_ptr op::SquaredDifference::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_SquaredDifference_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), get_autob()); diff --git a/ngraph/core/src/op/squeeze.cpp b/ngraph/core/src/op/squeeze.cpp index 40a4c749047..a2a2fde966c 100644 --- a/ngraph/core/src/op/squeeze.cpp +++ b/ngraph/core/src/op/squeeze.cpp @@ -113,6 +113,7 @@ void op::Squeeze::pre_validate_and_infer_types() bool ngraph::op::v0::Squeeze::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Squeeze_visit_attributes); return true; } @@ -132,6 +133,7 @@ OutputVector op::Squeeze::decompose_op() const shared_ptr op::Squeeze::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Squeeze_clone_with_new_inputs); if (new_args.size() != 2) { throw ngraph_error("Incorrect number of new arguments"); @@ -173,11 +175,8 @@ namespace squeeze bool op::v0::Squeeze::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Squeeze_evaluate) - { - return squeeze::evaluate_squeeze(inputs[0], inputs[1], outputs[0]); - } - return false; + NGRAPH_OP_SCOPE(v0_Squeeze_evaluate); + return squeeze::evaluate_squeeze(inputs[0], inputs[1], outputs[0]); } bool op::v0::Squeeze::constant_fold(OutputVector& output_values, const OutputVector& inputs_values) diff --git a/ngraph/core/src/op/strided_slice.cpp b/ngraph/core/src/op/strided_slice.cpp index 3085e0e96bc..ec442c467de 100644 --- a/ngraph/core/src/op/strided_slice.cpp +++ b/ngraph/core/src/op/strided_slice.cpp @@ -108,6 +108,7 @@ op::v1::StridedSlice::StridedSlice(const Output& data, bool op::v1::StridedSlice::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_StridedSlice_visit_attributes); visitor.on_attribute("begin_mask", m_begin_mask); visitor.on_attribute("end_mask", m_end_mask); visitor.on_attribute("new_axis_mask", m_new_axis_mask); @@ -118,6 +119,7 @@ bool op::v1::StridedSlice::visit_attributes(AttributeVisitor& visitor) void op::v1::StridedSlice::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_StridedSlice_validate_and_infer_types); const auto& begin_mask_et = get_input_element_type(1); const auto& end_mask_et = get_input_element_type(2); NODE_VALIDATION_CHECK(this, @@ -224,6 +226,7 @@ AxisSet op::v1::StridedSlice::convert_mask_to_axis_set(const std::vector op::v1::StridedSlice::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_StridedSlice_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), @@ -281,19 +284,15 @@ namespace strided_slice bool op::v1::StridedSlice::evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const { - NGRAPH_OP_SCOPE(v1_StridedSlice_evaluate) - { - return strided_slice::evaluate_strided_slice( - 
input_values[0], - input_values[1], - input_values[2], - input_values[3], - convert_mask_to_axis_set(get_begin_mask()), - convert_mask_to_axis_set(get_end_mask()), - convert_mask_to_axis_set(get_new_axis_mask()), - convert_mask_to_axis_set(get_shrink_axis_mask()), - convert_mask_to_axis_set(get_ellipsis_mask()), - output_values[0]); - } - return false; + NGRAPH_OP_SCOPE(v1_StridedSlice_evaluate); + return strided_slice::evaluate_strided_slice(input_values[0], + input_values[1], + input_values[2], + input_values[3], + convert_mask_to_axis_set(get_begin_mask()), + convert_mask_to_axis_set(get_end_mask()), + convert_mask_to_axis_set(get_new_axis_mask()), + convert_mask_to_axis_set(get_shrink_axis_mask()), + convert_mask_to_axis_set(get_ellipsis_mask()), + output_values[0]); } diff --git a/ngraph/core/src/op/subtract.cpp b/ngraph/core/src/op/subtract.cpp index 791a9f514ff..24d5c61546e 100644 --- a/ngraph/core/src/op/subtract.cpp +++ b/ngraph/core/src/op/subtract.cpp @@ -76,6 +76,7 @@ op::v1::Subtract::Subtract(const Output& arg0, shared_ptr op::v1::Subtract::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_Subtract_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } @@ -83,9 +84,6 @@ shared_ptr op::v1::Subtract::clone_with_new_inputs(const OutputVector& new bool op::v1::Subtract::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_Subtract_evaluate) - { - return subtract::evaluate_subtract(inputs[0], inputs[1], outputs[0], get_autob()); - } - return false; + NGRAPH_OP_SCOPE(v1_Subtract_evaluate); + return subtract::evaluate_subtract(inputs[0], inputs[1], outputs[0], get_autob()); } diff --git a/ngraph/core/src/op/swish.cpp b/ngraph/core/src/op/swish.cpp index ab008eba866..e43ee7cb7af 100644 --- a/ngraph/core/src/op/swish.cpp +++ b/ngraph/core/src/op/swish.cpp @@ -41,11 +41,13 @@ op::v4::Swish::Swish(const Output& arg, const Output& beta) bool op::v4::Swish::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v4_Swish_visit_attributes); return true; } void op::v4::Swish::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v4_Swish_validate_and_infer_types); auto inputs_count = input_values().size(); NODE_VALIDATION_CHECK(this, inputs_count == 1 || inputs_count == 2, @@ -76,6 +78,7 @@ void op::v4::Swish::validate_and_infer_types() shared_ptr op::v4::Swish::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v4_Swish_clone_with_new_inputs); if (new_args.size() == 1) { return make_shared(new_args.at(0)); @@ -128,9 +131,6 @@ namespace swish bool op::v4::Swish::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v4_Swish_evaluate) - { - return swish::evaluate_swish(inputs, outputs[0], shape_size(get_output_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v4_Swish_evaluate); + return swish::evaluate_swish(inputs, outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/tan.cpp b/ngraph/core/src/op/tan.cpp index 66c605da13e..c8a360aa63b 100644 --- a/ngraph/core/src/op/tan.cpp +++ b/ngraph/core/src/op/tan.cpp @@ -37,11 +37,13 @@ op::Tan::Tan(const Output& arg) bool ngraph::op::v0::Tan::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Tan_visit_attributes); return true; } shared_ptr op::Tan::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Tan_clone_with_new_inputs); 
check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -78,9 +80,6 @@ namespace tanop bool op::Tan::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Tan_evaluate) - { - return tanop::evaluate_tan(inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v0_Tan_evaluate); + return tanop::evaluate_tan(inputs[0], outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/tanh.cpp b/ngraph/core/src/op/tanh.cpp index 12227975f14..73f120c2d3f 100644 --- a/ngraph/core/src/op/tanh.cpp +++ b/ngraph/core/src/op/tanh.cpp @@ -36,11 +36,13 @@ op::Tanh::Tanh(const Output& arg) bool ngraph::op::v0::Tanh::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Tanh_visit_attributes); return true; } shared_ptr op::Tanh::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Tanh_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } @@ -76,9 +78,6 @@ namespace tanhop bool op::Tanh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Tanh_evaluate) - { - return tanhop::evaluate_tanh(inputs[0], outputs[0], shape_size(get_output_shape(0))); - } - return false; + NGRAPH_OP_SCOPE(v0_Tanh_evaluate); + return tanhop::evaluate_tanh(inputs[0], outputs[0], shape_size(get_output_shape(0))); } diff --git a/ngraph/core/src/op/tensor_iterator.cpp b/ngraph/core/src/op/tensor_iterator.cpp index 1693bcbb5ca..e7370929a46 100644 --- a/ngraph/core/src/op/tensor_iterator.cpp +++ b/ngraph/core/src/op/tensor_iterator.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/tensor_iterator.hpp" +#include "itt.hpp" #include "ngraph/factory.hpp" #include "ngraph/graph_util.hpp" #include "ngraph/specialize_function.hpp" @@ -31,6 +32,7 @@ op::v0::TensorIterator::TensorIterator(const OutputVector& values) bool op::v0::TensorIterator::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_TensorIterator_visit_attributes); visitor.on_attribute("body", m_body); visitor.on_attribute("input_descriptions", m_input_descriptions); visitor.on_attribute("output_descriptions", m_output_descriptions); @@ -92,6 +94,7 @@ void op::v0::TensorIterator::revalidate_and_infer_types_for_body_ops() void op::v0::TensorIterator::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_TensorIterator_validate_and_infer_types); NODE_VALIDATION_CHECK(this, get_input_size() == m_input_descriptions.size(), "Number of inputs must be the same as number of input descriptions"); @@ -201,12 +204,12 @@ void op::v0::TensorIterator::validate_and_infer_types() if (body_value_shape.empty()) { - NODE_VALIDATION_CHECK( - this, - axis == 0, - "Axis must be equal to 0 if concatenated output tensor slices are scalars. " - "TensorIterator output index: ", - index); + NODE_VALIDATION_CHECK(this, + axis == 0, + "Axis must be equal to 0 if concatenated output " + "tensor slices are scalars. 
" + "TensorIterator output index: ", + index); out_shape = Shape(1); } @@ -244,6 +247,7 @@ std::shared_ptr op::v0::TensorIterator::get_function() std::shared_ptr op::v0::TensorIterator::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_TensorIterator_clone_with_new_inputs); auto op = make_shared(new_args); NGRAPH_CHECK(op.get(), op != nullptr, diff --git a/ngraph/core/src/op/tile.cpp b/ngraph/core/src/op/tile.cpp index 529535da349..6df28be1c59 100644 --- a/ngraph/core/src/op/tile.cpp +++ b/ngraph/core/src/op/tile.cpp @@ -33,11 +33,13 @@ op::v0::Tile::Tile(const Output& data, const Output& repeats) bool ngraph::op::v0::Tile::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Tile_visit_attributes); return true; } void op::v0::Tile::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_Tile_validate_and_infer_types); auto arg_et = get_input_element_type(0); // Repeats should have integer data type. For now we only allow i64 @@ -92,6 +94,7 @@ void op::v0::Tile::validate_and_infer_types() shared_ptr op::v0::Tile::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Tile_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1)); } @@ -135,6 +138,6 @@ bool op::v0::Tile::evaluate_tile(const HostTensorVector& outputs, bool op::v0::Tile::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Tile_evaluate) { return evaluate_tile(outputs, inputs); } - return false; + NGRAPH_OP_SCOPE(v0_Tile_evaluate); + return evaluate_tile(outputs, inputs); } diff --git a/ngraph/core/src/op/topk.cpp b/ngraph/core/src/op/topk.cpp index 6db15667138..51ce1ed2973 100644 --- a/ngraph/core/src/op/topk.cpp +++ b/ngraph/core/src/op/topk.cpp @@ -67,10 +67,8 @@ namespace topk #define EXECUTE_EVALUATE_TOPK(a, ...) \ case element::Type_t::a: \ { \ - NGRAPH_OP_SCOPE(OV_CC_CAT3(exec_topk_eval, _, a)) \ - { \ - rc = evaluate_execute(__VA_ARGS__); \ - } \ + NGRAPH_OP_SCOPE(OV_CC_CAT3(exec_topk_eval, _, a)); \ + rc = evaluate_execute(__VA_ARGS__); \ } \ break @@ -191,10 +189,8 @@ namespace topk #define CASE_GET_K(a, ...) 
\ case element::Type_t::a: \ { \ - NGRAPH_OP_SCOPE(OV_CC_CAT3(topk_get_k, _, a)) \ - { \ - k = get_k_from_hosttensor(__VA_ARGS__); \ - } \ + NGRAPH_OP_SCOPE(OV_CC_CAT3(topk_get_k, _, a)); \ + k = get_k_from_hosttensor(__VA_ARGS__); \ } \ break @@ -259,6 +255,7 @@ op::v1::TopK::TopK(const Output& data, bool ngraph::op::v1::TopK::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_TopK_visit_attributes); visitor.on_attribute("axis", m_axis); visitor.on_attribute("mode", m_mode); visitor.on_attribute("sort", m_sort); @@ -267,6 +264,7 @@ bool ngraph::op::v1::TopK::visit_attributes(AttributeVisitor& visitor) void op::v1::TopK::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_TopK_validate_and_infer_types); const auto& input_partial_shape = get_input_partial_shape(0); const auto input_rank = input_partial_shape.rank(); @@ -422,6 +420,7 @@ size_t op::v1::TopK::validate_and_get_k(const shared_ptr& k_consta shared_ptr op::v1::TopK::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_TopK_clone_with_new_inputs); check_new_args_count(this, new_args); auto new_v1_topk = make_shared(new_args.at(0), new_args.at(1), m_axis, m_mode, m_sort); @@ -455,48 +454,45 @@ void op::v1::TopK::set_k(size_t k) bool op::v1::TopK::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_TopK_evaluate) + NGRAPH_OP_SCOPE(v1_TopK_evaluate); + Shape arg_shape = inputs[0]->get_shape(); + // 1. get axis, mode ( max/min), sort_type + size_t axis = ngraph::normalize_axis(this, m_axis, arg_shape.size()); + bool compute_max = get_mode() == TopKMode::MAX ? true : false; + SortType sort_type = get_sort_type(); + + // 2. get value of k - from constant node or from HT + size_t k = 0; + if (op::is_constant(input_value(1).get_node())) { - Shape arg_shape = inputs[0]->get_shape(); - // 1. get axis, mode ( max/min), sort_type - size_t axis = ngraph::normalize_axis(this, m_axis, arg_shape.size()); - bool compute_max = get_mode() == TopKMode::MAX ? true : false; - SortType sort_type = get_sort_type(); - - // 2. get value of k - from constant node or from HT - size_t k = 0; - if (op::is_constant(input_value(1).get_node())) - { - k = read_k_from_constant_node(input_value(1).get_node_shared_ptr(), - get_input_element_type(1)); - NGRAPH_CHECK(k <= arg_shape[axis], "'K' exceeds the dimension of top_k_axis"); - } - else - { - k = topk::read_k_from_host_tensor(inputs[1]); - } - - // 3. Compute output_shape - auto output_shape = compute_output_shape(this->description(), inputs[0]->get_shape(), k); - - // do this after compute_output_shape - if (k == 0) - { - // the kernel can't handle k = 0, but output_shape[axis] = arg_shape[axis] - k = arg_shape[axis]; - } - - return topk::evaluate_topk(inputs[0], - outputs[1], - outputs[0], - output_shape, - axis, - k, - compute_max, - sort_type, - get_index_element_type()); + k = read_k_from_constant_node(input_value(1).get_node_shared_ptr(), + get_input_element_type(1)); + NGRAPH_CHECK(k <= arg_shape[axis], "'K' exceeds the dimension of top_k_axis"); } - return false; + else + { + k = topk::read_k_from_host_tensor(inputs[1]); + } + + // 3. 
Compute output_shape + auto output_shape = compute_output_shape(this->description(), inputs[0]->get_shape(), k); + + // do this after compute_output_shape + if (k == 0) + { + // the kernel can't handle k = 0, but output_shape[axis] = arg_shape[axis] + k = arg_shape[axis]; + } + + return topk::evaluate_topk(inputs[0], + outputs[1], + outputs[0], + output_shape, + axis, + k, + compute_max, + sort_type, + get_index_element_type()); } // v3 version starts @@ -526,6 +522,7 @@ op::v3::TopK::TopK(const Output& data, bool ngraph::op::v3::TopK::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v3_TopK_visit_attributes); visitor.on_attribute("axis", m_axis); visitor.on_attribute("mode", m_mode); visitor.on_attribute("sort", m_sort); @@ -535,6 +532,7 @@ bool ngraph::op::v3::TopK::visit_attributes(AttributeVisitor& visitor) void op::v3::TopK::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v3_TopK_validate_and_infer_types); NODE_VALIDATION_CHECK(this, get_input_element_type(1).is_integral_number(), "K input has to be an integer type, which does match the provided one:", @@ -567,6 +565,7 @@ size_t op::v3::TopK::read_k_from_constant_node(const shared_ptr& node, shared_ptr op::v3::TopK::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v3_TopK_clone_with_new_inputs); check_new_args_count(this, new_args); auto new_v3_topk = make_shared(new_args.at(0), new_args.at(1), m_axis, m_mode, m_sort); @@ -578,6 +577,6 @@ shared_ptr op::v3::TopK::clone_with_new_inputs(const OutputVector& new_arg bool op::v3::TopK::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v3_TopK_evaluate) { return op::v1::TopK::evaluate(outputs, inputs); } - return false; + NGRAPH_OP_SCOPE(v3_TopK_evaluate); + return op::v1::TopK::evaluate(outputs, inputs); } diff --git a/ngraph/core/src/op/transpose.cpp b/ngraph/core/src/op/transpose.cpp index db59e54be32..942a9a82218 100644 --- a/ngraph/core/src/op/transpose.cpp +++ b/ngraph/core/src/op/transpose.cpp @@ -34,11 +34,13 @@ op::v1::Transpose::Transpose(const Output& arg, const Output& input_ bool ngraph::op::v1::Transpose::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_Transpose_visit_attributes); return true; } void op::v1::Transpose::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_Transpose_validate_and_infer_types); const auto& input_order_et = get_input_element_type(1); NODE_VALIDATION_CHECK(this, input_order_et.is_dynamic() || input_order_et.is_integral_number(), @@ -82,6 +84,7 @@ void op::v1::Transpose::validate_and_infer_types() shared_ptr op::v1::Transpose::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_Transpose_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args[0], new_args[1]); } @@ -144,9 +147,6 @@ namespace transpose bool op::v1::Transpose::evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const { - NGRAPH_OP_SCOPE(v1_Transpose_evaluate) - { - return transpose::evaluate_transpose(input_values[0], input_values[1], output_values[0]); - } - return false; + NGRAPH_OP_SCOPE(v1_Transpose_evaluate); + return transpose::evaluate_transpose(input_values[0], input_values[1], output_values[0]); } diff --git a/ngraph/core/src/op/unsqueeze.cpp b/ngraph/core/src/op/unsqueeze.cpp index 059ff32b6ec..8c3532debad 100644 --- a/ngraph/core/src/op/unsqueeze.cpp +++ b/ngraph/core/src/op/unsqueeze.cpp @@ -39,6 +39,7 @@ op::v0::Unsqueeze::Unsqueeze(const Output& data, const Output& 
axes) void op::v0::Unsqueeze::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_Unsqueeze_validate_and_infer_types); const auto data = input_value(0); auto data_partial_shape = data.get_partial_shape(); const auto data_rank = data_partial_shape.rank(); @@ -79,11 +80,13 @@ void op::v0::Unsqueeze::validate_and_infer_types() bool op::v0::Unsqueeze::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_Unsqueeze_visit_attributes); return true; } shared_ptr op::v0::Unsqueeze::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Unsqueeze_clone_with_new_inputs); if (new_args.size() != 2) { throw ngraph_error("Incorrect number of new arguments"); @@ -150,11 +153,8 @@ namespace unsqueeze bool op::v0::Unsqueeze::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Unsqueeze_evaluate) - { - return unsqueeze::evaluate_unsqueeze(inputs[0], inputs[1], outputs[0]); - } - return false; + NGRAPH_OP_SCOPE(v0_Unsqueeze_evaluate); + return unsqueeze::evaluate_unsqueeze(inputs[0], inputs[1], outputs[0]); } bool op::v0::Unsqueeze::constant_fold(OutputVector& output_values, diff --git a/ngraph/core/src/op/util/arithmetic_reduction.cpp b/ngraph/core/src/op/util/arithmetic_reduction.cpp index 09b17f95297..e7c8b68b6e8 100644 --- a/ngraph/core/src/op/util/arithmetic_reduction.cpp +++ b/ngraph/core/src/op/util/arithmetic_reduction.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/util/arithmetic_reduction.hpp" +#include "itt.hpp" #include "ngraph/op/constant.hpp" #include "ngraph/validation_util.hpp" @@ -69,6 +70,7 @@ void op::util::ArithmeticReduction::set_reduction_axes(const AxisSet& reduction_ void op::util::ArithmeticReduction::validate_and_infer_types() { + NGRAPH_OP_SCOPE(util_ArithmeticReduction_validate_and_infer_types); auto input_shape = get_input_partial_shape(0); const auto input_rank = input_shape.rank(); diff --git a/ngraph/core/src/op/util/arithmetic_reductions_keep_dims.cpp b/ngraph/core/src/op/util/arithmetic_reductions_keep_dims.cpp index ced0b236841..afaa68d823d 100644 --- a/ngraph/core/src/op/util/arithmetic_reductions_keep_dims.cpp +++ b/ngraph/core/src/op/util/arithmetic_reductions_keep_dims.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/op/constant.hpp" #include "ngraph/validation_util.hpp" @@ -33,12 +34,14 @@ op::util::ArithmeticReductionKeepDims::ArithmeticReductionKeepDims( bool ngraph::op::util::ArithmeticReductionKeepDims::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_util_ArithmeticReductionKeepDims_visit_attributes); visitor.on_attribute("keep_dims", m_keep_dims); return true; } void op::util::ArithmeticReductionKeepDims::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_util_ArithmeticReductionKeepDims_validate_and_infer_types); if (m_keep_dims) { auto input_shape = get_input_partial_shape(0); @@ -84,7 +87,7 @@ void op::util::ArithmeticReductionKeepDims::validate_and_infer_types() } else { - dims.push_back(Dimension{1}); + dims.emplace_back(Dimension{1}); } } result_shape = PartialShape(dims); diff --git a/ngraph/core/src/op/util/binary_elementwise_arithmetic.cpp b/ngraph/core/src/op/util/binary_elementwise_arithmetic.cpp index 7f9b4afbeec..e4195a44c93 100644 --- 
a/ngraph/core/src/op/util/binary_elementwise_arithmetic.cpp +++ b/ngraph/core/src/op/util/binary_elementwise_arithmetic.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/op/util/elementwise_args.hpp" @@ -54,11 +55,13 @@ void op::util::BinaryElementwiseArithmetic::validate_and_infer_elementwise_arith void op::util::BinaryElementwiseArithmetic::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_util_BinaryElementwiseArithmetic_validate_and_infer_types); validate_and_infer_elementwise_arithmetic(m_autob); } bool op::util::BinaryElementwiseArithmetic::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_util_BinaryElementwiseArithmetic_visit_attributes); visitor.on_attribute("auto_broadcast", m_autob); return true; } diff --git a/ngraph/core/src/op/util/binary_elementwise_comparison.cpp b/ngraph/core/src/op/util/binary_elementwise_comparison.cpp index f8f35d99721..aa45e57bd14 100644 --- a/ngraph/core/src/op/util/binary_elementwise_comparison.cpp +++ b/ngraph/core/src/op/util/binary_elementwise_comparison.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/util/binary_elementwise_comparison.hpp" +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/op/util/elementwise_args.hpp" @@ -36,6 +37,7 @@ op::util::BinaryElementwiseComparison::BinaryElementwiseComparison(const Output< void op::util::BinaryElementwiseComparison::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_util_BinaryElementwiseComparison_validate_and_infer_types); auto args_et_pshape = op::util::validate_and_infer_elementwise_args(this, m_autob); PartialShape& args_pshape = std::get<1>(args_et_pshape); @@ -44,6 +46,7 @@ void op::util::BinaryElementwiseComparison::validate_and_infer_types() bool op::util::BinaryElementwiseComparison::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_util_BinaryElementwiseComparison_visit_attributes); visitor.on_attribute("auto_broadcast", m_autob); return true; } diff --git a/ngraph/core/src/op/util/binary_elementwise_logical.cpp b/ngraph/core/src/op/util/binary_elementwise_logical.cpp index 6c7dc0bf51f..2a4c52b70b1 100644 --- a/ngraph/core/src/op/util/binary_elementwise_logical.cpp +++ b/ngraph/core/src/op/util/binary_elementwise_logical.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/util/binary_elementwise_logical.hpp" +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/op/util/elementwise_args.hpp" @@ -54,11 +55,13 @@ void op::util::BinaryElementwiseLogical::validate_and_infer_elementwise_logical( void op::util::BinaryElementwiseLogical::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_util_BinaryElementwiseLogical_validate_and_infer_types); validate_and_infer_elementwise_logical(m_autob); } bool op::util::BinaryElementwiseLogical::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_util_BinaryElementwiseLogical_visit_attributes); visitor.on_attribute("auto_broadcast", m_autob); return true; } diff --git a/ngraph/core/src/op/util/broadcast_base.cpp b/ngraph/core/src/op/util/broadcast_base.cpp index 50d371aaefe..2c66c3e6553 100644 --- a/ngraph/core/src/op/util/broadcast_base.cpp +++ b/ngraph/core/src/op/util/broadcast_base.cpp @@ -155,6 +155,7 @@ void 
op::util::BroadcastBase::validate_target_shape_none(const Shape& arg_shape, void op::util::BroadcastBase::validate_and_infer_types() { + NGRAPH_OP_SCOPE(util_BroadcastBase_validate_and_infer_types); // shape node should have integer data type. For now we only allow i64 auto shape_et = get_input_element_type(1); NODE_VALIDATION_CHECK(this, @@ -361,17 +362,14 @@ bool op::util::BroadcastBase::evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const AxisSet& broadcast_axes) const { - NGRAPH_OP_SCOPE(util_BroadcastBase_evaluate_axes) - { - runtime::reference::broadcast(arg0->get_data_ptr(), - out->get_data_ptr(), - arg0->get_shape(), - out->get_shape(), - broadcast_axes, - arg0->get_element_type().size()); - return true; - } - return false; + NGRAPH_OP_SCOPE(util_BroadcastBase_evaluate_axes); + runtime::reference::broadcast(arg0->get_data_ptr(), + out->get_data_ptr(), + arg0->get_shape(), + out->get_shape(), + broadcast_axes, + arg0->get_element_type().size()); + return true; } namespace @@ -502,52 +500,48 @@ Shape op::util::BroadcastBase::get_target_shape(const HostTensorPtr& input1) con bool op::util::BroadcastBase::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(util_BroadcastBase_evaluate) + NGRAPH_OP_SCOPE(util_BroadcastBase_evaluate); + Shape target_shape = get_target_shape(inputs[1]); + + PartialShape result_shape; + std::pair pair_broadcast_axes; + auto arg_shape = inputs[0]->get_shape(); + + if (m_mode.m_type == BroadcastType::NONE) { - Shape target_shape = get_target_shape(inputs[1]); - - PartialShape result_shape; - std::pair pair_broadcast_axes; - auto arg_shape = inputs[0]->get_shape(); - - if (m_mode.m_type == BroadcastType::NONE) + AxisVector axes_mapping_val; + const auto axes_mapping_constant = + as_type_ptr(input_value(2).get_node_shared_ptr()); + if (axes_mapping_constant) { - AxisVector axes_mapping_val; - const auto axes_mapping_constant = - as_type_ptr(input_value(2).get_node_shared_ptr()); - if (axes_mapping_constant) - { - axes_mapping_val = axes_mapping_constant->get_axis_vector_val(); - } - else - { - // read from HT and save as AxisVector - get_axis_vector_from_ht(inputs[2], axes_mapping_val, arg_shape); - } - pair_broadcast_axes = get_broadcast_axes_none(axes_mapping_val, target_shape.size()); - validate_target_shape_none(inputs[0]->get_shape(), axes_mapping_val, target_shape); - result_shape = target_shape; - } - else if (m_mode.m_type == BroadcastType::PDPD) - { - result_shape = get_result_shape_pdpd(arg_shape, target_shape, m_mode); - pair_broadcast_axes = - get_broadcast_axes_numpy_pdpd(arg_shape, result_shape.to_shape(), m_mode); - } - else if (m_mode.m_type == BroadcastType::NUMPY) - { - result_shape = target_shape; - validate_target_shape_numpy(arg_shape, target_shape); - pair_broadcast_axes = - get_broadcast_axes_numpy_pdpd(arg_shape, result_shape.to_shape(), m_mode); + axes_mapping_val = axes_mapping_constant->get_axis_vector_val(); } else { - ngraph_error("Unsupported BroadcastType "); + // read from HT and save as AxisVector + get_axis_vector_from_ht(inputs[2], axes_mapping_val, arg_shape); } - - return evaluate_broadcast( - inputs[0], outputs[0], pair_broadcast_axes, result_shape.to_shape()); + pair_broadcast_axes = get_broadcast_axes_none(axes_mapping_val, target_shape.size()); + validate_target_shape_none(inputs[0]->get_shape(), axes_mapping_val, target_shape); + result_shape = target_shape; } - return false; + else if (m_mode.m_type == BroadcastType::PDPD) + { + result_shape = 
get_result_shape_pdpd(arg_shape, target_shape, m_mode); + pair_broadcast_axes = + get_broadcast_axes_numpy_pdpd(arg_shape, result_shape.to_shape(), m_mode); + } + else if (m_mode.m_type == BroadcastType::NUMPY) + { + result_shape = target_shape; + validate_target_shape_numpy(arg_shape, target_shape); + pair_broadcast_axes = + get_broadcast_axes_numpy_pdpd(arg_shape, result_shape.to_shape(), m_mode); + } + else + { + throw ngraph_error("Unsupported BroadcastType "); + } + + return evaluate_broadcast(inputs[0], outputs[0], pair_broadcast_axes, result_shape.to_shape()); } diff --git a/ngraph/core/src/op/util/embeddingbag_offsets_base.cpp b/ngraph/core/src/op/util/embeddingbag_offsets_base.cpp index 3fa1b09ba78..6b2b1d8231f 100644 --- a/ngraph/core/src/op/util/embeddingbag_offsets_base.cpp +++ b/ngraph/core/src/op/util/embeddingbag_offsets_base.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/util/embeddingbag_offsets_base.hpp" +#include "itt.hpp" #include "ngraph/op/constant.hpp" using namespace std; @@ -51,6 +52,7 @@ op::util::EmbeddingBagOffsetsBase::EmbeddingBagOffsetsBase(const Output& e void op::util::EmbeddingBagOffsetsBase::validate_and_infer_types() { + NGRAPH_OP_SCOPE(util_EmbeddingBagOffsetsBase_validate_and_infer_types); NODE_VALIDATION_CHECK(this, get_input_element_type(OFFSETS) == element::i64 || get_input_element_type(OFFSETS) == element::i32, @@ -146,5 +148,6 @@ void op::util::EmbeddingBagOffsetsBase::validate_and_infer_types() bool op::util::EmbeddingBagOffsetsBase::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(util_EmbeddingBagOffsetsBase_visit_attributes); return true; } diff --git a/ngraph/core/src/op/util/embeddingbag_packed_base.cpp b/ngraph/core/src/op/util/embeddingbag_packed_base.cpp index 7b9afd0f7ad..71a430bdfc9 100644 --- a/ngraph/core/src/op/util/embeddingbag_packed_base.cpp +++ b/ngraph/core/src/op/util/embeddingbag_packed_base.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/util/embeddingbag_packed_base.hpp" +#include "itt.hpp" #include "ngraph/op/constant.hpp" using namespace std; @@ -39,6 +40,7 @@ op::util::EmbeddingBagPackedBase::EmbeddingBagPackedBase(const Output& emb void op::util::EmbeddingBagPackedBase::validate_and_infer_types() { + NGRAPH_OP_SCOPE(util_EmbeddingBagPackedBase_validate_and_infer_types); NODE_VALIDATION_CHECK(this, get_input_element_type(INDICES) == element::i64 || get_input_element_type(INDICES) == element::i32, @@ -94,5 +96,6 @@ void op::util::EmbeddingBagPackedBase::validate_and_infer_types() bool op::util::EmbeddingBagPackedBase::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(util_EmbeddingBagPackedBase_visit_attributes); return true; } diff --git a/ngraph/core/src/op/util/fused_op.cpp b/ngraph/core/src/op/util/fused_op.cpp index 349091d8984..8f256e5c238 100644 --- a/ngraph/core/src/op/util/fused_op.cpp +++ b/ngraph/core/src/op/util/fused_op.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/util/fused_op.hpp" +#include "itt.hpp" #include "ngraph/graph_util.hpp" @@ -34,6 +35,7 @@ op::util::FusedOp::FusedOp(const OutputVector& args) void op::util::FusedOp::validate_and_infer_types() { + NGRAPH_OP_SCOPE(util_FusedOp_validate_and_infer_types); pre_validate_and_infer_types(); if (!can_decompose_with_partial_shapes() && is_dynamic()) diff --git a/ngraph/core/src/op/util/index_reduction.cpp
b/ngraph/core/src/op/util/index_reduction.cpp index f4fd0ab5dc1..0e9584fb9b7 100644 --- a/ngraph/core/src/op/util/index_reduction.cpp +++ b/ngraph/core/src/op/util/index_reduction.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/op/util/index_reduction.hpp" @@ -54,6 +55,7 @@ void op::util::IndexReduction::set_index_element_type(const element::Type& index void op::util::IndexReduction::validate_and_infer_types() { + NGRAPH_OP_SCOPE(util_IndexReduction_validate_and_infer_types); // TODO(amprocte): Should reject if size of reduction axis is zero. const PartialShape& arg_shape = get_input_partial_shape(0); Rank rank = arg_shape.rank(); @@ -105,6 +107,7 @@ void op::util::IndexReduction::validate_and_infer_types() bool op::util::IndexReduction::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(util_IndexReduction_visit_attributes); visitor.on_attribute("axis", m_axis); visitor.on_attribute("index_element_type", m_index_element_type); return true; diff --git a/ngraph/core/src/op/util/logical_reduction.cpp b/ngraph/core/src/op/util/logical_reduction.cpp index dbb12c3e025..b22d5880e30 100644 --- a/ngraph/core/src/op/util/logical_reduction.cpp +++ b/ngraph/core/src/op/util/logical_reduction.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/util/logical_reduction.hpp" +#include "itt.hpp" #include "ngraph/op/constant.hpp" #include "ngraph/validation_util.hpp" @@ -64,6 +65,7 @@ void op::util::LogicalReduction::set_reduction_axes(const AxisSet& reduction_axe void op::util::LogicalReduction::validate_and_infer_types() { + NGRAPH_OP_SCOPE(util_LogicalReduction_validate_and_infer_types); auto input_shape = get_input_partial_shape(0); auto input_rank = input_shape.rank(); diff --git a/ngraph/core/src/op/util/logical_reduction_keep_dims.cpp b/ngraph/core/src/op/util/logical_reduction_keep_dims.cpp index e5b3d86fc39..8cdfdda5c45 100644 --- a/ngraph/core/src/op/util/logical_reduction_keep_dims.cpp +++ b/ngraph/core/src/op/util/logical_reduction_keep_dims.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/util/logical_reduction_keep_dims.hpp" +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/op/constant.hpp" #include "ngraph/validation_util.hpp" @@ -33,12 +34,14 @@ op::util::LogicalReductionKeepDims::LogicalReductionKeepDims( bool ngraph::op::util::LogicalReductionKeepDims::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v0_util_LogicalReductionKeepDims_visit_attributes); visitor.on_attribute("keep_dims", m_keep_dims); return true; } void op::util::LogicalReductionKeepDims::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v0_util_LogicalReductionKeepDims_validate_and_infer_types); if (m_keep_dims) { const auto input_shape = get_input_partial_shape(0); @@ -86,7 +89,7 @@ void op::util::LogicalReductionKeepDims::validate_and_infer_types() } else { - dims.push_back(Dimension{1}); + dims.emplace_back(Dimension{1}); } } result_shape = PartialShape(dims); diff --git a/ngraph/core/src/op/util/rnn_cell_base.cpp b/ngraph/core/src/op/util/rnn_cell_base.cpp index 9a9c56e018d..288a389a94d 100644 --- a/ngraph/core/src/op/util/rnn_cell_base.cpp +++ b/ngraph/core/src/op/util/rnn_cell_base.cpp @@ -17,6 +17,7 @@ #include #include #include +#include "itt.hpp" #include "ngraph/attribute_visitor.hpp" 
#include "ngraph/op/add.hpp" @@ -88,6 +89,7 @@ op::util::RNNCellBase::RNNCellBase(const OutputVector& args, bool ngraph::op::util::RNNCellBase::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(util_RNNCellBase_visit_attributes); visitor.on_attribute("hidden_size", m_hidden_size); visitor.on_attribute("activations", m_activations); visitor.on_attribute("activations_alpha", m_activations_alpha); diff --git a/ngraph/core/src/op/util/scatter_base.cpp b/ngraph/core/src/op/util/scatter_base.cpp index cfad2e3428b..0759be39811 100644 --- a/ngraph/core/src/op/util/scatter_base.cpp +++ b/ngraph/core/src/op/util/scatter_base.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/util/scatter_base.hpp" +#include "itt.hpp" #include "ngraph/op/util/op_types.hpp" #include "ngraph/shape.hpp" #include "ngraph/validation_util.hpp" @@ -35,6 +36,7 @@ op::util::ScatterBase::ScatterBase(const Output& data, void op::util::ScatterBase::validate_and_infer_types() { + NGRAPH_OP_SCOPE(util_ScatterBase_validate_and_infer_types); const auto& data_et = get_input_element_type(DATA); const auto& indices_et = get_input_element_type(INDICES); const auto& updates_et = get_input_element_type(UPDATES); @@ -133,5 +135,6 @@ void op::util::ScatterBase::validate_and_infer_types() bool op::util::ScatterBase::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(util_ScatterBase_visit_attributes); return true; } diff --git a/ngraph/core/src/op/util/scatter_nd_base.cpp b/ngraph/core/src/op/util/scatter_nd_base.cpp index 2bb6b9cb8af..5cd4e5a395f 100644 --- a/ngraph/core/src/op/util/scatter_nd_base.cpp +++ b/ngraph/core/src/op/util/scatter_nd_base.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/util/scatter_nd_base.hpp" +#include "itt.hpp" #include "ngraph/node.hpp" #include "ngraph/shape.hpp" @@ -36,11 +37,13 @@ op::util::ScatterNDBase::ScatterNDBase(const Output& data, bool op::util::ScatterNDBase::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(util_ScatterNDBase_visit_attributes); return true; } void op::util::ScatterNDBase::validate_and_infer_types() { + NGRAPH_OP_SCOPE(util_ScatterNDBase_validate_and_infer_types); element::Type inputs_et = get_input_element_type(INPUTS); element::Type indices_et = get_input_element_type(INDICES); element::Type updates_et = get_input_element_type(UPDATES); @@ -74,7 +77,8 @@ void op::util::ScatterNDBase::validate_and_infer_types() updates_shape.rank().get_length() == indices_shape.rank().get_length() + inputs_shape.rank().get_length() - indices_shape[indices_shape.rank().get_length() - 1].get_length() - 1, - "Rank of updates must be rank of inputs + rank of indices - last dimension of indices - 1"); + "Rank of updates must be rank of inputs + rank of indices - last dimension of indices " + "- 1"); bool compatible = true; if (inputs_shape.is_static() && indices_shape.is_static() && updates_shape.is_static()) diff --git a/ngraph/core/src/op/util/unary_elementwise_arithmetic.cpp b/ngraph/core/src/op/util/unary_elementwise_arithmetic.cpp index 6ececc9b273..a580533a990 100644 --- a/ngraph/core/src/op/util/unary_elementwise_arithmetic.cpp +++ b/ngraph/core/src/op/util/unary_elementwise_arithmetic.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "itt.hpp" #include "ngraph/op/util/elementwise_args.hpp" 
using namespace ngraph; @@ -46,10 +47,12 @@ void op::util::UnaryElementwiseArithmetic::validate_and_infer_elementwise_arithm void op::util::UnaryElementwiseArithmetic::validate_and_infer_types() { + NGRAPH_OP_SCOPE(util_UnaryElementwiseArithmetic_validate_and_infer_types); validate_and_infer_elementwise_arithmetic(); } bool op::util::UnaryElementwiseArithmetic::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(util_UnaryElementwiseArithmetic_visit_attributes); return true; } diff --git a/ngraph/core/src/op/variadic_split.cpp b/ngraph/core/src/op/variadic_split.cpp index b49b5f49296..19a6fbfe256 100644 --- a/ngraph/core/src/op/variadic_split.cpp +++ b/ngraph/core/src/op/variadic_split.cpp @@ -40,11 +40,13 @@ op::v1::VariadicSplit::VariadicSplit(const Output& data, bool ngraph::op::v1::VariadicSplit::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_VariadicSplit_visit_attributes); return true; } void ngraph::op::v1::VariadicSplit::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v1_VariadicSplit_validate_and_infer_types); set_input_is_relevant_to_value(0); set_input_is_relevant_to_value(1); set_input_is_relevant_to_value(2); @@ -143,6 +145,7 @@ void ngraph::op::v1::VariadicSplit::validate_and_infer_types() shared_ptr op::v1::VariadicSplit::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_VariadicSplit_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), new_args.at(2)); } @@ -216,6 +219,6 @@ bool op::v1::VariadicSplit::evaluate_variadic_split(const HostTensorVector& inpu bool op::v1::VariadicSplit::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_VariadicSplit_evaluate) { return evaluate_variadic_split(inputs, outputs); } - return false; + NGRAPH_OP_SCOPE(v1_VariadicSplit_evaluate); + return evaluate_variadic_split(inputs, outputs); } diff --git a/ngraph/core/src/op/xor.cpp b/ngraph/core/src/op/xor.cpp index fd6dcd0382a..a04928a2c14 100644 --- a/ngraph/core/src/op/xor.cpp +++ b/ngraph/core/src/op/xor.cpp @@ -34,12 +34,14 @@ op::v1::LogicalXor::LogicalXor(const Output& arg0, shared_ptr op::v1::LogicalXor::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v1_LogicalXor_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } bool ngraph::op::v1::LogicalXor::visit_attributes(AttributeVisitor& visitor) { + NGRAPH_OP_SCOPE(v1_LogicalXor_visit_attributes); BinaryElementwiseLogical::visit_attributes(visitor); return true; } @@ -86,11 +88,8 @@ namespace logxor bool op::v1::LogicalXor::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v1_LogicalXor_evaluate) - { - return logxor::evaluate_logxor(inputs[0], inputs[1], outputs[0], get_autob()); - } - return false; + NGRAPH_OP_SCOPE(v1_LogicalXor_evaluate); + return logxor::evaluate_logxor(inputs[0], inputs[1], outputs[0], get_autob()); } constexpr NodeTypeInfo op::v0::Xor::type_info; @@ -105,15 +104,13 @@ op::v0::Xor::Xor(const Output& arg0, shared_ptr op::v0::Xor::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_Xor_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } bool op::v0::Xor::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - NGRAPH_OP_SCOPE(v0_Xor_evaluate) - { - return 
-        return logxor::evaluate_logxor(inputs[0], inputs[1], outputs[0], get_autob());
-    }
-    return false;
+    NGRAPH_OP_SCOPE(v0_Xor_evaluate);
+    return logxor::evaluate_logxor(inputs[0], inputs[1], outputs[0], get_autob());
 }
diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt
index f04d7b4196e..7e1c407370c 100644
--- a/ngraph/test/CMakeLists.txt
+++ b/ngraph/test/CMakeLists.txt
@@ -49,6 +49,9 @@ set(SRC
     build_graph.cpp
     builder_autobroadcast.cpp
     check.cpp
+    conditional_compilation/ngraph_cc_collect.cpp
+    conditional_compilation/ngraph_cc_off.cpp
+    conditional_compilation/ngraph_cc_on.cpp
     constant.cpp
     constant_folding.cpp
     control_dependencies.cpp
@@ -200,32 +203,6 @@ set(SRC
     util.cpp
 )
 
-# This code generates one source file per header file under ngraph/src where the source file
-# has just a single #include statement. This checks that each header in the source tree is
-# complete and self-contained so it can be included without requiring any other includes.
-set(DIRECTORIES_IGNORED runtime frontend)
-set(NGRAPH_MAIN_SRC_DIR "${CMAKE_SOURCE_DIR}/src/ngraph")
-file(GLOB_RECURSE LIST_RECURSE
-    "${NGRAPH_MAIN_SRC_DIR}/builder/*.hpp"
-    "${NGRAPH_MAIN_SRC_DIR}/codegen/*.hpp"
-    "${NGRAPH_MAIN_SRC_DIR}/descriptor/*.hpp"
-    "${NGRAPH_MAIN_SRC_DIR}/distributed/*.hpp"
-    "${NGRAPH_MAIN_SRC_DIR}/op/*.hpp"
-    "${NGRAPH_MAIN_SRC_DIR}/pass/*.hpp"
-    "${NGRAPH_MAIN_SRC_DIR}/state*.hpp")
-file(GLOB LIST
-    "${NGRAPH_MAIN_SRC_DIR}/*.hpp"
-    "${CMAKE_CURRENT_SOURCE_DIR}/runtime/*.hpp")
-set(NGRAPH_HEADER_LIST ${LIST_RECURSE} ${LIST})
-list(APPEND NGRAPH_HEADER_LIST ${LIST})
-foreach(HEADER ${NGRAPH_HEADER_LIST})
-    file(RELATIVE_PATH OUT_PATH ${NGRAPH_MAIN_SRC_DIR} ${HEADER})
-    string(REGEX REPLACE "hpp$" "cpp" OUT_PATH ${OUT_PATH})
-    set(OUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/include_test/${OUT_PATH}")
-    configure_file("header_standalone.in.cpp" ${OUT_FILE})
-    list(APPEND SRC ${OUT_FILE})
-endforeach()
-
 set_source_files_properties(includes.cpp PROPERTIES COMPILE_DEFINITIONS
     NGRAPH_INCLUDES="${PROJECT_SOURCE_DIR}/src/ngraph")
 
@@ -409,6 +386,8 @@ if(NGRAPH_ADDRESS_SANITIZER)
 endif()
 
 target_link_libraries(unit-test PRIVATE ngraph_test_util
+                                        openvino::conditional_compilation
+                                        openvino::itt
                                         ngraph
                                         ngraph::builder
                                         ngraph_backend
diff --git a/ngraph/test/conditional_compilation/ngraph_cc_collect.cpp b/ngraph/test/conditional_compilation/ngraph_cc_collect.cpp
new file mode 100644
index 00000000000..fdb09843037
--- /dev/null
+++ b/ngraph/test/conditional_compilation/ngraph_cc_collect.cpp
@@ -0,0 +1,58 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#include "gtest/gtest.h"
+
+#include <string>
+
+#ifdef SELECTIVE_BUILD_ANALYZER
+#define SELECTIVE_BUILD_ANALYZER_ON
+#undef SELECTIVE_BUILD_ANALYZER
+#elif defined(SELECTIVE_BUILD)
+#define SELECTIVE_BUILD_ON
+#undef SELECTIVE_BUILD
+#endif
+
+#define SELECTIVE_BUILD_ANALYZER
+
+#include "../core/src/itt.hpp"
+
+using namespace std;
+
+TEST(conditional_compilation, collect_op_scope)
+{
+#define ngraph_op_Scope0 1
+    int n = 0;
+
+    // Simple scope is enabled
+    NGRAPH_OP_SCOPE(Scope0);
+    n = 42;
+    EXPECT_EQ(n, 42);
+
+    // Simple scope is disabled
+    NGRAPH_OP_SCOPE(Scope1);
+    n = 43;
+    EXPECT_EQ(n, 43);
+#undef ngraph_op_Scope0
+}
+
+#undef SELECTIVE_BUILD_ANALYZER
+
+#ifdef SELECTIVE_BUILD_ANALYZER_ON
+#define SELECTIVE_BUILD_ANALYZER
+#elif defined(SELECTIVE_BUILD_ON)
+#define SELECTIVE_BUILD
+#endif
diff --git a/ngraph/test/conditional_compilation/ngraph_cc_off.cpp b/ngraph/test/conditional_compilation/ngraph_cc_off.cpp
new file mode 100644
index 00000000000..fe2a8199abb
--- /dev/null
+++ b/ngraph/test/conditional_compilation/ngraph_cc_off.cpp
@@ -0,0 +1,52 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#include "gtest/gtest.h"
+
+#include <string>
+
+#ifdef SELECTIVE_BUILD_ANALYZER
+#define SELECTIVE_BUILD_ANALYZER_ON
+#undef SELECTIVE_BUILD_ANALYZER
+#elif defined(SELECTIVE_BUILD)
+#define SELECTIVE_BUILD_ON
+#undef SELECTIVE_BUILD
+#endif
+
+#include "../core/src/itt.hpp"
+
+using namespace std;
+
+TEST(conditional_compilation, op_scope_with_disabled_cc)
+{
+    int n = 0;
+
+    // Simple scope is enabled
+    NGRAPH_OP_SCOPE(Scope0);
+    n = 42;
+    EXPECT_EQ(n, 42);
+
+    // Simple scope is disabled
+    NGRAPH_OP_SCOPE(Scope1);
+    n = 43;
+    EXPECT_EQ(n, 43);
+}
+
+#ifdef SELECTIVE_BUILD_ANALYZER_ON
+#define SELECTIVE_BUILD_ANALYZER
+#elif defined(SELECTIVE_BUILD_ON)
+#define SELECTIVE_BUILD
+#endif
diff --git a/ngraph/test/conditional_compilation/ngraph_cc_on.cpp b/ngraph/test/conditional_compilation/ngraph_cc_on.cpp
new file mode 100644
index 00000000000..15dd93e8e6e
--- /dev/null
+++ b/ngraph/test/conditional_compilation/ngraph_cc_on.cpp
@@ -0,0 +1,57 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#include "gtest/gtest.h"
+
+#include <string>
+
+#ifdef SELECTIVE_BUILD_ANALYZER
+#define SELECTIVE_BUILD_ANALYZER_ON
+#undef SELECTIVE_BUILD_ANALYZER
+#elif defined(SELECTIVE_BUILD)
+#define SELECTIVE_BUILD_ON
+#undef SELECTIVE_BUILD
+#endif
+
+#define SELECTIVE_BUILD
+
+#include "../core/src/itt.hpp"
+
+using namespace std;
+
+TEST(conditional_compilation, disabled_op_scope)
+{
+#define ngraph_op_Scope0 1
+    int n = 0;
+    const std::string errMsg = "ngraph_op_Scope1 is disabled!";
+
+    // Simple scope is enabled
+    NGRAPH_OP_SCOPE(Scope0);
+    n = 42;
+    EXPECT_EQ(n, 42);
+
+    // Simple scope is disabled
+    ASSERT_THROW(NGRAPH_OP_SCOPE(Scope1), ngraph::ngraph_error);
+#undef ngraph_op_Scope0
+}
+
+#undef SELECTIVE_BUILD
+
+#ifdef SELECTIVE_BUILD_ANALYZER_ON
+#define SELECTIVE_BUILD_ANALYZER
+#elif defined(SELECTIVE_BUILD_ON)
+#define SELECTIVE_BUILD
+#endif
diff --git a/openvino/conditional_compilation/include/openvino/cc/selective_build.h b/openvino/conditional_compilation/include/openvino/cc/selective_build.h
index 05196f8928d..96e4cb64e6d 100644
--- a/openvino/conditional_compilation/include/openvino/cc/selective_build.h
+++ b/openvino/conditional_compilation/include/openvino/cc/selective_build.h
@@ -187,7 +187,7 @@ bool match(char const *region, Ctx && ctx, T && val, Case && cs, Cases&&... case
 
 } // namespace internal
 
-#define OV_SCOPE(Module, region)                                                            \
+#define OV_SCOPE(Module, region) \
     OV_ITT_SCOPED_TASK(OV_CC_CAT(SIMPLE_, Module), OV_CC_TOSTRING(region));
 
 #define OV_SWITCH(Module, fn, ctx, val, ...) \
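
Note: the three new tests include ../core/src/itt.hpp directly and toggle SELECTIVE_BUILD / SELECTIVE_BUILD_ANALYZER around that include, so each of the three expansions of the simplified NGRAPH_OP_SCOPE gets exercised. The itt.hpp hunk itself appears only in the diffstat of this patch, so the following is a minimal sketch of what the macro plausibly looks like, inferred from the OV_SCOPE definition above and from what the tests assert (plain scopes under the analyzer, and an ngraph::ngraph_error carrying "ngraph_op_Scope1 is disabled!" under SELECTIVE_BUILD). OV_CC_SCOPE_IS_ENABLED, OV_CC_CAT3, and itt::domains::ngraph_op are assumed helper/domain names from the conditional_compilation and ITT headers, not taken from this excerpt.

    // Sketch only -- the real definitions live in ngraph/core/src/itt.hpp.
    #if defined(SELECTIVE_BUILD_ANALYZER)
    // Collect mode: annotate the region so the analyzer records "ngraph_op_<region>".
    #define NGRAPH_OP_SCOPE(region) OV_SCOPE(ngraph_op, region)
    #elif defined(SELECTIVE_BUILD)
    // Selective build: entering a region that was not collected throws, which is
    // exactly what ngraph_cc_on.cpp asserts with ASSERT_THROW.
    // (OV_CC_SCOPE_IS_ENABLED / OV_CC_CAT3 are assumed helper names.)
    #define NGRAPH_OP_SCOPE(region)                                                \
        if (OV_CC_SCOPE_IS_ENABLED(OV_CC_CAT3(ngraph_op, _, region)) == 0)         \
        throw ngraph::ngraph_error(                                                \
            std::string(OV_CC_TOSTRING(OV_CC_CAT3(ngraph_op, _, region))) +        \
            " is disabled!")
    #else
    // Default build: a plain ITT profiling task; nothing is compiled out.
    #define NGRAPH_OP_SCOPE(region)                                                \
        OV_ITT_SCOPED_TASK(itt::domains::ngraph_op, OV_CC_TOSTRING(region))
    #endif

Under these assumptions every branch expands to a single statement, which is what lets call sites switch from the old block form "NGRAPH_OP_SCOPE(x) { ... } return false;" to the plain "NGRAPH_OP_SCOPE(x);" applied throughout this patch.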