Changed OV_SCOPE semantics (#3692)

* Added an if-define construction

* Changed the OV_SCOPE semantics

* Fixed the code style

* Removed redundant lines
Authored by Ilya Churaev on 2020-12-22 18:29:41 +03:00, committed by GitHub
parent 967c040e19
commit 1926179b65
101 changed files with 541 additions and 387 deletions
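
The change, in one line: OV_SCOPE (and NGRAPH_OP_SCOPE, which wraps it) no longer takes the guarded code as a trailing macro argument; the guarded code now follows the macro invocation as an ordinary statement or braced block. A before/after pair, taken from the test diff below:

    // old semantics: code is the last macro argument
    OV_SCOPE(CCTests, Scope0, n = 42;);

    // new semantics: the macro prefixes a statement or block
    OV_SCOPE(CCTests, Scope0) {
        n = 42;
    }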

@@ -62,15 +62,16 @@ struct TestNode : public TestNodeBase {
 TEST(ConditionalCompilationTests, SimpleScope) {
 #define CCTests_Scope0 1
     int n = 0;
     // Simple scope is enabled
-    OV_SCOPE(CCTests, Scope0, n = 42;);
+    OV_SCOPE(CCTests, Scope0) {
+        n = 42;
+    }
     EXPECT_EQ(n, 42);
     // Simple scope is disabled
-    OV_SCOPE(CCTests, Scope1, n = 0;);
+    OV_SCOPE(CCTests, Scope1) n = 43;
     EXPECT_EQ(n, 42);
 #undef CCTests_Scope0

@@ -63,10 +63,12 @@ struct TestNode : public TestNodeBase {
 TEST(ConditionalCompilationTests, SimpleScopeAnalysys) {
     int n = 0;
-    OV_SCOPE(CCTests, Scope0, n = 42;);
+    OV_SCOPE(CCTests, Scope0) n = 42;
     EXPECT_EQ(n, 42);
-    OV_SCOPE(CCTests, Scope1, n = 43;);
+    OV_SCOPE(CCTests, Scope1) {
+        n = 43;
+    }
     EXPECT_EQ(n, 43);
 }

@@ -115,10 +115,6 @@ namespace ngraph
                                const PartialShape input_partial_shape,
                                const int64_t k) const;
             void set_axis(const Rank input_rank, const int64_t axis);
-
-        private:
-            bool evaluate_topk(const HostTensorVector& outputs,
-                               const HostTensorVector& inputs) const;
         };
     } // namespace v1

@@ -40,26 +40,27 @@ namespace ngraph
     }
 #if defined(SELECTIVE_BUILD) || defined(SELECTIVE_BUILD_ANALYZER)
-#define NGRAPH_OP_SCOPE(region, ...) OV_SCOPE(ngraph_op, region, __VA_ARGS__)
+#define NGRAPH_OP_SCOPE(region) OV_SCOPE(ngraph_op, region)
 #else
-#define NGRAPH_OP_SCOPE(region, ...) \
-    OV_ITT_SCOPED_TASK(itt::domains::ngraph_op, #region); \
-    __VA_ARGS__
+#define NGRAPH_OP_SCOPE(region) OV_ITT_SCOPED_TASK(itt::domains::ngraph_op, #region);
 #endif
 #define NGRAPH_TYPE_CASE(region, a, ...) \
     case element::Type_t::a: \
     { \
-        OV_SCOPE( \
-            ngraph_op, OV_CC_CAT3(region, _, a), rc = evaluate<element::Type_t::a>(__VA_ARGS__)); \
+        OV_SCOPE(ngraph_op, OV_CC_CAT3(region, _, a)) \
+        { \
+            rc = evaluate<element::Type_t::a>(__VA_ARGS__); \
+        } \
     } \
-    break;
+    break
 #define NGRAPH_COPY_TENSOR(region, a, ...) \
     case element::Type_t::a: \
     { \
-        OV_SCOPE(ngraph_op, \
-                 OV_CC_CAT3(region, _, a), \
-                 rc = copy_tensor<element::Type_t::a>(__VA_ARGS__)); \
+        OV_SCOPE(ngraph_op, OV_CC_CAT3(region, _, a)) \
+        { \
+            rc = copy_tensor<element::Type_t::a>(__VA_ARGS__); \
+        } \
     } \
-    break;
+    break
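
The "if DEFINE construction" from the commit message is what makes the new form work: under selective build, OV_SCOPE(module, region) presumably expands to a conditional keyed on a per-region compile-time flag, so whatever statement or block follows it is either compiled in or reduced to dead code. A minimal, self-contained sketch of that mechanism (hypothetical CC_* names, not the actual OpenVINO header):

    #include <cassert>

    // Hypothetical per-region flags: 1 = region compiled in, 0 = compiled out.
    #define CC_demo_Scope0 1
    #define CC_demo_Scope1 0

    // Paste module and region into a flag name; the result expands to 0 or 1.
    #define CC_CAT3(x, y, z) x##_##y##_##z
    #define CC_SCOPE(module, region) if (CC_CAT3(CC, module, region))

    int main()
    {
        int n = 0;
        CC_SCOPE(demo, Scope0) { n = 42; } // enabled: body runs
        CC_SCOPE(demo, Scope1) n = 43;     // disabled: constant-false branch
        assert(n == 42);
        return 0;
    }

Note that in this sketch a disabled body still has to parse (it sits behind if (0) and is removed by the optimizer), which matches how the updated tests above exercise a disabled scope.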

@@ -74,8 +74,9 @@ namespace absop
 bool op::Abs::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
     bool rc = false;
-    NGRAPH_OP_SCOPE(
-        v0_Abs_evaluate,
-        rc = absop::evaluate_abs(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v0_Abs_evaluate)
+    {
+        rc = absop::evaluate_abs(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return rc;
 }

@@ -82,8 +82,9 @@ namespace acosop
 bool op::Acos::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
     bool rc = false;
-    NGRAPH_OP_SCOPE(
-        v0_Acos_evaluate,
-        rc = acosop::evaluate_acos(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v0_Acos_evaluate)
+    {
+        rc = acosop::evaluate_acos(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return rc;
 }

@@ -71,6 +71,6 @@ namespace acoshop
 bool op::v3::Acosh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
     bool rc = false;
-    NGRAPH_OP_SCOPE(v3_Acosh_evaluate, rc = acoshop::evaluate_acosh(inputs[0], outputs[0]));
+    NGRAPH_OP_SCOPE(v3_Acosh_evaluate) { rc = acoshop::evaluate_acosh(inputs[0], outputs[0]); }
     return rc;
 }

@@ -94,7 +94,9 @@ shared_ptr<Node> op::v1::Add::clone_with_new_inputs(const OutputVector& new_args
 bool op::v1::Add::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
     bool rc = false;
-    NGRAPH_OP_SCOPE(v1_Add_evaluate,
-                    rc = add::evaluate_add(inputs[0], inputs[1], outputs[0], get_autob()));
+    NGRAPH_OP_SCOPE(v1_Add_evaluate)
+    {
+        rc = add::evaluate_add(inputs[0], inputs[1], outputs[0], get_autob());
+    }
     return rc;
 }

@@ -87,7 +87,9 @@ bool op::v1::LogicalAnd::evaluate(const HostTensorVector& outputs,
                                   const HostTensorVector& inputs) const
 {
     bool rc = false;
-    NGRAPH_OP_SCOPE(v1_LogicalAnd_evaluate,
-                    rc = logand::evaluate_logand(inputs[0], inputs[1], outputs[0], get_autob()));
+    NGRAPH_OP_SCOPE(v1_LogicalAnd_evaluate)
+    {
+        rc = logand::evaluate_logand(inputs[0], inputs[1], outputs[0], get_autob());
+    }
     return rc;
 }

@@ -83,8 +83,9 @@ namespace asinop
 bool op::Asin::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
     bool rc = false;
-    NGRAPH_OP_SCOPE(
-        v0_Asin_evaluate,
-        rc = asinop::evaluate_asin(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v0_Asin_evaluate)
+    {
+        rc = asinop::evaluate_asin(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return rc;
 }

@@ -71,6 +71,6 @@ namespace asinhop
 bool op::v3::Asinh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
     bool rc = false;
-    NGRAPH_OP_SCOPE(v3_Asinh_evaluate, rc = asinhop::evaluate_asinh(inputs[0], outputs[0]));
+    NGRAPH_OP_SCOPE(v3_Asinh_evaluate) { rc = asinhop::evaluate_asinh(inputs[0], outputs[0]); }
     return rc;
 }

@@ -82,8 +82,9 @@ namespace atanop
 bool op::Atan::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
     bool rc = false;
-    NGRAPH_OP_SCOPE(
-        v0_Atan_evaluate,
-        rc = atanop::evaluate_atan(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v0_Atan_evaluate)
+    {
+        rc = atanop::evaluate_atan(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return rc;
 }

@@ -71,6 +71,6 @@ namespace atanhop
 bool op::v3::Atanh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
     bool rc = false;
-    NGRAPH_OP_SCOPE(v3_Atanh_evaluate, rc = atanhop::evaluate_atanh(inputs[0], outputs[0]));
+    NGRAPH_OP_SCOPE(v3_Atanh_evaluate) { rc = atanhop::evaluate_atanh(inputs[0], outputs[0]); }
     return rc;
 }

@@ -259,6 +259,6 @@ namespace
 bool ngraph::op::v1::BatchToSpace::evaluate(const HostTensorVector& outputs,
                                             const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v1_BatchToSpace, return batch_to_space_evaluate(outputs, inputs));
+    NGRAPH_OP_SCOPE(v1_BatchToSpace) { return batch_to_space_evaluate(outputs, inputs); }
     return false;
 }

@@ -228,7 +228,7 @@ bool op::v3::Broadcast::visit_attributes(AttributeVisitor& visitor)
 bool op::v3::Broadcast::evaluate(const HostTensorVector& outputs,
                                  const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v3_Broadcast_evaluate, return broadcast_evaluate(outputs, inputs));
+    NGRAPH_OP_SCOPE(v3_Broadcast_evaluate) { return broadcast_evaluate(outputs, inputs); }
     return false;
 }
@@ -318,7 +318,9 @@ bool op::v1::Broadcast::visit_attributes(AttributeVisitor& visitor)
 bool op::v1::Broadcast::evaluate(const HostTensorVector& outputs,
                                  const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v1_Broadcast_evaluate,
-                    return op::util::BroadcastBase::evaluate(outputs, inputs));
+    NGRAPH_OP_SCOPE(v1_Broadcast_evaluate)
+    {
+        return op::util::BroadcastBase::evaluate(outputs, inputs);
+    }
     return false;
 }

@@ -83,8 +83,9 @@ namespace ceiling
 bool op::Ceiling::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v0_Ceiling_evaluate,
-        return ceiling::evaluate_ceiling(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v0_Ceiling_evaluate)
+    {
+        return ceiling::evaluate_ceiling(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return false;
 }

@@ -86,10 +86,11 @@ namespace clamp
 bool op::v0::Clamp::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v0_Clamp_evaluate,
-        return clamp::evaluate_clamp(
-            inputs[0], outputs[0], get_min(), get_max(), shape_size(get_input_shape(0))));
+    NGRAPH_OP_SCOPE(v0_Clamp_evaluate)
+    {
+        return clamp::evaluate_clamp(
+            inputs[0], outputs[0], get_min(), get_max(), shape_size(get_input_shape(0)));
+    }
     return false;
 }

@@ -144,9 +144,10 @@ namespace
 bool op::Concat::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v0_Concat_evaluate,
-                    auto concat_axis =
-                        get_axis() < 0 ? get_axis() + inputs[0]->get_shape().size() : get_axis();
-                    return evaluate_concat(inputs, outputs[0], concat_axis));
+    NGRAPH_OP_SCOPE(v0_Concat_evaluate)
+    {
+        auto concat_axis = get_axis() < 0 ? get_axis() + inputs[0]->get_shape().size() : get_axis();
+        return evaluate_concat(inputs, outputs[0], concat_axis);
+    }
     return false;
 }

@@ -638,9 +638,12 @@ bool op::v0::Constant::visit_attributes(AttributeVisitor& visitor)
 bool op::v0::Constant::evaluate(const HostTensorVector& outputs,
                                 const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v0_Constant_evaluate, auto output = outputs[0];
-                    output->write(get_data_ptr(), output->get_size_in_bytes());
-                    return true);
+    NGRAPH_OP_SCOPE(v0_Constant_evaluate)
+    {
+        auto output = outputs[0];
+        output->write(get_data_ptr(), output->get_size_in_bytes());
+        return true;
+    }
     return false;
 }

@@ -66,8 +66,10 @@ namespace convert
 #define TYPE_OUT_CASE(a, ...) \
     case element::Type_t::a: \
     { \
-        NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_covert_out, _, a), \
-                        rc = evaluate<INPUT_ET, element::Type_t::a>(__VA_ARGS__)); \
+        NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_covert_out, _, a)) \
+        { \
+            rc = evaluate<INPUT_ET, element::Type_t::a>(__VA_ARGS__); \
+        } \
     } \
     break
@@ -117,7 +119,9 @@ namespace convert
 bool op::v0::Convert::evaluate(const HostTensorVector& output_values,
                                const HostTensorVector& input_values) const
 {
-    NGRAPH_OP_SCOPE(v0_Convert_evaluate,
-                    return convert::evaluate_convert(input_values[0], output_values[0]));
+    NGRAPH_OP_SCOPE(v0_Convert_evaluate)
+    {
+        return convert::evaluate_convert(input_values[0], output_values[0]);
+    }
     return false;
 }

@@ -78,8 +78,9 @@ namespace cosop
 bool op::Cos::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v0_Cos_evaluate,
-        return cosop::evaluate_cos(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v0_Cos_evaluate)
+    {
+        return cosop::evaluate_cos(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return false;
 }

@@ -77,8 +77,9 @@ namespace coshop
 bool op::Cosh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v0_Cosh_evaluate,
-        return coshop::evaluate_cosh(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v0_Cosh_evaluate)
+    {
+        return coshop::evaluate_cosh(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return false;
 }

@@ -243,7 +243,7 @@ bool op::DepthToSpace::evaluate_depth_to_space(const HostTensorVector& outputs,
 bool op::DepthToSpace::evaluate(const HostTensorVector& outputs,
                                 const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v0_DepthToSpace_evaluate, return evaluate_depth_to_space(outputs, inputs));
+    NGRAPH_OP_SCOPE(v0_DepthToSpace_evaluate) { return evaluate_depth_to_space(outputs, inputs); }
     return false;
 }
 namespace ngraph

@@ -106,8 +106,10 @@ shared_ptr<Node> op::v1::Divide::clone_with_new_inputs(const OutputVector& new_a
 bool op::v1::Divide::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v1_Divide_evaluate,
-                    return divide::evaluate_divide(
-                        inputs[0], inputs[1], outputs[0], get_autob(), is_pythondiv()));
+    NGRAPH_OP_SCOPE(v1_Divide_evaluate)
+    {
+        return divide::evaluate_divide(
+            inputs[0], inputs[1], outputs[0], get_autob(), is_pythondiv());
+    }
     return false;
 }

@@ -83,7 +83,9 @@ shared_ptr<Node> op::v1::Equal::clone_with_new_inputs(const OutputVector& new_ar
 bool op::v1::Equal::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v1_Equal_evaluate,
-                    return equal::evaluate_equal(inputs[0], inputs[1], outputs[0], get_autob()));
+    NGRAPH_OP_SCOPE(v1_Equal_evaluate)
+    {
+        return equal::evaluate_equal(inputs[0], inputs[1], outputs[0], get_autob());
+    }
     return false;
 }

@@ -76,8 +76,9 @@ namespace erfop
 bool op::Erf::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v0_Erf_evaluate,
-        return erfop::evaluate_erf(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v0_Erf_evaluate)
+    {
+        return erfop::evaluate_erf(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return false;
 }

@@ -76,8 +76,9 @@ namespace expop
 bool op::Exp::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v0_Exp_evaluate,
-        return expop::evaluate_exp(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v0_Exp_evaluate)
+    {
+        return expop::evaluate_exp(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return false;
 }

@@ -88,8 +88,9 @@ namespace floorop
 bool op::Floor::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v0_Floor_evaluate,
-        return floorop::evaluate_floor(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v0_Floor_evaluate)
+    {
+        return floorop::evaluate_floor(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return false;
 }

@@ -82,9 +82,10 @@ namespace floor_mod
 bool op::v1::FloorMod::evaluate(const HostTensorVector& outputs,
                                 const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v1_FloorMod_evaluate,
-        return floor_mod::evaluate_floor_mod(inputs[0], inputs[1], outputs[0], get_autob()));
+    NGRAPH_OP_SCOPE(v1_FloorMod_evaluate)
+    {
+        return floor_mod::evaluate_floor_mod(inputs[0], inputs[1], outputs[0], get_autob());
+    }
     return false;
 }

@@ -313,7 +313,7 @@ bool op::v1::Gather::evaluate_gather(const HostTensorVector& outputs,
 bool op::v1::Gather::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v1_Gather_evaluate, return evaluate_gather(outputs, inputs));
+    NGRAPH_OP_SCOPE(v1_Gather_evaluate) { return evaluate_gather(outputs, inputs); }
     return false;
 }

@@ -84,8 +84,9 @@ shared_ptr<Node> op::v1::Greater::clone_with_new_inputs(const OutputVector& new_
 bool op::v1::Greater::evaluate(const HostTensorVector& outputs,
                                const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v1_Greater_evaluate,
-        return greaterop::evaluate_greater(inputs[0], inputs[1], outputs[0], get_autob()));
+    NGRAPH_OP_SCOPE(v1_Greater_evaluate)
+    {
+        return greaterop::evaluate_greater(inputs[0], inputs[1], outputs[0], get_autob());
+    }
     return false;
 }

@@ -84,8 +84,10 @@ shared_ptr<Node> op::v1::GreaterEqual::clone_with_new_inputs(const OutputVector&
 bool op::v1::GreaterEqual::evaluate(const HostTensorVector& outputs,
                                     const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v1_GreaterEqual_evaluate,
-                    return greater_equalop::evaluate_greater_equal(
-                        inputs[0], inputs[1], outputs[0], get_autob()));
+    NGRAPH_OP_SCOPE(v1_GreaterEqual_evaluate)
+    {
+        return greater_equalop::evaluate_greater_equal(
+            inputs[0], inputs[1], outputs[0], get_autob());
+    }
     return false;
 }

@@ -73,8 +73,9 @@ namespace
 bool op::v5::HSigmoid::evaluate(const HostTensorVector& outputs,
                                 const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v5_HSigmoid_evaluate,
-        return evaluate_hsigmoid(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v5_HSigmoid_evaluate)
+    {
+        return evaluate_hsigmoid(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return false;
 }

@@ -72,8 +72,9 @@ namespace hswish
 bool op::v4::HSwish::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v4_HSwish_evaluate,
-        return hswish::evaluate_hswish(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v4_HSwish_evaluate)
+    {
+        return hswish::evaluate_hswish(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return false;
 }

@@ -497,7 +497,7 @@ bool op::v4::Interpolate::evaluate_interpolate(const HostTensorVector& outputs,
 bool op::v4::Interpolate::evaluate(const HostTensorVector& outputs,
                                    const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v4_Interpolate_evaluate, return evaluate_interpolate(outputs, inputs));
+    NGRAPH_OP_SCOPE(v4_Interpolate_evaluate) { return evaluate_interpolate(outputs, inputs); }
     return false;
 }

@@ -83,7 +83,9 @@ shared_ptr<Node> op::v1::Less::clone_with_new_inputs(const OutputVector& new_arg
 bool op::v1::Less::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v1_Less_evaluate,
-                    return lessop::evaluate_less(inputs[0], inputs[1], outputs[0], get_autob()));
+    NGRAPH_OP_SCOPE(v1_Less_evaluate)
+    {
+        return lessop::evaluate_less(inputs[0], inputs[1], outputs[0], get_autob());
+    }
     return false;
 }

@@ -84,8 +84,9 @@ namespace less_equalop
 bool op::v1::LessEqual::evaluate(const HostTensorVector& outputs,
                                  const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v1_LessEqual_evaluate,
-        return less_equalop::evaluate_less_equal(inputs[0], inputs[1], outputs[0], get_autob()));
+    NGRAPH_OP_SCOPE(v1_LessEqual_evaluate)
+    {
+        return less_equalop::evaluate_less_equal(inputs[0], inputs[1], outputs[0], get_autob());
+    }
     return false;
 }

@@ -76,8 +76,9 @@ namespace logop
 bool op::Log::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v0_Log_evaluate,
-        return logop::evaluate_log(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v0_Log_evaluate)
+    {
+        return logop::evaluate_log(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return false;
 }

@@ -390,13 +390,15 @@ Output<Node> op::v5::Loop::get_concatenated_slices(const Output<Node>& value,
 bool op::v5::Loop::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v5_Loop_evaluate,
-                    runtime::reference::loop(m_body,
-                                             m_output_descriptions,
-                                             m_input_descriptions,
-                                             m_special_body_ports,
-                                             outputs,
-                                             inputs);
-                    return true);
+    NGRAPH_OP_SCOPE(v5_Loop_evaluate)
+    {
+        runtime::reference::loop(m_body,
+                                 m_output_descriptions,
+                                 m_input_descriptions,
+                                 m_special_body_ports,
+                                 outputs,
+                                 inputs);
+        return true;
+    }
     return false;
 }

@@ -259,9 +259,11 @@ namespace matmul
 bool op::MatMul::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v0_MatMul_evaluate,
-                    return matmul::evaluate_matmul(
-                        inputs[0], inputs[1], outputs[0], get_transpose_a(), get_transpose_b()));
+    NGRAPH_OP_SCOPE(v0_MatMul_evaluate)
+    {
+        return matmul::evaluate_matmul(
+            inputs[0], inputs[1], outputs[0], get_transpose_a(), get_transpose_b());
+    }
     return false;
 }

@@ -77,8 +77,9 @@ shared_ptr<Node> op::v1::ReduceMax::clone_with_new_inputs(const OutputVector& ne
 bool op::v1::ReduceMax::evaluate(const HostTensorVector& outputs,
                                  const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v1_ReduceMax_evaluate,
-        return maxop::evaluate_max(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()));
+    NGRAPH_OP_SCOPE(v1_ReduceMax_evaluate)
+    {
+        return maxop::evaluate_max(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
+    }
     return false;
 }

@@ -229,6 +229,6 @@ bool op::v1::MaxPool::evaluate_maxpool(const HostTensorVector& outputs,
 bool op::v1::MaxPool::evaluate(const HostTensorVector& outputs,
                                const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v1_MaxPool_evaluate, return evaluate_maxpool(outputs, inputs));
+    NGRAPH_OP_SCOPE(v1_MaxPool_evaluate) { return evaluate_maxpool(outputs, inputs); }
     return false;
 }

@@ -91,8 +91,9 @@ shared_ptr<Node> op::v1::Maximum::clone_with_new_inputs(const OutputVector& new_
 bool op::v1::Maximum::evaluate(const HostTensorVector& outputs,
                                const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v1_Maximum_evaluate,
-        return maximumop::evaluate_maximum(inputs[0], inputs[1], outputs[0], get_autob()));
+    NGRAPH_OP_SCOPE(v1_Maximum_evaluate)
+    {
+        return maximumop::evaluate_maximum(inputs[0], inputs[1], outputs[0], get_autob());
+    }
     return false;
 }

@@ -79,8 +79,9 @@ shared_ptr<Node> op::v1::ReduceMin::clone_with_new_inputs(const OutputVector& ne
 bool op::v1::ReduceMin::evaluate(const HostTensorVector& outputs,
                                  const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v1_ReduceMin_evaluate,
-        return minop::evaluate_min(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()));
+    NGRAPH_OP_SCOPE(v1_ReduceMin_evaluate)
+    {
+        return minop::evaluate_min(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
+    }
     return false;
 }

@@ -89,8 +89,9 @@ shared_ptr<Node> op::v1::Minimum::clone_with_new_inputs(const OutputVector& new_
 bool op::v1::Minimum::evaluate(const HostTensorVector& outputs,
                                const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v1_Minimum_evaluate,
-        return minimumop::evaluate_minimum(inputs[0], inputs[1], outputs[0], get_autob()));
+    NGRAPH_OP_SCOPE(v1_Minimum_evaluate)
+    {
+        return minimumop::evaluate_minimum(inputs[0], inputs[1], outputs[0], get_autob());
+    }
     return false;
 }

@@ -77,8 +77,9 @@ namespace mish
 bool op::v4::Mish::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v4_Mish_evaluate,
-        return mish::evaluate_mish(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v4_Mish_evaluate)
+    {
+        return mish::evaluate_mish(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return false;
 }

@@ -84,9 +84,10 @@ shared_ptr<Node> op::v0::Multiply::clone_with_new_inputs(const OutputVector& new
 bool op::v0::Multiply::evaluate(const HostTensorVector& outputs,
                                 const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v0_Multiply_evaluate,
-        return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob()));
+    NGRAPH_OP_SCOPE(v0_Multiply_evaluate)
+    {
+        return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob());
+    }
     return false;
 }
@@ -111,8 +112,9 @@ shared_ptr<Node> op::v1::Multiply::clone_with_new_inputs(const OutputVector& new
 bool op::v1::Multiply::evaluate(const HostTensorVector& outputs,
                                 const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v1_Multiply_evaluate,
-        return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob()));
+    NGRAPH_OP_SCOPE(v1_Multiply_evaluate)
+    {
+        return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob());
+    }
     return false;
 }

@@ -73,9 +73,11 @@ namespace negativeop
 bool op::Negative::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v0_Negative_evaluate,
-                    return negativeop::evaluate_negative(
-                        inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v0_Negative_evaluate)
+    {
+        return negativeop::evaluate_negative(
+            inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return false;
 }

@@ -118,10 +118,13 @@ namespace nonzero
 #define TYPE_OUT_CASE(a, ...) \
     case element::Type_t::a: \
     { \
-        NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_nonzero_out, _, a), \
-                        rc = evaluate_nonzero_execute<INPUT_ET, element::Type_t::a>(__VA_ARGS__)); \
+        NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_nonzero_out, _, a)) \
+        { \
+            rc = evaluate_nonzero_execute<INPUT_ET, element::Type_t::a>(__VA_ARGS__); \
+        } \
     } \
-    break;
+    break
 template <element::Type_t INPUT_ET>
 bool evaluate(const HostTensorPtr& input, const HostTensorPtr& output)
 {
@@ -158,6 +161,9 @@ namespace nonzero
 bool op::v3::NonZero::evaluate(const HostTensorVector& outputs,
                                const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v3_NonZero_evaluate, return nonzero::evaluate_nonzero(inputs[0], outputs[0]));
+    NGRAPH_OP_SCOPE(v3_NonZero_evaluate)
+    {
+        return nonzero::evaluate_nonzero(inputs[0], outputs[0]);
+    }
     return false;
 }

@@ -91,8 +91,9 @@ namespace notop
 bool op::v1::LogicalNot::evaluate(const HostTensorVector& outputs,
                                   const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v1_LogicalNot_evaluate,
-        return notop::evaluate_not(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v1_LogicalNot_evaluate)
+    {
+        return notop::evaluate_not(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return false;
 }

@@ -84,9 +84,10 @@ shared_ptr<Node> op::v1::NotEqual::clone_with_new_inputs(const OutputVector& new
 bool op::v1::NotEqual::evaluate(const HostTensorVector& outputs,
                                 const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v1_NotEqual_evaluate,
-        return not_equalop::evaluate_not_equal(inputs[0], inputs[1], outputs[0], get_autob()));
+    NGRAPH_OP_SCOPE(v1_NotEqual_evaluate)
+    {
+        return not_equalop::evaluate_not_equal(inputs[0], inputs[1], outputs[0], get_autob());
+    }
     return false;
 }

@@ -159,12 +159,14 @@ namespace detail
 #define TYPE_OUT_CASE(a, ...) \
     case element::Type_t::a: \
     { \
-        NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_one_hot_out, _, a), \
-                        using IT = typename element_type_traits<element::Type_t::a>::value_type; \
-                        using OT = typename element_type_traits<out_t>::value_type; \
-                        rc = evaluate<IT, OT>(__VA_ARGS__)); \
+        NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_one_hot_out, _, a)) \
+        { \
+            using IT = typename element_type_traits<element::Type_t::a>::value_type; \
+            using OT = typename element_type_traits<out_t>::value_type; \
+            rc = evaluate<IT, OT>(__VA_ARGS__); \
+        } \
     } \
-    break;
+    break
 template <element::Type_t out_t>
 bool evaluate(const HostTensorVector& output_values,
@@ -206,7 +208,9 @@ namespace detail
 bool op::v1::OneHot::evaluate(const HostTensorVector& output_values,
                               const HostTensorVector& input_values) const
 {
-    NGRAPH_OP_SCOPE(v1_OneHot_evaluate,
-                    return detail::evaluate_onehot(output_values, input_values, get_axis()););
+    NGRAPH_OP_SCOPE(v1_OneHot_evaluate)
+    {
+        return detail::evaluate_onehot(output_values, input_values, get_axis());
+    }
     return false;
 }

@@ -82,7 +82,9 @@ namespace logor
 bool op::v1::LogicalOr::evaluate(const HostTensorVector& outputs,
                                  const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v1_LogicalOr_evaluate,
-                    return logor::evaluate_logor(inputs[0], inputs[1], outputs[0], get_autob()));
+    NGRAPH_OP_SCOPE(v1_LogicalOr_evaluate)
+    {
+        return logor::evaluate_logor(inputs[0], inputs[1], outputs[0], get_autob());
+    }
     return false;
 }

@@ -243,6 +243,6 @@ bool op::v1::Pad::evaluate_pad(const HostTensorVector& outputs,
 bool op::v1::Pad::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v1_Pad_evaluate, return evaluate_pad(outputs, inputs));
+    NGRAPH_OP_SCOPE(v1_Pad_evaluate) { return evaluate_pad(outputs, inputs); }
     return false;
 }

@@ -86,7 +86,9 @@ shared_ptr<Node> op::v1::Power::clone_with_new_inputs(const OutputVector& new_ar
 bool op::v1::Power::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v1_Power_evaluate,
-                    return power::evaluate_power(inputs[0], inputs[1], outputs[0], get_autob()));
+    NGRAPH_OP_SCOPE(v1_Power_evaluate)
+    {
+        return power::evaluate_power(inputs[0], inputs[1], outputs[0], get_autob());
+    }
     return false;
 }

@@ -127,7 +127,9 @@ namespace prelu
 bool op::PRelu::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v0_PRelu_evaluate,
-                    return prelu::evaluate_prelu(inputs[0], inputs[1], outputs[0]););
+    NGRAPH_OP_SCOPE(v0_PRelu_evaluate)
+    {
+        return prelu::evaluate_prelu(inputs[0], inputs[1], outputs[0]);
+    }
     return false;
 }

@@ -192,11 +192,13 @@ namespace prior_box
 bool op::v0::PriorBox::evaluate(const HostTensorVector& outputs,
                                 const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v0_PriorBox_evaluate,
-                    // Todo (itikhono): enable the use of the reference implementation after
-                    // supporting constants as
-                    // outputs in plugins
-                    // return evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs());
-                    return false);
+    NGRAPH_OP_SCOPE(v0_PriorBox_evaluate)
+    {
+        // Todo (itikhono): enable the use of the reference implementation after
+        // supporting constants as
+        // outputs in plugins
+        // return evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs());
+        return false;
+    }
     return false;
 }

@@ -165,11 +165,13 @@ namespace prior_box_clustered
 bool op::v0::PriorBoxClustered::evaluate(const HostTensorVector& outputs,
                                          const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v0_PriorBoxClustered_evaluate,
-                    // Todo (itikhono): enable the use of the reference implementation after
-                    // supporting constants as
-                    // outputs in plugins
-                    // return evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs());
-                    return false);
+    NGRAPH_OP_SCOPE(v0_PriorBoxClustered_evaluate)
+    {
+        // Todo (itikhono): enable the use of the reference implementation after
+        // supporting constants as
+        // outputs in plugins
+        // return evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs());
+        return false;
+    }
     return false;
 }

@@ -300,11 +300,14 @@ namespace rangeop
 bool op::v4::Range::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v4_Range_evaluate, HostTensorPtr out = outputs[0];
-                    HostTensorPtr start = inputs[0];
-                    HostTensorPtr stop = inputs[1];
-                    HostTensorPtr step = inputs[2];
-                    return rangeop::evaluate_power(out, start, stop, step, m_output_type, 4));
+    NGRAPH_OP_SCOPE(v4_Range_evaluate)
+    {
+        HostTensorPtr out = outputs[0];
+        HostTensorPtr start = inputs[0];
+        HostTensorPtr stop = inputs[1];
+        HostTensorPtr step = inputs[2];
+        return rangeop::evaluate_power(out, start, stop, step, m_output_type, 4);
+    }
     return false;
 }
@@ -496,10 +499,13 @@ void positive_range(T start_val, T stop_val, T step_val)
 bool op::v0::Range::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        op_v0_Range_evaluate, HostTensorPtr out = outputs[0]; HostTensorPtr start = inputs[0];
-        HostTensorPtr stop = inputs[1];
-        HostTensorPtr step = inputs[2];
-        return rangeop::evaluate_power(out, start, stop, step, start->get_element_type(), 0));
+    NGRAPH_OP_SCOPE(op_v0_Range_evaluate)
+    {
+        HostTensorPtr out = outputs[0];
+        HostTensorPtr start = inputs[0];
+        HostTensorPtr stop = inputs[1];
+        HostTensorPtr step = inputs[2];
+        return rangeop::evaluate_power(out, start, stop, step, start->get_element_type(), 0);
+    }
     return false;
 }

@@ -81,8 +81,10 @@ namespace reduce_l1
 bool op::v4::ReduceL1::evaluate(const HostTensorVector& outputs,
                                 const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v4_ReduceL1_evaluate,
-                    return reduce_l1::evaluate_sum(
-                        inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()));
+    NGRAPH_OP_SCOPE(v4_ReduceL1_evaluate)
+    {
+        return reduce_l1::evaluate_sum(
+            inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
+    }
     return false;
 }

@@ -79,8 +79,10 @@ namespace reduce_l2
 bool op::v4::ReduceL2::evaluate(const HostTensorVector& outputs,
                                 const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v4_ReduceL2_evaluate,
-                    return reduce_l2::evaluate_reduce_l2(
-                        inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()));
+    NGRAPH_OP_SCOPE(v4_ReduceL2_evaluate)
+    {
+        return reduce_l2::evaluate_reduce_l2(
+            inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
+    }
     return false;
 }

@@ -75,9 +75,12 @@ namespace
 bool op::v1::ReduceLogicalAnd::evaluate(const HostTensorVector& outputs,
                                         const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v1_ReduceLogicalAnd_evaluate, const auto& data = inputs[0];
-                    const auto& axes = inputs[1];
-                    const auto& out = outputs[0];
-                    return evaluate_reduce_logical_and(data, axes, out, get_keep_dims()));
+    NGRAPH_OP_SCOPE(v1_ReduceLogicalAnd_evaluate)
+    {
+        const auto& data = inputs[0];
+        const auto& axes = inputs[1];
+        const auto& out = outputs[0];
+        return evaluate_reduce_logical_and(data, axes, out, get_keep_dims());
+    }
     return false;
 }

@@ -75,9 +75,12 @@ namespace
 bool op::v1::ReduceLogicalOr::evaluate(const HostTensorVector& outputs,
                                        const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v1_ReduceLogicalOr_evaluate, const auto& data = inputs[0];
-                    const auto& axes = inputs[1];
-                    const auto& out = outputs[0];
-                    return evaluate_reduce_logical_or(data, axes, out, get_keep_dims()));
+    NGRAPH_OP_SCOPE(v1_ReduceLogicalOr_evaluate)
+    {
+        const auto& data = inputs[0];
+        const auto& axes = inputs[1];
+        const auto& out = outputs[0];
+        return evaluate_reduce_logical_or(data, axes, out, get_keep_dims());
+    }
     return false;
 }

@@ -78,8 +78,9 @@ namespace mean
 bool op::v1::ReduceMean::evaluate(const HostTensorVector& outputs,
                                   const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v1_ReduceMean_evaluate,
-        return mean::evaluate_mean(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()));
+    NGRAPH_OP_SCOPE(v1_ReduceMean_evaluate)
+    {
+        return mean::evaluate_mean(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
+    }
     return false;
 }

@@ -82,8 +82,10 @@ namespace reduce_prod
 bool op::v1::ReduceProd::evaluate(const HostTensorVector& outputs,
                                   const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v1_ReduceProd_evaluate,
-                    return reduce_prod::evaluate_product(
-                        inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()));
+    NGRAPH_OP_SCOPE(v1_ReduceProd_evaluate)
+    {
+        return reduce_prod::evaluate_product(
+            inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
+    }
     return false;
 }

@@ -83,8 +83,10 @@ namespace reduce_sum
 bool op::v1::ReduceSum::evaluate(const HostTensorVector& outputs,
                                  const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v1_ReduceSum_evaluate,
-                    return reduce_sum::evaluate_sum(
-                        inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()));
+    NGRAPH_OP_SCOPE(v1_ReduceSum_evaluate)
+    {
+        return reduce_sum::evaluate_sum(
+            inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
+    }
     return false;
 }

@@ -71,9 +71,10 @@ namespace relu
 bool op::Relu::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v0_Relu_evaluate,
-        return relu::evaluate_relu(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v0_Relu_evaluate)
+    {
+        return relu::evaluate_relu(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return false;
 }

@@ -230,8 +230,10 @@ shared_ptr<Node> op::v1::Reshape::clone_with_new_inputs(const OutputVector& new_
 #define COMPUTE_OUT_SHAPE_CASE(a, ...) \
     case element::Type_t::a: \
     { \
-        NGRAPH_OP_SCOPE(OV_CC_CAT3(compute_reshape_out_shape, _, a), \
-                        reshapeop::compute_output_shape<element::Type_t::a>(__VA_ARGS__)); \
+        NGRAPH_OP_SCOPE(OV_CC_CAT3(compute_reshape_out_shape, _, a)) \
+        { \
+            reshapeop::compute_output_shape<element::Type_t::a>(__VA_ARGS__); \
+        } \
     } \
     break;
@@ -343,7 +345,7 @@ bool op::v1::Reshape::evaluate_reshape(const HostTensorVector& outputs,
 bool op::v1::Reshape::evaluate(const HostTensorVector& outputs,
                                const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v1_Reshape_evaluate, return evaluate_reshape(outputs, inputs));
+    NGRAPH_OP_SCOPE(v1_Reshape_evaluate) { return evaluate_reshape(outputs, inputs); }
     return false;
 }

@@ -58,11 +58,14 @@ shared_ptr<Node> op::Result::clone_with_new_inputs(const OutputVector& new_args)
 bool op::Result::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(Result_evaluate, outputs[0]->set_unary(inputs[0]);
-                    void* output = outputs[0]->get_data_ptr();
-                    void* input = inputs[0]->get_data_ptr();
-                    memcpy(output, input, outputs[0]->get_size_in_bytes());
-                    return true);
+    NGRAPH_OP_SCOPE(Result_evaluate)
+    {
+        outputs[0]->set_unary(inputs[0]);
+        void* output = outputs[0]->get_data_ptr();
+        void* input = inputs[0]->get_data_ptr();
+        memcpy(output, input, outputs[0]->get_size_in_bytes());
+        return true;
+    }
     return false;
 }

@@ -163,8 +163,10 @@ namespace reverseop
 #define GET_AXES(a, ...) \
     case element::Type_t::a: \
     { \
-        NGRAPH_OP_SCOPE(OV_CC_CAT3(get_reverse_axes, _, a), \
-                        reverseop::get_axes<element::Type_t::a>(__VA_ARGS__)); \
+        NGRAPH_OP_SCOPE(OV_CC_CAT3(get_reverse_axes, _, a)) \
+        { \
+            reverseop::get_axes<element::Type_t::a>(__VA_ARGS__); \
+        } \
     } \
     break;
@@ -211,7 +213,7 @@ bool op::v1::Reverse::evaluate_reverse(const HostTensorVector& outputs,
 bool op::v1::Reverse::evaluate(const HostTensorVector& outputs,
                                const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v1_Reverse_evaluate, return evaluate_reverse(outputs, inputs));
+    NGRAPH_OP_SCOPE(v1_Reverse_evaluate) { return evaluate_reverse(outputs, inputs); }
     return false;
 }

@@ -299,9 +299,10 @@ namespace roi_alinop
 bool op::v3::ROIAlign::evaluate(const HostTensorVector& outputs,
                                 const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v3_ROIAlign_evaluate,
-        return roi_alinop::evaluate_roi_align(
-            inputs, outputs[0], m_pooled_h, m_pooled_w, m_sampling_ratio, m_spatial_scale, m_mode));
+    NGRAPH_OP_SCOPE(v3_ROIAlign_evaluate)
+    {
+        return roi_alinop::evaluate_roi_align(
+            inputs, outputs[0], m_pooled_h, m_pooled_w, m_sampling_ratio, m_spatial_scale, m_mode);
+    }
     return false;
 }

@@ -105,9 +105,11 @@ shared_ptr<Node> op::v5::Round::clone_with_new_inputs(const OutputVector& new_ar
 bool op::v5::Round::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v5_Round_evaluate,
-                    return roundop::evaluate_round(
-                        inputs[0], outputs[0], shape_size(get_output_shape(0)), get_mode()));
+    NGRAPH_OP_SCOPE(v5_Round_evaluate)
+    {
+        return roundop::evaluate_round(
+            inputs[0], outputs[0], shape_size(get_output_shape(0)), get_mode());
+    }
     return false;
 }

@@ -165,8 +165,10 @@ namespace scatter_element_update
 #define TYPE_AXS_CASE(a, ...) \
     case element::Type_t::a: \
     { \
-        NGRAPH_OP_SCOPE(OV_CC_CAT3(scatter_element_update_axs, _, a), \
-                        rc = evaluate<DT, IT, element::Type_t::a>(__VA_ARGS__)); \
+        NGRAPH_OP_SCOPE(OV_CC_CAT3(scatter_element_update_axs, _, a)) \
+        { \
+            rc = evaluate<DT, IT, element::Type_t::a>(__VA_ARGS__); \
+        } \
     } \
     break;
@@ -201,8 +203,10 @@ namespace scatter_element_update
 #define TYPE_IND_CASE(a, ...) \
     case element::Type_t::a: \
     { \
-        NGRAPH_OP_SCOPE(OV_CC_CAT3(scatter_element_update_ind, _, a), \
-                        rc = evaluate<DT, element::Type_t::a>(__VA_ARGS__)); \
+        NGRAPH_OP_SCOPE(OV_CC_CAT3(scatter_element_update_ind, _, a)) \
+        { \
+            rc = evaluate<DT, element::Type_t::a>(__VA_ARGS__); \
+        } \
     } \
     break;
@@ -295,7 +299,9 @@ bool op::v3::ScatterElementsUpdate::evaluate_scatter_element_update(
 bool op::v3::ScatterElementsUpdate::evaluate(const HostTensorVector& outputs,
                                              const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v3_ScatterElementsUpdate_evaluate,
-                    return evaluate_scatter_element_update(outputs, inputs));
+    NGRAPH_OP_SCOPE(v3_ScatterElementsUpdate_evaluate)
+    {
+        return evaluate_scatter_element_update(outputs, inputs);
+    }
     return false;
 }

@@ -55,9 +55,10 @@ namespace scatter_update
 #define GET_INDICES(a, ...) \
     case element::Type_t::a: \
     { \
-        NGRAPH_OP_SCOPE(OV_CC_CAT3(get_scatter_update_indices, _, a), \
-                        indices_casted_vector = \
-                            scatter_update::get_indices<element::Type_t::a>(__VA_ARGS__)); \
+        NGRAPH_OP_SCOPE(OV_CC_CAT3(get_scatter_update_indices, _, a)) \
+        { \
+            indices_casted_vector = scatter_update::get_indices<element::Type_t::a>(__VA_ARGS__); \
+        } \
     } \
     break;
@@ -113,6 +114,6 @@ bool op::v3::ScatterUpdate::evaluate_scatter_update(const HostTensorVector& outp
 bool op::v3::ScatterUpdate::evaluate(const HostTensorVector& outputs,
                                      const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v3_ScatterUpdate_evaluate, return evaluate_scatter_update(outputs, inputs));
+    NGRAPH_OP_SCOPE(v3_ScatterUpdate_evaluate) { return evaluate_scatter_update(outputs, inputs); }
     return false;
 }

@@ -156,9 +156,11 @@ namespace detail
 bool op::v1::Select::evaluate(const HostTensorVector& output_values,
                               const HostTensorVector& input_values) const
 {
-    NGRAPH_OP_SCOPE(v1_Select_evaluate, const auto autob = get_auto_broadcast();
-                    return detail::evaluate_select(
-                        output_values, input_values, autob, get_output_element_type(0)));
+    NGRAPH_OP_SCOPE(v1_Select_evaluate)
+    {
+        const auto autob = get_auto_broadcast();
+        return detail::evaluate_select(
+            output_values, input_values, autob, get_output_element_type(0));
+    }
     return false;
 }

@@ -154,8 +154,10 @@ namespace shape_of
 bool op::v3::ShapeOf::evaluate(const HostTensorVector& output_values,
                                const HostTensorVector& input_values) const
 {
-    NGRAPH_OP_SCOPE(v3_ShapeOf_evaluate,
-                    return shape_of::evaluate_shape_of(output_values[0], input_values[0]););
+    NGRAPH_OP_SCOPE(v3_ShapeOf_evaluate)
+    {
+        return shape_of::evaluate_shape_of(output_values[0], input_values[0]);
+    }
     return false;
 }
@@ -204,8 +206,10 @@ shared_ptr<Node> op::v0::ShapeOf::clone_with_new_inputs(const OutputVector& new_
 bool op::v0::ShapeOf::evaluate(const HostTensorVector& output_values,
                                const HostTensorVector& input_values) const
 {
-    NGRAPH_OP_SCOPE(v0_ShapeOf_evaluate,
-                    return shape_of::evaluate_shape_of(output_values[0], input_values[0]));
+    NGRAPH_OP_SCOPE(v0_ShapeOf_evaluate)
+    {
+        return shape_of::evaluate_shape_of(output_values[0], input_values[0]);
+    }
     return false;
 }

@@ -187,6 +187,6 @@ bool op::ShuffleChannels::evaluate_shuffle_channels(const HostTensorVector& outp
 bool op::ShuffleChannels::evaluate(const HostTensorVector& outputs,
                                    const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(ShuffleChannels_evaluate, return evaluate_shuffle_channels(outputs, inputs));
+    NGRAPH_OP_SCOPE(ShuffleChannels_evaluate) { return evaluate_shuffle_channels(outputs, inputs); }
     return false;
 }

@@ -72,8 +72,9 @@ namespace sigmoid
 bool op::Sigmoid::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v0_Sigmoid_evaluate,
-        return sigmoid::evaluate_sigmoid(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v0_Sigmoid_evaluate)
+    {
+        return sigmoid::evaluate_sigmoid(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return false;
 }

@@ -75,8 +75,9 @@ namespace signop
 bool op::Sign::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v0_Sign_evaluate,
-        return signop::evaluate_sign(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v0_Sign_evaluate)
+    {
+        return signop::evaluate_sign(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return false;
 }

@@ -77,8 +77,9 @@ namespace sinop
 bool op::Sin::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v0_Sin_evaluate,
-        return sinop::evaluate_sin(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v0_Sin_evaluate)
+    {
+        return sinop::evaluate_sin(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return false;
 }

@@ -77,8 +77,9 @@ namespace sinhop
 bool op::Sinh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v0_Sinh_evaluate,
-        return sinhop::evaluate_sinh(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v0_Sinh_evaluate)
+    {
+        return sinhop::evaluate_sinh(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return false;
 }

@@ -101,7 +101,10 @@ shared_ptr<Node> op::v1::Softmax::clone_with_new_inputs(const OutputVector& new_
 bool op::v1::Softmax::evaluate(const HostTensorVector& outputs,
                                const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v1_Softmax_evaluate, outputs[0]->set_unary(inputs[0]);
-                    return evaluate_softmax(inputs[0], outputs[0], AxisSet{m_axis}));
+    NGRAPH_OP_SCOPE(v1_Softmax_evaluate)
+    {
+        outputs[0]->set_unary(inputs[0]);
+        return evaluate_softmax(inputs[0], outputs[0], AxisSet{m_axis});
+    }
     return false;
 }

@@ -77,8 +77,9 @@ namespace softplus
 bool op::v4::SoftPlus::evaluate(const HostTensorVector& outputs,
                                 const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v4_SoftPlus_evaluate,
-        return softplus::evaluate_softplus(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v4_SoftPlus_evaluate)
+    {
+        return softplus::evaluate_softplus(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return false;
 }

@@ -273,6 +273,6 @@ bool ngraph::op::v1::SpaceToBatch::evaluate_space_to_batch(const HostTensorVecto
 bool ngraph::op::v1::SpaceToBatch::evaluate(const HostTensorVector& outputs,
                                             const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v1_SpaceToBatch, return evaluate_space_to_batch(outputs, inputs));
+    NGRAPH_OP_SCOPE(v1_SpaceToBatch) { return evaluate_space_to_batch(outputs, inputs); }
     return false;
 }

@@ -228,7 +228,7 @@ bool ngraph::op::v0::SpaceToDepth::evaluate_space_to_depth(const HostTensorVecto
 bool ngraph::op::v0::SpaceToDepth::evaluate(const HostTensorVector& outputs,
                                             const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v0_SpaceToDepth_evaluate, return evaluate_space_to_depth(outputs, inputs));
+    NGRAPH_OP_SCOPE(v0_SpaceToDepth_evaluate) { return evaluate_space_to_depth(outputs, inputs); }
     return false;
 }

@@ -149,7 +149,11 @@ namespace split
 bool op::v1::Split::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v1_Split_evaluate, const auto& data = inputs[0]; const auto& axis = inputs[1];
-                    return split::evaluate_split(data, axis, outputs, m_num_splits, this));
+    NGRAPH_OP_SCOPE(v1_Split_evaluate)
+    {
+        const auto& data = inputs[0];
+        const auto& axis = inputs[1];
+        return split::evaluate_split(data, axis, outputs, m_num_splits, this);
+    }
     return false;
 }

@@ -75,8 +75,9 @@ namespace sqrtop
 bool op::Sqrt::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v0_Sqrt_evaluate,
-        return sqrtop::evaluate_sqrt(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v0_Sqrt_evaluate)
+    {
+        return sqrtop::evaluate_sqrt(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return false;
 }


@@ -173,8 +173,10 @@ namespace squeeze
 bool op::v0::Squeeze::evaluate(const HostTensorVector& outputs,
                                const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v0_Squeeze_evaluate,
-                    return squeeze::evaluate_squeeze(inputs[0], inputs[1], outputs[0]));
+    NGRAPH_OP_SCOPE(v0_Squeeze_evaluate)
+    {
+        return squeeze::evaluate_squeeze(inputs[0], inputs[1], outputs[0]);
+    }
     return false;
 }


@@ -281,7 +281,8 @@ namespace strided_slice
 bool op::v1::StridedSlice::evaluate(const HostTensorVector& output_values,
                                     const HostTensorVector& input_values) const
 {
-    NGRAPH_OP_SCOPE(v1_StridedSlice_evaluate,
+    NGRAPH_OP_SCOPE(v1_StridedSlice_evaluate)
+    {
         return strided_slice::evaluate_strided_slice(
             input_values[0],
             input_values[1],
@@ -292,6 +293,7 @@ bool op::v1::StridedSlice::evaluate(const HostTensorVector& output_values,
             convert_mask_to_axis_set(get_new_axis_mask()),
             convert_mask_to_axis_set(get_shrink_axis_mask()),
             convert_mask_to_axis_set(get_ellipsis_mask()),
-            output_values[0]));
+            output_values[0]);
+    }
     return false;
 }


@@ -83,8 +83,9 @@ shared_ptr<Node> op::v1::Subtract::clone_with_new_inputs(const OutputVector& new
 bool op::v1::Subtract::evaluate(const HostTensorVector& outputs,
                                 const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v1_Subtract_evaluate,
-        return subtract::evaluate_subtract(inputs[0], inputs[1], outputs[0], get_autob()));
+    NGRAPH_OP_SCOPE(v1_Subtract_evaluate)
+    {
+        return subtract::evaluate_subtract(inputs[0], inputs[1], outputs[0], get_autob());
+    }
     return false;
 }


@@ -128,8 +128,9 @@ namespace swish
 bool op::v4::Swish::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v4_Swish_evaluate,
-        return swish::evaluate_swish(inputs, outputs[0], shape_size(get_output_shape(0))););
+    NGRAPH_OP_SCOPE(v4_Swish_evaluate)
+    {
+        return swish::evaluate_swish(inputs, outputs[0], shape_size(get_output_shape(0)));
+    }
     return false;
 }


@@ -78,8 +78,9 @@ namespace tanop
 bool op::Tan::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v0_Tan_evaluate,
-        return tanop::evaluate_tan(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v0_Tan_evaluate)
+    {
+        return tanop::evaluate_tan(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return false;
 }


@@ -76,8 +76,9 @@ namespace tanhop
 bool op::Tanh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        v0_Tanh_evaluate,
-        return tanhop::evaluate_tanh(inputs[0], outputs[0], shape_size(get_output_shape(0))));
+    NGRAPH_OP_SCOPE(v0_Tanh_evaluate)
+    {
+        return tanhop::evaluate_tanh(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    }
     return false;
 }


@@ -135,6 +135,6 @@ bool op::v0::Tile::evaluate_tile(const HostTensorVector& outputs,
 bool op::v0::Tile::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v0_Tile_evaluate, return evaluate_tile(outputs, inputs));
+    NGRAPH_OP_SCOPE(v0_Tile_evaluate) { return evaluate_tile(outputs, inputs); }
     return false;
 }


@@ -67,8 +67,10 @@ namespace topk
 #define EXECUTE_EVALUATE_TOPK(a, ...)                                                          \
     case element::Type_t::a:                                                                   \
     {                                                                                          \
-        NGRAPH_OP_SCOPE(OV_CC_CAT3(exec_topk_eval, _, a),                                      \
-                        rc = evaluate_execute<INPUT_ET, element::Type_t::a>(__VA_ARGS__));     \
+        NGRAPH_OP_SCOPE(OV_CC_CAT3(exec_topk_eval, _, a))                                      \
+        {                                                                                      \
+            rc = evaluate_execute<INPUT_ET, element::Type_t::a>(__VA_ARGS__);                  \
+        }                                                                                      \
     }                                                                                          \
     break
@@ -189,8 +191,10 @@ namespace topk
 #define CASE_GET_K(a, ...)                                                                     \
     case element::Type_t::a:                                                                   \
     {                                                                                          \
-        NGRAPH_OP_SCOPE(OV_CC_CAT3(topk_get_k, _, a),                                          \
-                        k = get_k_from_hosttensor<element::Type_t::a>(__VA_ARGS__));           \
+        NGRAPH_OP_SCOPE(OV_CC_CAT3(topk_get_k, _, a))                                          \
+        {                                                                                      \
+            k = get_k_from_hosttensor<element::Type_t::a>(__VA_ARGS__);                        \
+        }                                                                                      \
     }                                                                                          \
     break
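Because the scope macro is now an if-style statement head rather than a variadic call, these case macros give it its own braced block inside each case label, keeping the trailing break outside the guard. A compilable reduction of that dispatch shape (all DEMO_* names are placeholders, not the real element-type machinery):

#define CC_demo_case_f32 1
#define CC_demo_case_i64 0
#define DEMO_SCOPE(region) if (CC_##region)

enum class ElemType { f32, i64 };

// Mirrors the EXECUTE_EVALUATE_TOPK / CASE_GET_K layout: the guarded block is
// a single statement inside the case's braces, and break follows them.
#define DEMO_CASE(et, region)                                                  \
    case ElemType::et:                                                         \
    {                                                                          \
        DEMO_SCOPE(region) { rc = true; }                                      \
    }                                                                          \
    break

bool dispatch(ElemType et)
{
    bool rc = false;
    switch (et)
    {
        DEMO_CASE(f32, demo_case_f32); // enabled region: rc becomes true
        DEMO_CASE(i64, demo_case_i64); // disabled region: rc stays false
    }
    return rc;
}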
@@ -449,9 +453,10 @@ void op::v1::TopK::set_k(size_t k)
         op::Constant::create(element::i64, Shape{}, {k})->output(0));
 }

-bool op::v1::TopK::evaluate_topk(const HostTensorVector& outputs,
-                                 const HostTensorVector& inputs) const
+bool op::v1::TopK::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
+    NGRAPH_OP_SCOPE(v1_TopK_evaluate)
+    {
         Shape arg_shape = inputs[0]->get_shape();
         // 1. get axis, mode ( max/min), sort_type
         size_t axis = ngraph::normalize_axis(this, m_axis, arg_shape.size());
@@ -490,11 +495,7 @@ bool op::v1::TopK::evaluate_topk(const HostTensorVector& outputs,
             compute_max,
             sort_type,
             get_index_element_type());
     }
-
-bool op::v1::TopK::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
-{
-    NGRAPH_OP_SCOPE(v1_TopK_evaluate, return evaluate_topk(outputs, inputs));
     return false;
 }
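The merged v1::TopK::evaluate keeps the numbered step comments; step 1 relies on axis normalization. A hedged, standalone sketch of the usual semantics — the real ngraph::normalize_axis also takes the node for error reporting, so this signature is an assumption:

#include <cstddef>
#include <cstdint>
#include <stdexcept>

// Assumed behaviour: map a possibly negative axis into [0, rank), throwing
// when it is out of range, so -1 addresses the innermost dimension.
std::size_t normalize_axis_sketch(std::int64_t axis, std::size_t rank)
{
    const auto r = static_cast<std::int64_t>(rank);
    if (axis < -r || axis >= r)
    {
        throw std::out_of_range("axis out of range");
    }
    return static_cast<std::size_t>(axis < 0 ? axis + r : axis);
}

For example, with rank 3, axis -1 normalizes to 2 and axis -3 to 0.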
@@ -577,6 +578,6 @@ shared_ptr<Node> op::v3::TopK::clone_with_new_inputs(const OutputVector& new_arg
 bool op::v3::TopK::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v3_TopK_evaluate, return op::v1::TopK::evaluate(outputs, inputs));
+    NGRAPH_OP_SCOPE(v3_TopK_evaluate) { return op::v1::TopK::evaluate(outputs, inputs); }
     return false;
 }


@@ -144,8 +144,9 @@ namespace transpose
 bool op::v1::Transpose::evaluate(const HostTensorVector& output_values,
                                  const HostTensorVector& input_values) const
 {
-    NGRAPH_OP_SCOPE(
-        v1_Transpose_evaluate,
-        return transpose::evaluate_transpose(input_values[0], input_values[1], output_values[0]));
+    NGRAPH_OP_SCOPE(v1_Transpose_evaluate)
+    {
+        return transpose::evaluate_transpose(input_values[0], input_values[1], output_values[0]);
+    }
     return false;
 }
} }

View File

@ -150,8 +150,10 @@ namespace unsqueeze
bool op::v0::Unsqueeze::evaluate(const HostTensorVector& outputs, bool op::v0::Unsqueeze::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const const HostTensorVector& inputs) const
{ {
NGRAPH_OP_SCOPE(v0_Unsqueeze_evaluate, NGRAPH_OP_SCOPE(v0_Unsqueeze_evaluate)
return unsqueeze::evaluate_unsqueeze(inputs[0], inputs[1], outputs[0])); {
return unsqueeze::evaluate_unsqueeze(inputs[0], inputs[1], outputs[0]);
}
return false; return false;
} }

View File

@ -361,14 +361,16 @@ bool op::util::BroadcastBase::evaluate(const HostTensorPtr& arg0,
const HostTensorPtr& out, const HostTensorPtr& out,
const AxisSet& broadcast_axes) const const AxisSet& broadcast_axes) const
{ {
NGRAPH_OP_SCOPE(util_BroadcastBase_evaluate_axes, NGRAPH_OP_SCOPE(util_BroadcastBase_evaluate_axes)
{
runtime::reference::broadcast(arg0->get_data_ptr<const char>(), runtime::reference::broadcast(arg0->get_data_ptr<const char>(),
out->get_data_ptr<char>(), out->get_data_ptr<char>(),
arg0->get_shape(), arg0->get_shape(),
out->get_shape(), out->get_shape(),
broadcast_axes, broadcast_axes,
arg0->get_element_type().size()); arg0->get_element_type().size());
return true); return true;
}
return false; return false;
} }
@@ -500,14 +502,16 @@ Shape op::util::BroadcastBase::get_target_shape(const HostTensorPtr& input1) con
 bool op::util::BroadcastBase::evaluate(const HostTensorVector& outputs,
                                        const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(
-        util_BroadcastBase_evaluate, Shape target_shape = get_target_shape(inputs[1]);
+    NGRAPH_OP_SCOPE(util_BroadcastBase_evaluate)
+    {
+        Shape target_shape = get_target_shape(inputs[1]);
         PartialShape result_shape;
         std::pair<bool, AxisSet> pair_broadcast_axes;
         auto arg_shape = inputs[0]->get_shape();
-        if (m_mode.m_type == BroadcastType::NONE) {
+        if (m_mode.m_type == BroadcastType::NONE)
+        {
             AxisVector axes_mapping_val;
             const auto axes_mapping_constant =
                 as_type_ptr<op::v0::Constant>(input_value(2).get_node_shared_ptr());
@@ -523,18 +527,27 @@ bool op::util::BroadcastBase::evaluate(const HostTensorVector& outputs,
             pair_broadcast_axes = get_broadcast_axes_none(axes_mapping_val, target_shape.size());
             validate_target_shape_none(inputs[0]->get_shape(), axes_mapping_val, target_shape);
             result_shape = target_shape;
-        } else if (m_mode.m_type == BroadcastType::PDPD) {
+        }
+        else if (m_mode.m_type == BroadcastType::PDPD)
+        {
             result_shape = get_result_shape_pdpd(arg_shape, target_shape, m_mode);
             pair_broadcast_axes =
                 get_broadcast_axes_numpy_pdpd(arg_shape, result_shape.to_shape(), m_mode);
-        } else if (m_mode.m_type == BroadcastType::NUMPY) {
+        }
+        else if (m_mode.m_type == BroadcastType::NUMPY)
+        {
             result_shape = target_shape;
             validate_target_shape_numpy(arg_shape, target_shape);
             pair_broadcast_axes =
                 get_broadcast_axes_numpy_pdpd(arg_shape, result_shape.to_shape(), m_mode);
-        } else { ngraph_error("Unsupported BroadcastType "); }
+        }
+        else
+        {
+            ngraph_error("Unsupported BroadcastType ");
+        }
         return evaluate_broadcast(
-            inputs[0], outputs[0], pair_broadcast_axes, result_shape.to_shape()));
+            inputs[0], outputs[0], pair_broadcast_axes, result_shape.to_shape());
+    }
     return false;
 }
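For the NONE branch the axes come from the explicit axes_mapping input, while the NUMPY and PDPD branches derive them from the shapes. A hedged sketch of the NumPy-style derivation — not the library's get_broadcast_axes_numpy_pdpd, which also handles the PDPD start-axis mode:

#include <cstddef>
#include <set>
#include <vector>

// Right-align the input shape against the result shape; every leading axis
// the input lacks, plus every aligned axis where the input dimension is 1
// and the result dimension is not, is a broadcast axis.
std::set<std::size_t> numpy_broadcast_axes(const std::vector<std::size_t>& arg_shape,
                                           const std::vector<std::size_t>& result_shape)
{
    std::set<std::size_t> axes;
    const std::size_t offset = result_shape.size() - arg_shape.size();
    for (std::size_t i = 0; i < offset; ++i)
    {
        axes.insert(i);
    }
    for (std::size_t i = 0; i < arg_shape.size(); ++i)
    {
        if (arg_shape[i] == 1 && result_shape[i + offset] != 1)
        {
            axes.insert(i + offset);
        }
    }
    return axes;
}

For instance, broadcasting {3, 1} to {2, 3, 4} yields axes {0, 2}.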


@@ -216,6 +216,6 @@ bool op::v1::VariadicSplit::evaluate_variadic_split(const HostTensorVector& inpu
 bool op::v1::VariadicSplit::evaluate(const HostTensorVector& outputs,
                                      const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v1_VariadicSplit_evaluate, return evaluate_variadic_split(inputs, outputs));
+    NGRAPH_OP_SCOPE(v1_VariadicSplit_evaluate) { return evaluate_variadic_split(inputs, outputs); }
     return false;
 }


@@ -86,8 +86,10 @@ namespace logxor
 bool op::v1::LogicalXor::evaluate(const HostTensorVector& outputs,
                                   const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v1_LogicalXor_evaluate,
-                    return logxor::evaluate_logxor(inputs[0], inputs[1], outputs[0], get_autob()));
+    NGRAPH_OP_SCOPE(v1_LogicalXor_evaluate)
+    {
+        return logxor::evaluate_logxor(inputs[0], inputs[1], outputs[0], get_autob());
+    }
     return false;
 }
@@ -109,7 +111,9 @@ shared_ptr<Node> op::v0::Xor::clone_with_new_inputs(const OutputVector& new_args
 bool op::v0::Xor::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
 {
-    NGRAPH_OP_SCOPE(v0_Xor_evaluate,
-                    return logxor::evaluate_logxor(inputs[0], inputs[1], outputs[0], get_autob()));
+    NGRAPH_OP_SCOPE(v0_Xor_evaluate)
+    {
+        return logxor::evaluate_logxor(inputs[0], inputs[1], outputs[0], get_autob());
+    }
     return false;
 }
