Changed OV_SCOPE semantics (#3692)

* Added if-based DEFINE construction

* Changed OV_SCOPE semantics

* Fixed the code style

* Fixed redundant lines
Ilya Churaev 2020-12-22 18:29:41 +03:00 committed by GitHub
parent 967c040e19
commit 1926179b65
101 changed files with 541 additions and 387 deletions
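The gist of the change, condensed from the hunks below: the old form OV_SCOPE(module, region, body;) passed the region body as a trailing macro argument, which is awkward for multi-statement bodies and for any body containing a top-level comma; the new form OV_SCOPE(module, region) expands to an `if` head guarded by the region's define, so the body follows as an ordinary braced block. A minimal self-contained sketch of such an if-based scope macro (MY_SCOPE, MY_CAT3 and the Demo_* flags are illustrative stand-ins, not the actual selective_build.h definitions):

#include <cassert>

// Illustrative stand-ins for OV_CC_CAT3 / OV_SCOPE; not the real definitions.
// Token-paste Module, '_', region into one flag name, e.g. Demo_Scope0.
#define MY_CAT3(a, b, c) a##b##c
// The flag expands to 0 or 1, so the macro becomes an `if` statement head and
// the caller's body follows as a normal block -- the new OV_SCOPE shape.
#define MY_SCOPE(Module, region) if (MY_CAT3(Module, _, region))

#define Demo_Scope0 1 // enabled region: its block executes
#define Demo_Scope1 0 // disabled region: its block compiles but never runs

int main()
{
    int n = 0;
    MY_SCOPE(Demo, Scope0) { n = 42; } // runs
    MY_SCOPE(Demo, Scope1) { n = 43; } // skipped
    assert(n == 42);
    return 0;
}

The first test hunk below exercises the same behavior with the real macro: the OV_SCOPE(CCTests, Scope0) block runs because CCTests_Scope0 is defined to 1, the Scope1 statement is skipped, and n stays 42.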

@@ -62,15 +62,16 @@ struct TestNode : public TestNodeBase {
TEST(ConditionalCompilationTests, SimpleScope) {
#define CCTests_Scope0 1
int n = 0;
// Simple scope is enabled
OV_SCOPE(CCTests, Scope0, n = 42;);
OV_SCOPE(CCTests, Scope0) {
n = 42;
}
EXPECT_EQ(n, 42);
// Simple scope is disabled
OV_SCOPE(CCTests, Scope1, n = 0;);
OV_SCOPE(CCTests, Scope1) n = 43;
EXPECT_EQ(n, 42);
#undef CCTests_Scope0

@@ -63,10 +63,12 @@ struct TestNode : public TestNodeBase {
TEST(ConditionalCompilationTests, SimpleScopeAnalysys) {
int n = 0;
OV_SCOPE(CCTests, Scope0, n = 42;);
OV_SCOPE(CCTests, Scope0) n = 42;
EXPECT_EQ(n, 42);
OV_SCOPE(CCTests, Scope1, n = 43;);
OV_SCOPE(CCTests, Scope1) {
n = 43;
}
EXPECT_EQ(n, 43);
}

@@ -115,10 +115,6 @@ namespace ngraph
const PartialShape input_partial_shape,
const int64_t k) const;
void set_axis(const Rank input_rank, const int64_t axis);
private:
bool evaluate_topk(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
} // namespace v1

@@ -40,26 +40,27 @@ namespace ngraph
}
#if defined(SELECTIVE_BUILD) || defined(SELECTIVE_BUILD_ANALYZER)
#define NGRAPH_OP_SCOPE(region, ...) OV_SCOPE(ngraph_op, region, __VA_ARGS__)
#define NGRAPH_OP_SCOPE(region) OV_SCOPE(ngraph_op, region)
#else
#define NGRAPH_OP_SCOPE(region, ...) \
OV_ITT_SCOPED_TASK(itt::domains::ngraph_op, #region); \
__VA_ARGS__
#define NGRAPH_OP_SCOPE(region) OV_ITT_SCOPED_TASK(itt::domains::ngraph_op, #region);
#endif
#define NGRAPH_TYPE_CASE(region, a, ...) \
case element::Type_t::a: \
{ \
OV_SCOPE( \
ngraph_op, OV_CC_CAT3(region, _, a), rc = evaluate<element::Type_t::a>(__VA_ARGS__)); \
OV_SCOPE(ngraph_op, OV_CC_CAT3(region, _, a)) \
{ \
rc = evaluate<element::Type_t::a>(__VA_ARGS__); \
} \
} \
break;
break
#define NGRAPH_COPY_TENSOR(region, a, ...) \
case element::Type_t::a: \
{ \
OV_SCOPE(ngraph_op, \
OV_CC_CAT3(region, _, a), \
rc = copy_tensor<element::Type_t::a>(__VA_ARGS__)); \
OV_SCOPE(ngraph_op, OV_CC_CAT3(region, _, a)) \
{ \
rc = copy_tensor<element::Type_t::a>(__VA_ARGS__); \
} \
} \
break;
break

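For context on how the reworked NGRAPH_TYPE_CASE above is consumed: each use forms a complete case label whose scoped block assigns rc, and the macro now ends in a bare break so the semicolon at the use site completes the statement. A condensed sketch of the dispatch pattern behind the per-op evaluate functions that follow (reconstructed for illustration, not quoted from this diff; the namespace and helper names are assumptions):

// Assumes the usual ngraph includes and namespaces, as in the op .cpp files
// in this diff; abs_sketch and its contents are illustrative names.
namespace abs_sketch
{
    template <element::Type_t ET>
    bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
    {
        using T = typename element_type_traits<ET>::value_type;
        runtime::reference::abs<T>(arg0->get_data_ptr<ET>(), out->get_data_ptr<ET>(), count);
        return true;
    }

    bool evaluate_abs(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
    {
        bool rc = true;
        out->set_unary(arg0);
        switch (arg0->get_element_type())
        {
            // Each line expands to:
            //   case element::Type_t::X:
            //   { OV_SCOPE(ngraph_op, evaluate_abs_X) { rc = evaluate<element::Type_t::X>(...); } }
            //   break;
            NGRAPH_TYPE_CASE(evaluate_abs, i32, arg0, out, count);
            NGRAPH_TYPE_CASE(evaluate_abs, f32, arg0, out, count);
        default: rc = false; break;
        }
        return rc;
    }
}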
@@ -74,8 +74,9 @@ namespace absop
bool op::Abs::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
bool rc = false;
NGRAPH_OP_SCOPE(
v0_Abs_evaluate,
rc = absop::evaluate_abs(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v0_Abs_evaluate)
{
rc = absop::evaluate_abs(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return rc;
}

@@ -82,8 +82,9 @@ namespace acosop
bool op::Acos::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
bool rc = false;
NGRAPH_OP_SCOPE(
v0_Acos_evaluate,
rc = acosop::evaluate_acos(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v0_Acos_evaluate)
{
rc = acosop::evaluate_acos(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return rc;
}

@@ -71,6 +71,6 @@ namespace acoshop
bool op::v3::Acosh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
bool rc = false;
NGRAPH_OP_SCOPE(v3_Acosh_evaluate, rc = acoshop::evaluate_acosh(inputs[0], outputs[0]));
NGRAPH_OP_SCOPE(v3_Acosh_evaluate) { rc = acoshop::evaluate_acosh(inputs[0], outputs[0]); }
return rc;
}

@@ -94,7 +94,9 @@ shared_ptr<Node> op::v1::Add::clone_with_new_inputs(const OutputVector& new_args
bool op::v1::Add::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
bool rc = false;
NGRAPH_OP_SCOPE(v1_Add_evaluate,
rc = add::evaluate_add(inputs[0], inputs[1], outputs[0], get_autob()));
NGRAPH_OP_SCOPE(v1_Add_evaluate)
{
rc = add::evaluate_add(inputs[0], inputs[1], outputs[0], get_autob());
}
return rc;
}

@@ -87,7 +87,9 @@ bool op::v1::LogicalAnd::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
bool rc = false;
NGRAPH_OP_SCOPE(v1_LogicalAnd_evaluate,
rc = logand::evaluate_logand(inputs[0], inputs[1], outputs[0], get_autob()));
NGRAPH_OP_SCOPE(v1_LogicalAnd_evaluate)
{
rc = logand::evaluate_logand(inputs[0], inputs[1], outputs[0], get_autob());
}
return rc;
}

@@ -83,8 +83,9 @@ namespace asinop
bool op::Asin::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
bool rc = false;
NGRAPH_OP_SCOPE(
v0_Asin_evaluate,
rc = asinop::evaluate_asin(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v0_Asin_evaluate)
{
rc = asinop::evaluate_asin(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return rc;
}

@@ -71,6 +71,6 @@ namespace asinhop
bool op::v3::Asinh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
bool rc = false;
NGRAPH_OP_SCOPE(v3_Asinh_evaluate, rc = asinhop::evaluate_asinh(inputs[0], outputs[0]));
NGRAPH_OP_SCOPE(v3_Asinh_evaluate) { rc = asinhop::evaluate_asinh(inputs[0], outputs[0]); }
return rc;
}

@@ -82,8 +82,9 @@ namespace atanop
bool op::Atan::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
bool rc = false;
NGRAPH_OP_SCOPE(
v0_Atan_evaluate,
rc = atanop::evaluate_atan(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v0_Atan_evaluate)
{
rc = atanop::evaluate_atan(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return rc;
}

@@ -71,6 +71,6 @@ namespace atanhop
bool op::v3::Atanh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
bool rc = false;
NGRAPH_OP_SCOPE(v3_Atanh_evaluate, rc = atanhop::evaluate_atanh(inputs[0], outputs[0]));
NGRAPH_OP_SCOPE(v3_Atanh_evaluate) { rc = atanhop::evaluate_atanh(inputs[0], outputs[0]); }
return rc;
}

@@ -259,6 +259,6 @@ namespace
bool ngraph::op::v1::BatchToSpace::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_BatchToSpace, return batch_to_space_evaluate(outputs, inputs));
NGRAPH_OP_SCOPE(v1_BatchToSpace) { return batch_to_space_evaluate(outputs, inputs); }
return false;
}

@@ -228,7 +228,7 @@ bool op::v3::Broadcast::visit_attributes(AttributeVisitor& visitor)
bool op::v3::Broadcast::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v3_Broadcast_evaluate, return broadcast_evaluate(outputs, inputs));
NGRAPH_OP_SCOPE(v3_Broadcast_evaluate) { return broadcast_evaluate(outputs, inputs); }
return false;
}
@@ -318,7 +318,9 @@ bool op::v1::Broadcast::visit_attributes(AttributeVisitor& visitor)
bool op::v1::Broadcast::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_Broadcast_evaluate,
return op::util::BroadcastBase::evaluate(outputs, inputs));
NGRAPH_OP_SCOPE(v1_Broadcast_evaluate)
{
return op::util::BroadcastBase::evaluate(outputs, inputs);
}
return false;
}

@@ -83,8 +83,9 @@ namespace ceiling
bool op::Ceiling::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v0_Ceiling_evaluate,
return ceiling::evaluate_ceiling(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v0_Ceiling_evaluate)
{
return ceiling::evaluate_ceiling(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
}

@@ -86,10 +86,11 @@ namespace clamp
bool op::v0::Clamp::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v0_Clamp_evaluate,
NGRAPH_OP_SCOPE(v0_Clamp_evaluate)
{
return clamp::evaluate_clamp(
inputs[0], outputs[0], get_min(), get_max(), shape_size(get_input_shape(0))));
inputs[0], outputs[0], get_min(), get_max(), shape_size(get_input_shape(0)));
}
return false;
}

@@ -144,9 +144,10 @@ namespace
bool op::Concat::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v0_Concat_evaluate,
auto concat_axis =
get_axis() < 0 ? get_axis() + inputs[0]->get_shape().size() : get_axis();
return evaluate_concat(inputs, outputs[0], concat_axis));
NGRAPH_OP_SCOPE(v0_Concat_evaluate)
{
auto concat_axis = get_axis() < 0 ? get_axis() + inputs[0]->get_shape().size() : get_axis();
return evaluate_concat(inputs, outputs[0], concat_axis);
}
return false;
}

@@ -638,9 +638,12 @@ bool op::v0::Constant::visit_attributes(AttributeVisitor& visitor)
bool op::v0::Constant::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v0_Constant_evaluate, auto output = outputs[0];
output->write(get_data_ptr(), output->get_size_in_bytes());
return true);
NGRAPH_OP_SCOPE(v0_Constant_evaluate)
{
auto output = outputs[0];
output->write(get_data_ptr(), output->get_size_in_bytes());
return true;
}
return false;
}

@@ -66,8 +66,10 @@ namespace convert
#define TYPE_OUT_CASE(a, ...) \
case element::Type_t::a: \
{ \
NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_covert_out, _, a), \
rc = evaluate<INPUT_ET, element::Type_t::a>(__VA_ARGS__)); \
NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_covert_out, _, a)) \
{ \
rc = evaluate<INPUT_ET, element::Type_t::a>(__VA_ARGS__); \
} \
} \
break
@@ -117,7 +119,9 @@ namespace convert
bool op::v0::Convert::evaluate(const HostTensorVector& output_values,
const HostTensorVector& input_values) const
{
NGRAPH_OP_SCOPE(v0_Convert_evaluate,
return convert::evaluate_convert(input_values[0], output_values[0]));
NGRAPH_OP_SCOPE(v0_Convert_evaluate)
{
return convert::evaluate_convert(input_values[0], output_values[0]);
}
return false;
}

@@ -78,8 +78,9 @@ namespace cosop
bool op::Cos::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v0_Cos_evaluate,
return cosop::evaluate_cos(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v0_Cos_evaluate)
{
return cosop::evaluate_cos(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
}

@@ -77,8 +77,9 @@ namespace coshop
bool op::Cosh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v0_Cosh_evaluate,
return coshop::evaluate_cosh(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v0_Cosh_evaluate)
{
return coshop::evaluate_cosh(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
}

@@ -243,7 +243,7 @@ bool op::DepthToSpace::evaluate_depth_to_space(const HostTensorVector& outputs,
bool op::DepthToSpace::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v0_DepthToSpace_evaluate, return evaluate_depth_to_space(outputs, inputs));
NGRAPH_OP_SCOPE(v0_DepthToSpace_evaluate) { return evaluate_depth_to_space(outputs, inputs); }
return false;
}
namespace ngraph

@@ -106,8 +106,10 @@ shared_ptr<Node> op::v1::Divide::clone_with_new_inputs(const OutputVector& new_a
bool op::v1::Divide::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_Divide_evaluate,
return divide::evaluate_divide(
inputs[0], inputs[1], outputs[0], get_autob(), is_pythondiv()));
NGRAPH_OP_SCOPE(v1_Divide_evaluate)
{
return divide::evaluate_divide(
inputs[0], inputs[1], outputs[0], get_autob(), is_pythondiv());
}
return false;
}

@@ -83,7 +83,9 @@ shared_ptr<Node> op::v1::Equal::clone_with_new_inputs(const OutputVector& new_ar
bool op::v1::Equal::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_Equal_evaluate,
return equal::evaluate_equal(inputs[0], inputs[1], outputs[0], get_autob()));
NGRAPH_OP_SCOPE(v1_Equal_evaluate)
{
return equal::evaluate_equal(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
}

@@ -76,8 +76,9 @@ namespace erfop
bool op::Erf::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v0_Erf_evaluate,
return erfop::evaluate_erf(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v0_Erf_evaluate)
{
return erfop::evaluate_erf(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
}

@@ -76,8 +76,9 @@ namespace expop
bool op::Exp::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v0_Exp_evaluate,
return expop::evaluate_exp(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v0_Exp_evaluate)
{
return expop::evaluate_exp(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
}

@@ -88,8 +88,9 @@ namespace floorop
bool op::Floor::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v0_Floor_evaluate,
return floorop::evaluate_floor(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v0_Floor_evaluate)
{
return floorop::evaluate_floor(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
}

@@ -82,9 +82,10 @@ namespace floor_mod
bool op::v1::FloorMod::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v1_FloorMod_evaluate,
return floor_mod::evaluate_floor_mod(inputs[0], inputs[1], outputs[0], get_autob()));
NGRAPH_OP_SCOPE(v1_FloorMod_evaluate)
{
return floor_mod::evaluate_floor_mod(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
}

@@ -313,7 +313,7 @@ bool op::v1::Gather::evaluate_gather(const HostTensorVector& outputs,
bool op::v1::Gather::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_Gather_evaluate, return evaluate_gather(outputs, inputs));
NGRAPH_OP_SCOPE(v1_Gather_evaluate) { return evaluate_gather(outputs, inputs); }
return false;
}

@@ -84,8 +84,9 @@ shared_ptr<Node> op::v1::Greater::clone_with_new_inputs(const OutputVector& new_
bool op::v1::Greater::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v1_Greater_evaluate,
return greaterop::evaluate_greater(inputs[0], inputs[1], outputs[0], get_autob()));
NGRAPH_OP_SCOPE(v1_Greater_evaluate)
{
return greaterop::evaluate_greater(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
}

@@ -84,8 +84,10 @@ shared_ptr<Node> op::v1::GreaterEqual::clone_with_new_inputs(const OutputVector&
bool op::v1::GreaterEqual::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_GreaterEqual_evaluate,
return greater_equalop::evaluate_greater_equal(
inputs[0], inputs[1], outputs[0], get_autob()));
NGRAPH_OP_SCOPE(v1_GreaterEqual_evaluate)
{
return greater_equalop::evaluate_greater_equal(
inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
}

@@ -73,8 +73,9 @@ namespace
bool op::v5::HSigmoid::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v5_HSigmoid_evaluate,
return evaluate_hsigmoid(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v5_HSigmoid_evaluate)
{
return evaluate_hsigmoid(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
}

@@ -72,8 +72,9 @@ namespace hswish
bool op::v4::HSwish::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v4_HSwish_evaluate,
return hswish::evaluate_hswish(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v4_HSwish_evaluate)
{
return hswish::evaluate_hswish(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
}

@@ -497,7 +497,7 @@ bool op::v4::Interpolate::evaluate_interpolate(const HostTensorVector& outputs,
bool op::v4::Interpolate::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v4_Interpolate_evaluate, return evaluate_interpolate(outputs, inputs));
NGRAPH_OP_SCOPE(v4_Interpolate_evaluate) { return evaluate_interpolate(outputs, inputs); }
return false;
}

@@ -83,7 +83,9 @@ shared_ptr<Node> op::v1::Less::clone_with_new_inputs(const OutputVector& new_arg
bool op::v1::Less::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_Less_evaluate,
return lessop::evaluate_less(inputs[0], inputs[1], outputs[0], get_autob()));
NGRAPH_OP_SCOPE(v1_Less_evaluate)
{
return lessop::evaluate_less(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
}

@@ -84,8 +84,9 @@ namespace less_equalop
bool op::v1::LessEqual::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v1_LessEqual_evaluate,
return less_equalop::evaluate_less_equal(inputs[0], inputs[1], outputs[0], get_autob()));
NGRAPH_OP_SCOPE(v1_LessEqual_evaluate)
{
return less_equalop::evaluate_less_equal(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
}

@@ -76,8 +76,9 @@ namespace logop
bool op::Log::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v0_Log_evaluate,
return logop::evaluate_log(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v0_Log_evaluate)
{
return logop::evaluate_log(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
}

@@ -390,13 +390,15 @@ Output<Node> op::v5::Loop::get_concatenated_slices(const Output<Node>& value,
bool op::v5::Loop::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v5_Loop_evaluate,
runtime::reference::loop(m_body,
m_output_descriptions,
m_input_descriptions,
m_special_body_ports,
outputs,
inputs);
return true);
NGRAPH_OP_SCOPE(v5_Loop_evaluate)
{
runtime::reference::loop(m_body,
m_output_descriptions,
m_input_descriptions,
m_special_body_ports,
outputs,
inputs);
return true;
}
return false;
}

@@ -259,9 +259,11 @@ namespace matmul
bool op::MatMul::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v0_MatMul_evaluate,
return matmul::evaluate_matmul(
inputs[0], inputs[1], outputs[0], get_transpose_a(), get_transpose_b()));
NGRAPH_OP_SCOPE(v0_MatMul_evaluate)
{
return matmul::evaluate_matmul(
inputs[0], inputs[1], outputs[0], get_transpose_a(), get_transpose_b());
}
return false;
}

@@ -77,8 +77,9 @@ shared_ptr<Node> op::v1::ReduceMax::clone_with_new_inputs(const OutputVector& ne
bool op::v1::ReduceMax::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v1_ReduceMax_evaluate,
return maxop::evaluate_max(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()));
NGRAPH_OP_SCOPE(v1_ReduceMax_evaluate)
{
return maxop::evaluate_max(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
}
return false;
}

@@ -229,6 +229,6 @@ bool op::v1::MaxPool::evaluate_maxpool(const HostTensorVector& outputs,
bool op::v1::MaxPool::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_MaxPool_evaluate, return evaluate_maxpool(outputs, inputs));
NGRAPH_OP_SCOPE(v1_MaxPool_evaluate) { return evaluate_maxpool(outputs, inputs); }
return false;
}

@@ -91,8 +91,9 @@ shared_ptr<Node> op::v1::Maximum::clone_with_new_inputs(const OutputVector& new_
bool op::v1::Maximum::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v1_Maximum_evaluate,
return maximumop::evaluate_maximum(inputs[0], inputs[1], outputs[0], get_autob()));
NGRAPH_OP_SCOPE(v1_Maximum_evaluate)
{
return maximumop::evaluate_maximum(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
}

@@ -79,8 +79,9 @@ shared_ptr<Node> op::v1::ReduceMin::clone_with_new_inputs(const OutputVector& ne
bool op::v1::ReduceMin::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v1_ReduceMin_evaluate,
return minop::evaluate_min(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()));
NGRAPH_OP_SCOPE(v1_ReduceMin_evaluate)
{
return minop::evaluate_min(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
}
return false;
}

@@ -89,8 +89,9 @@ shared_ptr<Node> op::v1::Minimum::clone_with_new_inputs(const OutputVector& new_
bool op::v1::Minimum::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v1_Minimum_evaluate,
return minimumop::evaluate_minimum(inputs[0], inputs[1], outputs[0], get_autob()));
NGRAPH_OP_SCOPE(v1_Minimum_evaluate)
{
return minimumop::evaluate_minimum(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
}

@@ -77,8 +77,9 @@ namespace mish
bool op::v4::Mish::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v4_Mish_evaluate,
return mish::evaluate_mish(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v4_Mish_evaluate)
{
return mish::evaluate_mish(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
}

@@ -84,9 +84,10 @@ shared_ptr<Node> op::v0::Multiply::clone_with_new_inputs(const OutputVector& new
bool op::v0::Multiply::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v0_Multiply_evaluate,
return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob()));
NGRAPH_OP_SCOPE(v0_Multiply_evaluate)
{
return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
}
@@ -111,8 +112,9 @@ shared_ptr<Node> op::v1::Multiply::clone_with_new_inputs(const OutputVector& new
bool op::v1::Multiply::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v1_Multiply_evaluate,
return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob()));
NGRAPH_OP_SCOPE(v1_Multiply_evaluate)
{
return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
}

@@ -73,9 +73,11 @@ namespace negativeop
bool op::Negative::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v0_Negative_evaluate,
return negativeop::evaluate_negative(
inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v0_Negative_evaluate)
{
return negativeop::evaluate_negative(
inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
}

@@ -118,10 +118,13 @@ namespace nonzero
#define TYPE_OUT_CASE(a, ...) \
case element::Type_t::a: \
{ \
NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_nonzero_out, _, a), \
rc = evaluate_nonzero_execute<INPUT_ET, element::Type_t::a>(__VA_ARGS__)); \
NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_nonzero_out, _, a)) \
{ \
rc = evaluate_nonzero_execute<INPUT_ET, element::Type_t::a>(__VA_ARGS__); \
} \
} \
break;
break
template <element::Type_t INPUT_ET>
bool evaluate(const HostTensorPtr& input, const HostTensorPtr& output)
{
@@ -158,6 +161,9 @@ namespace nonzero
bool op::v3::NonZero::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v3_NonZero_evaluate, return nonzero::evaluate_nonzero(inputs[0], outputs[0]));
NGRAPH_OP_SCOPE(v3_NonZero_evaluate)
{
return nonzero::evaluate_nonzero(inputs[0], outputs[0]);
}
return false;
}

@@ -91,8 +91,9 @@ namespace notop
bool op::v1::LogicalNot::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v1_LogicalNot_evaluate,
return notop::evaluate_not(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v1_LogicalNot_evaluate)
{
return notop::evaluate_not(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
}

@@ -84,9 +84,10 @@ shared_ptr<Node> op::v1::NotEqual::clone_with_new_inputs(const OutputVector& new
bool op::v1::NotEqual::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v1_NotEqual_evaluate,
return not_equalop::evaluate_not_equal(inputs[0], inputs[1], outputs[0], get_autob()));
NGRAPH_OP_SCOPE(v1_NotEqual_evaluate)
{
return not_equalop::evaluate_not_equal(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
}

@@ -159,12 +159,14 @@ namespace detail
#define TYPE_OUT_CASE(a, ...) \
case element::Type_t::a: \
{ \
NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_one_hot_out, _, a), \
using IT = typename element_type_traits<element::Type_t::a>::value_type; \
using OT = typename element_type_traits<out_t>::value_type; \
rc = evaluate<IT, OT>(__VA_ARGS__)); \
NGRAPH_OP_SCOPE(OV_CC_CAT3(evaluate_one_hot_out, _, a)) \
{ \
using IT = typename element_type_traits<element::Type_t::a>::value_type; \
using OT = typename element_type_traits<out_t>::value_type; \
rc = evaluate<IT, OT>(__VA_ARGS__); \
} \
} \
break;
break
template <element::Type_t out_t>
bool evaluate(const HostTensorVector& output_values,
@@ -206,7 +208,9 @@ namespace detail
bool op::v1::OneHot::evaluate(const HostTensorVector& output_values,
const HostTensorVector& input_values) const
{
NGRAPH_OP_SCOPE(v1_OneHot_evaluate,
return detail::evaluate_onehot(output_values, input_values, get_axis()););
NGRAPH_OP_SCOPE(v1_OneHot_evaluate)
{
return detail::evaluate_onehot(output_values, input_values, get_axis());
}
return false;
}

@@ -82,7 +82,9 @@ namespace logor
bool op::v1::LogicalOr::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_LogicalOr_evaluate,
return logor::evaluate_logor(inputs[0], inputs[1], outputs[0], get_autob()));
NGRAPH_OP_SCOPE(v1_LogicalOr_evaluate)
{
return logor::evaluate_logor(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
}

@@ -243,6 +243,6 @@ bool op::v1::Pad::evaluate_pad(const HostTensorVector& outputs,
bool op::v1::Pad::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_Pad_evaluate, return evaluate_pad(outputs, inputs));
NGRAPH_OP_SCOPE(v1_Pad_evaluate) { return evaluate_pad(outputs, inputs); }
return false;
}

@@ -86,7 +86,9 @@ shared_ptr<Node> op::v1::Power::clone_with_new_inputs(const OutputVector& new_ar
bool op::v1::Power::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_Power_evaluate,
return power::evaluate_power(inputs[0], inputs[1], outputs[0], get_autob()));
NGRAPH_OP_SCOPE(v1_Power_evaluate)
{
return power::evaluate_power(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
}

@@ -127,7 +127,9 @@ namespace prelu
bool op::PRelu::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v0_PRelu_evaluate,
return prelu::evaluate_prelu(inputs[0], inputs[1], outputs[0]););
NGRAPH_OP_SCOPE(v0_PRelu_evaluate)
{
return prelu::evaluate_prelu(inputs[0], inputs[1], outputs[0]);
}
return false;
}

@@ -192,11 +192,13 @@ namespace prior_box
bool op::v0::PriorBox::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v0_PriorBox_evaluate,
// Todo (itikhono): enable the use of the reference implementation after
// supporting constants as
// outputs in plugins
// return evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs());
return false);
NGRAPH_OP_SCOPE(v0_PriorBox_evaluate)
{
// Todo (itikhono): enable the use of the reference implementation after
// supporting constants as
// outputs in plugins
// return evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs());
return false;
}
return false;
}

@@ -165,11 +165,13 @@ namespace prior_box_clustered
bool op::v0::PriorBoxClustered::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v0_PriorBoxClustered_evaluate,
// Todo (itikhono): enable the use of the reference implementation after
// supporting constants as
// outputs in plugins
// return evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs());
return false);
NGRAPH_OP_SCOPE(v0_PriorBoxClustered_evaluate)
{
// Todo (itikhono): enable the use of the reference implementation after
// supporting constants as
// outputs in plugins
// return evaluate_prior_box(inputs[0], inputs[1], outputs[0], get_attrs());
return false;
}
return false;
}

@@ -300,11 +300,14 @@ namespace rangeop
bool op::v4::Range::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v4_Range_evaluate, HostTensorPtr out = outputs[0];
HostTensorPtr start = inputs[0];
HostTensorPtr stop = inputs[1];
HostTensorPtr step = inputs[2];
return rangeop::evaluate_power(out, start, stop, step, m_output_type, 4));
NGRAPH_OP_SCOPE(v4_Range_evaluate)
{
HostTensorPtr out = outputs[0];
HostTensorPtr start = inputs[0];
HostTensorPtr stop = inputs[1];
HostTensorPtr step = inputs[2];
return rangeop::evaluate_power(out, start, stop, step, m_output_type, 4);
}
return false;
}
@@ -496,10 +499,13 @@ void positive_range(T start_val, T stop_val, T step_val)
bool op::v0::Range::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
op_v0_Range_evaluate, HostTensorPtr out = outputs[0]; HostTensorPtr start = inputs[0];
NGRAPH_OP_SCOPE(op_v0_Range_evaluate)
{
HostTensorPtr out = outputs[0];
HostTensorPtr start = inputs[0];
HostTensorPtr stop = inputs[1];
HostTensorPtr step = inputs[2];
return rangeop::evaluate_power(out, start, stop, step, start->get_element_type(), 0));
return rangeop::evaluate_power(out, start, stop, step, start->get_element_type(), 0);
}
return false;
}

@@ -81,8 +81,10 @@ namespace reduce_l1
bool op::v4::ReduceL1::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v4_ReduceL1_evaluate,
return reduce_l1::evaluate_sum(
inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()));
NGRAPH_OP_SCOPE(v4_ReduceL1_evaluate)
{
return reduce_l1::evaluate_sum(
inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
}
return false;
}

@@ -79,8 +79,10 @@ namespace reduce_l2
bool op::v4::ReduceL2::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v4_ReduceL2_evaluate,
return reduce_l2::evaluate_reduce_l2(
inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()));
NGRAPH_OP_SCOPE(v4_ReduceL2_evaluate)
{
return reduce_l2::evaluate_reduce_l2(
inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
}
return false;
}

@@ -75,9 +75,12 @@ namespace
bool op::v1::ReduceLogicalAnd::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_ReduceLogicalAnd_evaluate, const auto& data = inputs[0];
const auto& axes = inputs[1];
const auto& out = outputs[0];
return evaluate_reduce_logical_and(data, axes, out, get_keep_dims()));
NGRAPH_OP_SCOPE(v1_ReduceLogicalAnd_evaluate)
{
const auto& data = inputs[0];
const auto& axes = inputs[1];
const auto& out = outputs[0];
return evaluate_reduce_logical_and(data, axes, out, get_keep_dims());
}
return false;
}

@@ -75,9 +75,12 @@ namespace
bool op::v1::ReduceLogicalOr::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_ReduceLogicalOr_evaluate, const auto& data = inputs[0];
const auto& axes = inputs[1];
const auto& out = outputs[0];
return evaluate_reduce_logical_or(data, axes, out, get_keep_dims()));
NGRAPH_OP_SCOPE(v1_ReduceLogicalOr_evaluate)
{
const auto& data = inputs[0];
const auto& axes = inputs[1];
const auto& out = outputs[0];
return evaluate_reduce_logical_or(data, axes, out, get_keep_dims());
}
return false;
}

@@ -78,8 +78,9 @@ namespace mean
bool op::v1::ReduceMean::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v1_ReduceMean_evaluate,
return mean::evaluate_mean(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()));
NGRAPH_OP_SCOPE(v1_ReduceMean_evaluate)
{
return mean::evaluate_mean(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
}
return false;
}

@@ -82,8 +82,10 @@ namespace reduce_prod
bool op::v1::ReduceProd::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_ReduceProd_evaluate,
return reduce_prod::evaluate_product(
inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()));
NGRAPH_OP_SCOPE(v1_ReduceProd_evaluate)
{
return reduce_prod::evaluate_product(
inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
}
return false;
}

@@ -83,8 +83,10 @@ namespace reduce_sum
bool op::v1::ReduceSum::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_ReduceSum_evaluate,
return reduce_sum::evaluate_sum(
inputs[0], outputs[0], get_reduction_axes(), get_keep_dims()));
NGRAPH_OP_SCOPE(v1_ReduceSum_evaluate)
{
return reduce_sum::evaluate_sum(
inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
}
return false;
}

@@ -71,9 +71,10 @@ namespace relu
bool op::Relu::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v0_Relu_evaluate,
return relu::evaluate_relu(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v0_Relu_evaluate)
{
return relu::evaluate_relu(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
}

@@ -230,8 +230,10 @@ shared_ptr<Node> op::v1::Reshape::clone_with_new_inputs(const OutputVector& new_
#define COMPUTE_OUT_SHAPE_CASE(a, ...) \
case element::Type_t::a: \
{ \
NGRAPH_OP_SCOPE(OV_CC_CAT3(compute_reshape_out_shape, _, a), \
reshapeop::compute_output_shape<element::Type_t::a>(__VA_ARGS__)); \
NGRAPH_OP_SCOPE(OV_CC_CAT3(compute_reshape_out_shape, _, a)) \
{ \
reshapeop::compute_output_shape<element::Type_t::a>(__VA_ARGS__); \
} \
} \
break;
@@ -343,7 +345,7 @@ bool op::v1::Reshape::evaluate_reshape(const HostTensorVector& outputs,
bool op::v1::Reshape::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_Reshape_evaluate, return evaluate_reshape(outputs, inputs));
NGRAPH_OP_SCOPE(v1_Reshape_evaluate) { return evaluate_reshape(outputs, inputs); }
return false;
}

@@ -58,11 +58,14 @@ shared_ptr<Node> op::Result::clone_with_new_inputs(const OutputVector& new_args)
bool op::Result::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(Result_evaluate, outputs[0]->set_unary(inputs[0]);
void* output = outputs[0]->get_data_ptr();
void* input = inputs[0]->get_data_ptr();
memcpy(output, input, outputs[0]->get_size_in_bytes());
return true);
NGRAPH_OP_SCOPE(Result_evaluate)
{
outputs[0]->set_unary(inputs[0]);
void* output = outputs[0]->get_data_ptr();
void* input = inputs[0]->get_data_ptr();
memcpy(output, input, outputs[0]->get_size_in_bytes());
return true;
}
return false;
}

@@ -163,8 +163,10 @@ namespace reverseop
#define GET_AXES(a, ...) \
case element::Type_t::a: \
{ \
NGRAPH_OP_SCOPE(OV_CC_CAT3(get_reverse_axes, _, a), \
reverseop::get_axes<element::Type_t::a>(__VA_ARGS__)); \
NGRAPH_OP_SCOPE(OV_CC_CAT3(get_reverse_axes, _, a)) \
{ \
reverseop::get_axes<element::Type_t::a>(__VA_ARGS__); \
} \
} \
break;
@@ -211,7 +213,7 @@ bool op::v1::Reverse::evaluate_reverse(const HostTensorVector& outputs,
bool op::v1::Reverse::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_Reverse_evaluate, return evaluate_reverse(outputs, inputs));
NGRAPH_OP_SCOPE(v1_Reverse_evaluate) { return evaluate_reverse(outputs, inputs); }
return false;
}

@@ -299,9 +299,10 @@ namespace roi_alinop
bool op::v3::ROIAlign::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v3_ROIAlign_evaluate,
NGRAPH_OP_SCOPE(v3_ROIAlign_evaluate)
{
return roi_alinop::evaluate_roi_align(
inputs, outputs[0], m_pooled_h, m_pooled_w, m_sampling_ratio, m_spatial_scale, m_mode));
inputs, outputs[0], m_pooled_h, m_pooled_w, m_sampling_ratio, m_spatial_scale, m_mode);
}
return false;
}

@@ -105,9 +105,11 @@ shared_ptr<Node> op::v5::Round::clone_with_new_inputs(const OutputVector& new_ar
bool op::v5::Round::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v5_Round_evaluate,
return roundop::evaluate_round(
inputs[0], outputs[0], shape_size(get_output_shape(0)), get_mode()));
NGRAPH_OP_SCOPE(v5_Round_evaluate)
{
return roundop::evaluate_round(
inputs[0], outputs[0], shape_size(get_output_shape(0)), get_mode());
}
return false;
}

@@ -165,8 +165,10 @@ namespace scatter_element_update
#define TYPE_AXS_CASE(a, ...) \
case element::Type_t::a: \
{ \
NGRAPH_OP_SCOPE(OV_CC_CAT3(scatter_element_update_axs, _, a), \
rc = evaluate<DT, IT, element::Type_t::a>(__VA_ARGS__)); \
NGRAPH_OP_SCOPE(OV_CC_CAT3(scatter_element_update_axs, _, a)) \
{ \
rc = evaluate<DT, IT, element::Type_t::a>(__VA_ARGS__); \
} \
} \
break;
@@ -201,8 +203,10 @@ namespace scatter_element_update
#define TYPE_IND_CASE(a, ...) \
case element::Type_t::a: \
{ \
NGRAPH_OP_SCOPE(OV_CC_CAT3(scatter_element_update_ind, _, a), \
rc = evaluate<DT, element::Type_t::a>(__VA_ARGS__)); \
NGRAPH_OP_SCOPE(OV_CC_CAT3(scatter_element_update_ind, _, a)) \
{ \
rc = evaluate<DT, element::Type_t::a>(__VA_ARGS__); \
} \
} \
break;
@@ -295,7 +299,9 @@ bool op::v3::ScatterElementsUpdate::evaluate_scatter_element_update(
bool op::v3::ScatterElementsUpdate::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v3_ScatterElementsUpdate_evaluate,
return evaluate_scatter_element_update(outputs, inputs));
NGRAPH_OP_SCOPE(v3_ScatterElementsUpdate_evaluate)
{
return evaluate_scatter_element_update(outputs, inputs);
}
return false;
}

@@ -55,9 +55,10 @@ namespace scatter_update
#define GET_INDICES(a, ...) \
case element::Type_t::a: \
{ \
NGRAPH_OP_SCOPE(OV_CC_CAT3(get_scatter_update_indices, _, a), \
indices_casted_vector = \
scatter_update::get_indices<element::Type_t::a>(__VA_ARGS__)); \
NGRAPH_OP_SCOPE(OV_CC_CAT3(get_scatter_update_indices, _, a)) \
{ \
indices_casted_vector = scatter_update::get_indices<element::Type_t::a>(__VA_ARGS__); \
} \
} \
break;
@@ -113,6 +114,6 @@ bool op::v3::ScatterUpdate::evaluate_scatter_update(const HostTensorVector& outp
bool op::v3::ScatterUpdate::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v3_ScatterUpdate_evaluate, return evaluate_scatter_update(outputs, inputs));
NGRAPH_OP_SCOPE(v3_ScatterUpdate_evaluate) { return evaluate_scatter_update(outputs, inputs); }
return false;
}

@@ -156,9 +156,11 @@ namespace detail
bool op::v1::Select::evaluate(const HostTensorVector& output_values,
const HostTensorVector& input_values) const
{
NGRAPH_OP_SCOPE(v1_Select_evaluate, const auto autob = get_auto_broadcast();
return detail::evaluate_select(
output_values, input_values, autob, get_output_element_type(0)));
NGRAPH_OP_SCOPE(v1_Select_evaluate)
{
const auto autob = get_auto_broadcast();
return detail::evaluate_select(
output_values, input_values, autob, get_output_element_type(0));
}
return false;
}

@@ -154,8 +154,10 @@ namespace shape_of
bool op::v3::ShapeOf::evaluate(const HostTensorVector& output_values,
const HostTensorVector& input_values) const
{
NGRAPH_OP_SCOPE(v3_ShapeOf_evaluate,
return shape_of::evaluate_shape_of(output_values[0], input_values[0]););
NGRAPH_OP_SCOPE(v3_ShapeOf_evaluate)
{
return shape_of::evaluate_shape_of(output_values[0], input_values[0]);
}
return false;
}
@@ -204,8 +206,10 @@ shared_ptr<Node> op::v0::ShapeOf::clone_with_new_inputs(const OutputVector& new_
bool op::v0::ShapeOf::evaluate(const HostTensorVector& output_values,
const HostTensorVector& input_values) const
{
NGRAPH_OP_SCOPE(v0_ShapeOf_evaluate,
return shape_of::evaluate_shape_of(output_values[0], input_values[0]));
NGRAPH_OP_SCOPE(v0_ShapeOf_evaluate)
{
return shape_of::evaluate_shape_of(output_values[0], input_values[0]);
}
return false;
}

@@ -187,6 +187,6 @@ bool op::ShuffleChannels::evaluate_shuffle_channels(const HostTensorVector& outp
bool op::ShuffleChannels::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(ShuffleChannels_evaluate, return evaluate_shuffle_channels(outputs, inputs));
NGRAPH_OP_SCOPE(ShuffleChannels_evaluate) { return evaluate_shuffle_channels(outputs, inputs); }
return false;
}

@@ -72,8 +72,9 @@ namespace sigmoid
bool op::Sigmoid::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v0_Sigmoid_evaluate,
return sigmoid::evaluate_sigmoid(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v0_Sigmoid_evaluate)
{
return sigmoid::evaluate_sigmoid(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
}

@@ -75,8 +75,9 @@ namespace signop
bool op::Sign::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v0_Sign_evaluate,
return signop::evaluate_sign(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v0_Sign_evaluate)
{
return signop::evaluate_sign(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
}

@@ -77,8 +77,9 @@ namespace sinop
bool op::Sin::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v0_Sin_evaluate,
return sinop::evaluate_sin(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v0_Sin_evaluate)
{
return sinop::evaluate_sin(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
}

@@ -77,8 +77,9 @@ namespace sinhop
bool op::Sinh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v0_Sinh_evaluate,
return sinhop::evaluate_sinh(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v0_Sinh_evaluate)
{
return sinhop::evaluate_sinh(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
}

@@ -101,7 +101,10 @@ shared_ptr<Node> op::v1::Softmax::clone_with_new_inputs(const OutputVector& new_
bool op::v1::Softmax::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_Softmax_evaluate, outputs[0]->set_unary(inputs[0]);
return evaluate_softmax(inputs[0], outputs[0], AxisSet{m_axis}));
NGRAPH_OP_SCOPE(v1_Softmax_evaluate)
{
outputs[0]->set_unary(inputs[0]);
return evaluate_softmax(inputs[0], outputs[0], AxisSet{m_axis});
}
return false;
}

@@ -77,8 +77,9 @@ namespace softplus
bool op::v4::SoftPlus::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v4_SoftPlus_evaluate,
return softplus::evaluate_softplus(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v4_SoftPlus_evaluate)
{
return softplus::evaluate_softplus(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
}

@@ -273,6 +273,6 @@ bool ngraph::op::v1::SpaceToBatch::evaluate_space_to_batch(const HostTensorVecto
bool ngraph::op::v1::SpaceToBatch::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_SpaceToBatch, return evaluate_space_to_batch(outputs, inputs));
NGRAPH_OP_SCOPE(v1_SpaceToBatch) { return evaluate_space_to_batch(outputs, inputs); }
return false;
}

@@ -228,7 +228,7 @@ bool ngraph::op::v0::SpaceToDepth::evaluate_space_to_depth(const HostTensorVecto
bool ngraph::op::v0::SpaceToDepth::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v0_SpaceToDepth_evaluate, return evaluate_space_to_depth(outputs, inputs));
NGRAPH_OP_SCOPE(v0_SpaceToDepth_evaluate) { return evaluate_space_to_depth(outputs, inputs); }
return false;
}

@@ -149,7 +149,11 @@ namespace split
bool op::v1::Split::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_Split_evaluate, const auto& data = inputs[0]; const auto& axis = inputs[1];
return split::evaluate_split(data, axis, outputs, m_num_splits, this));
NGRAPH_OP_SCOPE(v1_Split_evaluate)
{
const auto& data = inputs[0];
const auto& axis = inputs[1];
return split::evaluate_split(data, axis, outputs, m_num_splits, this);
}
return false;
}

@@ -75,8 +75,9 @@ namespace sqrtop
bool op::Sqrt::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v0_Sqrt_evaluate,
return sqrtop::evaluate_sqrt(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v0_Sqrt_evaluate)
{
return sqrtop::evaluate_sqrt(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
}

@@ -173,8 +173,10 @@ namespace squeeze
bool op::v0::Squeeze::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v0_Squeeze_evaluate,
return squeeze::evaluate_squeeze(inputs[0], inputs[1], outputs[0]));
NGRAPH_OP_SCOPE(v0_Squeeze_evaluate)
{
return squeeze::evaluate_squeeze(inputs[0], inputs[1], outputs[0]);
}
return false;
}

@@ -281,17 +281,19 @@ namespace strided_slice
bool op::v1::StridedSlice::evaluate(const HostTensorVector& output_values,
const HostTensorVector& input_values) const
{
NGRAPH_OP_SCOPE(v1_StridedSlice_evaluate,
return strided_slice::evaluate_strided_slice(
input_values[0],
input_values[1],
input_values[2],
input_values[3],
convert_mask_to_axis_set(get_begin_mask()),
convert_mask_to_axis_set(get_end_mask()),
convert_mask_to_axis_set(get_new_axis_mask()),
convert_mask_to_axis_set(get_shrink_axis_mask()),
convert_mask_to_axis_set(get_ellipsis_mask()),
output_values[0]));
NGRAPH_OP_SCOPE(v1_StridedSlice_evaluate)
{
return strided_slice::evaluate_strided_slice(
input_values[0],
input_values[1],
input_values[2],
input_values[3],
convert_mask_to_axis_set(get_begin_mask()),
convert_mask_to_axis_set(get_end_mask()),
convert_mask_to_axis_set(get_new_axis_mask()),
convert_mask_to_axis_set(get_shrink_axis_mask()),
convert_mask_to_axis_set(get_ellipsis_mask()),
output_values[0]);
}
return false;
}

@@ -83,8 +83,9 @@ shared_ptr<Node> op::v1::Subtract::clone_with_new_inputs(const OutputVector& new
bool op::v1::Subtract::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v1_Subtract_evaluate,
return subtract::evaluate_subtract(inputs[0], inputs[1], outputs[0], get_autob()));
NGRAPH_OP_SCOPE(v1_Subtract_evaluate)
{
return subtract::evaluate_subtract(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
}

@@ -128,8 +128,9 @@ namespace swish
bool op::v4::Swish::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v4_Swish_evaluate,
return swish::evaluate_swish(inputs, outputs[0], shape_size(get_output_shape(0))););
NGRAPH_OP_SCOPE(v4_Swish_evaluate)
{
return swish::evaluate_swish(inputs, outputs[0], shape_size(get_output_shape(0)));
}
return false;
}

@@ -78,8 +78,9 @@ namespace tanop
bool op::Tan::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v0_Tan_evaluate,
return tanop::evaluate_tan(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v0_Tan_evaluate)
{
return tanop::evaluate_tan(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
}

@@ -76,8 +76,9 @@ namespace tanhop
bool op::Tanh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
v0_Tanh_evaluate,
return tanhop::evaluate_tanh(inputs[0], outputs[0], shape_size(get_output_shape(0))));
NGRAPH_OP_SCOPE(v0_Tanh_evaluate)
{
return tanhop::evaluate_tanh(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return false;
}

@@ -135,6 +135,6 @@ bool op::v0::Tile::evaluate_tile(const HostTensorVector& outputs,
bool op::v0::Tile::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v0_Tile_evaluate, return evaluate_tile(outputs, inputs));
NGRAPH_OP_SCOPE(v0_Tile_evaluate) { return evaluate_tile(outputs, inputs); }
return false;
}

@@ -67,8 +67,10 @@ namespace topk
#define EXECUTE_EVALUATE_TOPK(a, ...) \
case element::Type_t::a: \
{ \
NGRAPH_OP_SCOPE(OV_CC_CAT3(exec_topk_eval, _, a), \
rc = evaluate_execute<INPUT_ET, element::Type_t::a>(__VA_ARGS__)); \
NGRAPH_OP_SCOPE(OV_CC_CAT3(exec_topk_eval, _, a)) \
{ \
rc = evaluate_execute<INPUT_ET, element::Type_t::a>(__VA_ARGS__); \
} \
} \
break
@@ -189,8 +191,10 @@ namespace topk
#define CASE_GET_K(a, ...) \
case element::Type_t::a: \
{ \
NGRAPH_OP_SCOPE(OV_CC_CAT3(topk_get_k, _, a), \
k = get_k_from_hosttensor<element::Type_t::a>(__VA_ARGS__)); \
NGRAPH_OP_SCOPE(OV_CC_CAT3(topk_get_k, _, a)) \
{ \
k = get_k_from_hosttensor<element::Type_t::a>(__VA_ARGS__); \
} \
} \
break
@@ -449,52 +453,49 @@ void op::v1::TopK::set_k(size_t k)
op::Constant::create(element::i64, Shape{}, {k})->output(0));
}
bool op::v1::TopK::evaluate_topk(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
Shape arg_shape = inputs[0]->get_shape();
// 1. get axis, mode ( max/min), sort_type
size_t axis = ngraph::normalize_axis(this, m_axis, arg_shape.size());
bool compute_max = get_mode() == TopKMode::MAX ? true : false;
SortType sort_type = get_sort_type();
// 2. get value of k - from constant node or from HT
size_t k = 0;
if (op::is_constant(input_value(1).get_node()))
{
k = read_k_from_constant_node(input_value(1).get_node_shared_ptr(),
get_input_element_type(1));
NGRAPH_CHECK(k <= arg_shape[axis], "'K' exceeds the dimension of top_k_axis");
}
else
{
k = topk::read_k_from_host_tensor(inputs[1]);
}
// 3. Compute output_shape
auto output_shape = compute_output_shape(this->description(), inputs[0]->get_shape(), k);
// do this after compute_output_shape
if (k == 0)
{
// the kernel can't handle k = 0, but output_shape[axis] = arg_shape[axis]
k = arg_shape[axis];
}
return topk::evaluate_topk(inputs[0],
outputs[1],
outputs[0],
output_shape,
axis,
k,
compute_max,
sort_type,
get_index_element_type());
}
bool op::v1::TopK::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_TopK_evaluate, return evaluate_topk(outputs, inputs));
NGRAPH_OP_SCOPE(v1_TopK_evaluate)
{
Shape arg_shape = inputs[0]->get_shape();
// 1. get axis, mode ( max/min), sort_type
size_t axis = ngraph::normalize_axis(this, m_axis, arg_shape.size());
bool compute_max = get_mode() == TopKMode::MAX ? true : false;
SortType sort_type = get_sort_type();
// 2. get value of k - from constant node or from HT
size_t k = 0;
if (op::is_constant(input_value(1).get_node()))
{
k = read_k_from_constant_node(input_value(1).get_node_shared_ptr(),
get_input_element_type(1));
NGRAPH_CHECK(k <= arg_shape[axis], "'K' exceeds the dimension of top_k_axis");
}
else
{
k = topk::read_k_from_host_tensor(inputs[1]);
}
// 3. Compute output_shape
auto output_shape = compute_output_shape(this->description(), inputs[0]->get_shape(), k);
// do this after compute_output_shape
if (k == 0)
{
// the kernel can't handle k = 0, but output_shape[axis] = arg_shape[axis]
k = arg_shape[axis];
}
return topk::evaluate_topk(inputs[0],
outputs[1],
outputs[0],
output_shape,
axis,
k,
compute_max,
sort_type,
get_index_element_type());
}
return false;
}
@@ -577,6 +578,6 @@ shared_ptr<Node> op::v3::TopK::clone_with_new_inputs(const OutputVector& new_arg
bool op::v3::TopK::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v3_TopK_evaluate, return op::v1::TopK::evaluate(outputs, inputs));
NGRAPH_OP_SCOPE(v3_TopK_evaluate) { return op::v1::TopK::evaluate(outputs, inputs); }
return false;
}

@@ -144,8 +144,9 @@ namespace transpose
bool op::v1::Transpose::evaluate(const HostTensorVector& output_values,
const HostTensorVector& input_values) const
{
NGRAPH_OP_SCOPE(
v1_Transpose_evaluate,
return transpose::evaluate_transpose(input_values[0], input_values[1], output_values[0]));
NGRAPH_OP_SCOPE(v1_Transpose_evaluate)
{
return transpose::evaluate_transpose(input_values[0], input_values[1], output_values[0]);
}
return false;
}

@@ -150,8 +150,10 @@ namespace unsqueeze
bool op::v0::Unsqueeze::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v0_Unsqueeze_evaluate,
return unsqueeze::evaluate_unsqueeze(inputs[0], inputs[1], outputs[0]));
NGRAPH_OP_SCOPE(v0_Unsqueeze_evaluate)
{
return unsqueeze::evaluate_unsqueeze(inputs[0], inputs[1], outputs[0]);
}
return false;
}

@@ -361,14 +361,16 @@ bool op::util::BroadcastBase::evaluate(const HostTensorPtr& arg0,
const HostTensorPtr& out,
const AxisSet& broadcast_axes) const
{
NGRAPH_OP_SCOPE(util_BroadcastBase_evaluate_axes,
runtime::reference::broadcast(arg0->get_data_ptr<const char>(),
out->get_data_ptr<char>(),
arg0->get_shape(),
out->get_shape(),
broadcast_axes,
arg0->get_element_type().size());
return true);
NGRAPH_OP_SCOPE(util_BroadcastBase_evaluate_axes)
{
runtime::reference::broadcast(arg0->get_data_ptr<const char>(),
out->get_data_ptr<char>(),
arg0->get_shape(),
out->get_shape(),
broadcast_axes,
arg0->get_element_type().size());
return true;
}
return false;
}
@@ -500,14 +502,16 @@ Shape op::util::BroadcastBase::get_target_shape(const HostTensorPtr& input1) con
bool op::util::BroadcastBase::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(
util_BroadcastBase_evaluate, Shape target_shape = get_target_shape(inputs[1]);
NGRAPH_OP_SCOPE(util_BroadcastBase_evaluate)
{
Shape target_shape = get_target_shape(inputs[1]);
PartialShape result_shape;
std::pair<bool, AxisSet> pair_broadcast_axes;
auto arg_shape = inputs[0]->get_shape();
if (m_mode.m_type == BroadcastType::NONE) {
if (m_mode.m_type == BroadcastType::NONE)
{
AxisVector axes_mapping_val;
const auto axes_mapping_constant =
as_type_ptr<op::v0::Constant>(input_value(2).get_node_shared_ptr());
@@ -523,18 +527,27 @@ bool op::util::BroadcastBase::evaluate(const HostTensorVector& outputs,
pair_broadcast_axes = get_broadcast_axes_none(axes_mapping_val, target_shape.size());
validate_target_shape_none(inputs[0]->get_shape(), axes_mapping_val, target_shape);
result_shape = target_shape;
} else if (m_mode.m_type == BroadcastType::PDPD) {
}
else if (m_mode.m_type == BroadcastType::PDPD)
{
result_shape = get_result_shape_pdpd(arg_shape, target_shape, m_mode);
pair_broadcast_axes =
get_broadcast_axes_numpy_pdpd(arg_shape, result_shape.to_shape(), m_mode);
} else if (m_mode.m_type == BroadcastType::NUMPY) {
}
else if (m_mode.m_type == BroadcastType::NUMPY)
{
result_shape = target_shape;
validate_target_shape_numpy(arg_shape, target_shape);
pair_broadcast_axes =
get_broadcast_axes_numpy_pdpd(arg_shape, result_shape.to_shape(), m_mode);
} else { ngraph_error("Unsupported BroadcastType "); }
}
else
{
ngraph_error("Unsupported BroadcastType ");
}
return evaluate_broadcast(
inputs[0], outputs[0], pair_broadcast_axes, result_shape.to_shape()));
inputs[0], outputs[0], pair_broadcast_axes, result_shape.to_shape());
}
return false;
}

@@ -216,6 +216,6 @@ bool op::v1::VariadicSplit::evaluate_variadic_split(const HostTensorVector& inpu
bool op::v1::VariadicSplit::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_VariadicSplit_evaluate, return evaluate_variadic_split(inputs, outputs));
NGRAPH_OP_SCOPE(v1_VariadicSplit_evaluate) { return evaluate_variadic_split(inputs, outputs); }
return false;
}

@@ -86,8 +86,10 @@ namespace logxor
bool op::v1::LogicalXor::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_LogicalXor_evaluate,
return logxor::evaluate_logxor(inputs[0], inputs[1], outputs[0], get_autob()));
NGRAPH_OP_SCOPE(v1_LogicalXor_evaluate)
{
return logxor::evaluate_logxor(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
}
@@ -109,7 +111,9 @@ shared_ptr<Node> op::v0::Xor::clone_with_new_inputs(const OutputVector& new_args
bool op::v0::Xor::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v0_Xor_evaluate,
return logxor::evaluate_logxor(inputs[0], inputs[1], outputs[0], get_autob()));
NGRAPH_OP_SCOPE(v0_Xor_evaluate)
{
return logxor::evaluate_logxor(inputs[0], inputs[1], outputs[0], get_autob());
}
return false;
}

Some files were not shown because too many files have changed in this diff.