From 24209239bf8fed1fc42253632332252fea9cd1aa Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Tue, 5 Dec 2023 08:23:19 +0100 Subject: [PATCH] Restore CC feature in operators evaluate (#21446) --- src/core/src/op/abs.cpp | 11 +-- src/core/src/op/acos.cpp | 11 +-- src/core/src/op/acosh.cpp | 11 +-- src/core/src/op/add.cpp | 18 ++--- src/core/src/op/asin.cpp | 11 +-- src/core/src/op/asinh.cpp | 11 +-- src/core/src/op/atan.cpp | 11 +-- src/core/src/op/atanh.cpp | 11 +-- src/core/src/op/ceiling.cpp | 12 ++-- src/core/src/op/clamp.cpp | 16 +++-- src/core/src/op/cos.cpp | 11 +-- src/core/src/op/cosh.cpp | 11 +-- src/core/src/op/cum_sum.cpp | 15 +++-- src/core/src/op/divide.cpp | 19 +++--- src/core/src/op/equal.cpp | 18 ++--- src/core/src/op/erf.cpp | 11 +-- src/core/src/op/exp.cpp | 11 +-- src/core/src/op/eye.cpp | 11 +-- src/core/src/op/fake_convert.cpp | 13 ++-- src/core/src/op/fake_quantize.cpp | 31 +++++---- src/core/src/op/floor.cpp | 12 ++-- src/core/src/op/floor_mod.cpp | 18 ++--- src/core/src/op/gelu.cpp | 13 ++-- src/core/src/op/greater.cpp | 17 +++-- src/core/src/op/greater_eq.cpp | 17 +++-- src/core/src/op/grid_sample.cpp | 34 ++++++---- src/core/src/op/hsigmoid.cpp | 12 ++-- src/core/src/op/hswish.cpp | 11 +-- src/core/src/op/less.cpp | 17 +++-- src/core/src/op/less_eq.cpp | 17 +++-- src/core/src/op/log.cpp | 11 +-- src/core/src/op/logical_not.cpp | 12 ++-- src/core/src/op/matmul.cpp | 21 +++--- src/core/src/op/max_pool.cpp | 75 ++++++++++++--------- src/core/src/op/maximum.cpp | 17 +++-- src/core/src/op/minimum.cpp | 17 +++-- src/core/src/op/mish.cpp | 11 +-- src/core/src/op/mod.cpp | 17 +++-- src/core/src/op/multiply.cpp | 17 +++-- src/core/src/op/negative.cpp | 11 +-- src/core/src/op/non_zero.cpp | 22 ++++-- src/core/src/op/not_equal.cpp | 17 +++-- src/core/src/op/one_hot.cpp | 21 +++--- src/core/src/op/power.cpp | 17 +++-- src/core/src/op/prelu.cpp | 15 +++-- src/core/src/op/range.cpp | 26 ++++--- src/core/src/op/reduce_l1.cpp | 11 +-- src/core/src/op/reduce_l2.cpp | 11 +-- src/core/src/op/reduce_logical_and.cpp | 11 +-- src/core/src/op/reduce_logical_or.cpp | 11 +-- src/core/src/op/reduce_max.cpp | 11 +-- src/core/src/op/reduce_mean.cpp | 11 +-- src/core/src/op/reduce_min.cpp | 11 +-- src/core/src/op/reduce_prod.cpp | 11 +-- src/core/src/op/reduce_sum.cpp | 11 +-- src/core/src/op/relu.cpp | 11 +-- src/core/src/op/round.cpp | 14 ++-- src/core/src/op/scatter_elements_update.cpp | 48 +++++++------ src/core/src/op/scatter_nd_update.cpp | 38 ++++++----- src/core/src/op/sigmoid.cpp | 11 +-- src/core/src/op/sign.cpp | 11 +-- src/core/src/op/sin.cpp | 11 +-- src/core/src/op/sinh.cpp | 11 +-- src/core/src/op/softmax.cpp | 26 ++++--- src/core/src/op/softplus.cpp | 11 +-- src/core/src/op/softsign.cpp | 11 +-- src/core/src/op/sqrt.cpp | 11 +-- src/core/src/op/subtract.cpp | 17 +++-- src/core/src/op/swish.cpp | 13 ++-- src/core/src/op/tan.cpp | 11 +-- src/core/src/op/tanh.cpp | 11 +-- src/core/src/op/topk.cpp | 42 +++++++----- src/core/src/op/xor.cpp | 17 +++-- 73 files changed, 718 insertions(+), 475 deletions(-) diff --git a/src/core/src/op/abs.cpp b/src/core/src/op/abs.cpp index 43e034e3dc0..8b87bd76579 100644 --- a/src/core/src/op/abs.cpp +++ b/src/core/src/op/abs.cpp @@ -42,10 +42,13 @@ bool Abs::evaluate(TensorVector& outputs, const TensorVector& inputs) const { outputs[0].set_shape(inputs[0].get_shape()); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(inputs[0].get_shape())); + return 
IF_TYPE_OF(v0_Abs_evaluate, + OV_PP_ET_LIST(bf16, f16, f32, i32, i64, u32, u64), + abs::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(inputs[0].get_shape())); } bool Abs::has_evaluate() const { diff --git a/src/core/src/op/acos.cpp b/src/core/src/op/acos.cpp index 8721960f53b..1aab9570405 100644 --- a/src/core/src/op/acos.cpp +++ b/src/core/src/op/acos.cpp @@ -41,10 +41,13 @@ bool ov::op::v0::Acos::evaluate(TensorVector& outputs, const TensorVector& input outputs[0].set_shape(inputs[0].get_shape()); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(inputs[0].get_shape())); + return IF_TYPE_OF(v0_Acos_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + acos::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(inputs[0].get_shape())); } bool ov::op::v0::Acos::has_evaluate() const { diff --git a/src/core/src/op/acosh.cpp b/src/core/src/op/acosh.cpp index 115ce76ac63..85fbfb31115 100644 --- a/src/core/src/op/acosh.cpp +++ b/src/core/src/op/acosh.cpp @@ -41,10 +41,13 @@ bool ov::op::v3::Acosh::evaluate(TensorVector& outputs, const TensorVector& inpu outputs[0].set_shape(inputs[0].get_shape()); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(inputs[0].get_shape())); + return IF_TYPE_OF(v3_Acosh_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + acosh::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(inputs[0].get_shape())); } bool ov::op::v3::Acosh::has_evaluate() const { diff --git a/src/core/src/op/add.cpp b/src/core/src/op/add.cpp index 0d09563b9ae..ee81de632cf 100644 --- a/src/core/src/op/add.cpp +++ b/src/core/src/op/add.cpp @@ -48,14 +48,16 @@ bool Add::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) co outputs[0].set_shape(infer_broadcast_shape(this, inputs)); using namespace ov::element; - return IfTypeOf::apply( - inputs[0].get_element_type(), - inputs[0], - inputs[1], - outputs[0], - inputs[0].get_shape(), - inputs[1].get_shape(), - get_autob()); + return IF_TYPE_OF(v1_Add_evaluate, + OV_PP_ET_LIST(bf16, f16, f32, i8, i16, i32, i64, u8, u16, u32, u64), + add::Evaluate, + inputs[0].get_element_type(), + inputs[0], + inputs[1], + outputs[0], + inputs[0].get_shape(), + inputs[1].get_shape(), + get_autob()); } bool Add::has_evaluate() const { diff --git a/src/core/src/op/asin.cpp b/src/core/src/op/asin.cpp index 00832dce255..19183014f30 100644 --- a/src/core/src/op/asin.cpp +++ b/src/core/src/op/asin.cpp @@ -41,10 +41,13 @@ bool Asin::evaluate(TensorVector& outputs, const TensorVector& inputs) const { outputs[0].set_shape(inputs[0].get_shape()); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(inputs[0].get_shape())); + return IF_TYPE_OF(v0_Asin_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + asin::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(inputs[0].get_shape())); } bool Asin::has_evaluate() const { diff --git a/src/core/src/op/asinh.cpp b/src/core/src/op/asinh.cpp index 80e7396f27d..aa2ecdd432f 100644 --- a/src/core/src/op/asinh.cpp +++ b/src/core/src/op/asinh.cpp @@ -40,10 +40,13 @@ bool Asinh::evaluate(TensorVector& outputs, const TensorVector& inputs) const { outputs[0].set_shape(inputs[0].get_shape()); using namespace ov::element; - return 
IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(inputs[0].get_shape())); + return IF_TYPE_OF(v3_Asinh_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + asinh::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(inputs[0].get_shape())); } bool Asinh::has_evaluate() const { diff --git a/src/core/src/op/atan.cpp b/src/core/src/op/atan.cpp index 6732d4b952a..2004a3a3679 100644 --- a/src/core/src/op/atan.cpp +++ b/src/core/src/op/atan.cpp @@ -43,10 +43,13 @@ bool Atan::evaluate(TensorVector& outputs, const TensorVector& inputs) const { outputs[0].set_shape(inputs[0].get_shape()); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(inputs[0].get_shape())); + return IF_TYPE_OF(v0_Atan_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + atan::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(inputs[0].get_shape())); } bool Atan::has_evaluate() const { diff --git a/src/core/src/op/atanh.cpp b/src/core/src/op/atanh.cpp index b17431862fa..c619f01796a 100644 --- a/src/core/src/op/atanh.cpp +++ b/src/core/src/op/atanh.cpp @@ -40,10 +40,13 @@ bool op::v3::Atanh::evaluate(TensorVector& outputs, const TensorVector& inputs) outputs[0].set_shape(inputs[0].get_shape()); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(inputs[0].get_shape())); + return IF_TYPE_OF(v3_Atanh_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + atanh::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(inputs[0].get_shape())); } bool op::v3::Atanh::has_evaluate() const { diff --git a/src/core/src/op/ceiling.cpp b/src/core/src/op/ceiling.cpp index c46ed21ae03..17139143523 100644 --- a/src/core/src/op/ceiling.cpp +++ b/src/core/src/op/ceiling.cpp @@ -44,11 +44,13 @@ bool Ceiling::evaluate(TensorVector& outputs, const TensorVector& inputs) const outputs[0].set_shape(inputs[0].get_shape()); using namespace ov::element; - return IfTypeOf::apply( - inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(inputs[0].get_shape())); + return IF_TYPE_OF(v0_Ceiling_evaluate, + OV_PP_ET_LIST(f16, f32, i8, i16, i32, i64, u8, u16, u32, u64), + ceiling::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(inputs[0].get_shape())); } bool Ceiling::has_evaluate() const { diff --git a/src/core/src/op/clamp.cpp b/src/core/src/op/clamp.cpp index f3b0d19af17..76350a49b24 100644 --- a/src/core/src/op/clamp.cpp +++ b/src/core/src/op/clamp.cpp @@ -68,13 +68,15 @@ bool Clamp::evaluate(TensorVector& outputs, const TensorVector& inputs) const { outputs[0].set_shape(in_shape); using namespace ov::element; - return IfTypeOf::apply( - inputs[0].get_element_type(), - inputs[0], - outputs[0], - get_min(), - get_max(), - shape_size(in_shape)); + return IF_TYPE_OF(v0_Clamp_evaluate, + OV_PP_ET_LIST(bf16, f16, f32, i8, i16, i32, i64, u8, u16, u32, u64), + clamp::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + get_min(), + get_max(), + shape_size(in_shape)); } bool Clamp::has_evaluate() const { diff --git a/src/core/src/op/cos.cpp b/src/core/src/op/cos.cpp index 9fd6562838a..575d148f2d9 100644 --- a/src/core/src/op/cos.cpp +++ b/src/core/src/op/cos.cpp @@ -47,10 +47,13 @@ bool Cos::evaluate(TensorVector& outputs, const TensorVector& inputs) const { outputs[0].set_shape(inputs[0].get_shape()); using namespace 
ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(inputs[0].get_shape())); + return IF_TYPE_OF(v0_Cos_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + cos::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(inputs[0].get_shape())); } bool Cos::has_evaluate() const { diff --git a/src/core/src/op/cosh.cpp b/src/core/src/op/cosh.cpp index 1792fba5c3e..ebc05f76127 100644 --- a/src/core/src/op/cosh.cpp +++ b/src/core/src/op/cosh.cpp @@ -47,10 +47,13 @@ bool Cosh::evaluate(TensorVector& outputs, const TensorVector& inputs) const { outputs[0].set_shape(inputs[0].get_shape()); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(inputs[0].get_shape())); + return IF_TYPE_OF(v0_Cosh_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + cosh::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(inputs[0].get_shape())); } bool Cosh::has_evaluate() const { diff --git a/src/core/src/op/cum_sum.cpp b/src/core/src/op/cum_sum.cpp index 774e2be80c8..a8737d59b7c 100644 --- a/src/core/src/op/cum_sum.cpp +++ b/src/core/src/op/cum_sum.cpp @@ -35,12 +35,15 @@ bool evaluate(TensorVector& outputs, const TensorVector& inputs, const bool excl const auto axis = ov::get_tensor_data_as(inputs[1]).front(); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - axis, - exclusive, - reverse); + return IF_TYPE_OF(CumSum_evaluate, + f32, + cumsum::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + axis, + exclusive, + reverse); } } // namespace } // namespace cumsum diff --git a/src/core/src/op/divide.cpp b/src/core/src/op/divide.cpp index c2a9020cb03..67a0b6c7265 100644 --- a/src/core/src/op/divide.cpp +++ b/src/core/src/op/divide.cpp @@ -242,14 +242,17 @@ bool Divide::evaluate(TensorVector& outputs, const TensorVector& inputs) const { outputs[0].set_shape(infer_broadcast_shape(this, inputs)); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - inputs[1], - outputs[0], - inputs[0].get_shape(), - inputs[1].get_shape(), - get_autob(), - is_pythondiv()); + return IF_TYPE_OF(v1_Divide_evaluate, + OV_PP_ET_LIST(f16, bf16, f32, i32, i64, u32, u64), + divide::Evaluate, + inputs[0].get_element_type(), + inputs[0], + inputs[1], + outputs[0], + inputs[0].get_shape(), + inputs[1].get_shape(), + get_autob(), + is_pythondiv()); return true; } diff --git a/src/core/src/op/equal.cpp b/src/core/src/op/equal.cpp index 7f23b8970e2..3460d1c7c2e 100644 --- a/src/core/src/op/equal.cpp +++ b/src/core/src/op/equal.cpp @@ -101,14 +101,16 @@ bool Equal::evaluate(TensorVector& outputs, const TensorVector& inputs) const { outputs[0].set_shape(ov::op::infer_broadcast_shape(this, inputs)); using namespace ov::element; - return IfTypeOf::apply( - inputs[0].get_element_type(), - inputs[0], - inputs[1], - outputs[0], - inputs[0].get_shape(), - inputs[1].get_shape(), - get_autob()); + return IF_TYPE_OF(v1_Equal_evaluate, + OV_PP_ET_LIST(boolean, bf16, f16, f32, f64, i8, i16, i32, i64, u8, u16, u32, u64), + equal::Evaluate, + inputs[0].get_element_type(), + inputs[0], + inputs[1], + outputs[0], + inputs[0].get_shape(), + inputs[1].get_shape(), + get_autob()); } bool Equal::evaluate_lower(TensorVector& output_values) const { diff --git a/src/core/src/op/erf.cpp b/src/core/src/op/erf.cpp index 1315453b2d6..4aa2dda2091 100644 --- 
a/src/core/src/op/erf.cpp +++ b/src/core/src/op/erf.cpp @@ -43,10 +43,13 @@ bool Erf::evaluate(TensorVector& outputs, const TensorVector& inputs) const { outputs[0].set_shape(in_shape); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(in_shape)); + return IF_TYPE_OF(v0_Erf_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + erf::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(inputs[0].get_shape())); } bool Erf::has_evaluate() const { diff --git a/src/core/src/op/exp.cpp b/src/core/src/op/exp.cpp index ecd4d379133..c067c36748f 100644 --- a/src/core/src/op/exp.cpp +++ b/src/core/src/op/exp.cpp @@ -44,10 +44,13 @@ bool Exp::evaluate(TensorVector& outputs, const TensorVector& inputs) const { outputs[0].set_shape(in_shape); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(in_shape)); + return IF_TYPE_OF(v0_Exp_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + exp::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(in_shape)); } bool Exp::has_evaluate() const { diff --git a/src/core/src/op/eye.cpp b/src/core/src/op/eye.cpp index 86fe1c62baf..a3612c40e61 100644 --- a/src/core/src/op/eye.cpp +++ b/src/core/src/op/eye.cpp @@ -123,10 +123,13 @@ bool Eye::evaluate(TensorVector& outputs, const TensorVector& inputs) const { outputs[0].set_shape(output_shape); using namespace ov::element; - return IfTypeOf::apply(outputs[0].get_element_type(), - outputs[0], - output_shape, - diagonal_index); + return IF_TYPE_OF(v9_Eye_evaluate, + OV_PP_ET_LIST(bf16, f16, f32, f64, i8, i32, i64, u8), + eye::Evaluate, + outputs[0].get_element_type(), + outputs[0], + output_shape, + diagonal_index); } } // namespace v9 } // namespace op diff --git a/src/core/src/op/fake_convert.cpp b/src/core/src/op/fake_convert.cpp index 48a857f5aa8..0aad25b5891 100644 --- a/src/core/src/op/fake_convert.cpp +++ b/src/core/src/op/fake_convert.cpp @@ -135,10 +135,15 @@ bool FakeConvert::evaluate(ov::TensorVector& outputs, const ov::TensorVector& in outputs[0].set_shape(inputs[0].get_shape()); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - outputs, - inputs, - get_destination_type()); + return IF_TYPE_OF(v13_FakeConvert_evaluate, + OV_PP_ET_LIST(bf16, f16, f32), + fake_convert_details::Evaluate, + inputs[0].get_element_type(), + outputs, + inputs, + get_destination_type()); + + return true; } } // namespace v13 } // namespace op diff --git a/src/core/src/op/fake_quantize.cpp b/src/core/src/op/fake_quantize.cpp index 9b7ba0e991a..b15137e9203 100644 --- a/src/core/src/op/fake_quantize.cpp +++ b/src/core/src/op/fake_quantize.cpp @@ -113,20 +113,23 @@ bool FakeQuantize::evaluate(TensorVector& outputs, const TensorVector& inputs) c outputs[0].set_shape(shape0); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - inputs[1], - inputs[2], - inputs[3], - inputs[4], - outputs[0], - shape0, - inputs[1].get_shape(), - inputs[2].get_shape(), - inputs[3].get_shape(), - inputs[4].get_shape(), - get_levels(), - get_auto_broadcast()); + return IF_TYPE_OF(v0_FakeQuantize_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + fake_quantize::Evaluate, + inputs[0].get_element_type(), + inputs[0], + inputs[1], + inputs[2], + inputs[3], + inputs[4], + outputs[0], + shape0, + inputs[1].get_shape(), + inputs[2].get_shape(), + inputs[3].get_shape(), + 
inputs[4].get_shape(), + get_levels(), + get_auto_broadcast()); } bool FakeQuantize::has_evaluate() const { diff --git a/src/core/src/op/floor.cpp b/src/core/src/op/floor.cpp index c884dac18ca..e84e42a9cd7 100644 --- a/src/core/src/op/floor.cpp +++ b/src/core/src/op/floor.cpp @@ -44,11 +44,13 @@ bool Floor::evaluate(TensorVector& outputs, const TensorVector& inputs) const { outputs[0].set_shape(in_shape); using namespace ov::element; - return IfTypeOf::apply( - inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(in_shape)); + return IF_TYPE_OF(v0_Floor_evaluate, + OV_PP_ET_LIST(f16, f32, i8, i16, i32, i64, u8, u16, u32, u64), + floor::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(in_shape)); } bool Floor::has_evaluate() const { diff --git a/src/core/src/op/floor_mod.cpp b/src/core/src/op/floor_mod.cpp index 225c70a5e5d..1c45edf4af3 100644 --- a/src/core/src/op/floor_mod.cpp +++ b/src/core/src/op/floor_mod.cpp @@ -48,14 +48,16 @@ bool FloorMod::evaluate(TensorVector& outputs, const TensorVector& inputs) const outputs[0].set_shape(infer_broadcast_shape(this, inputs)); using namespace ov::element; - return IfTypeOf::apply( - inputs[0].get_element_type(), - inputs[0], - inputs[1], - outputs[0], - inputs[0].get_shape(), - inputs[1].get_shape(), - get_autob()); + return IF_TYPE_OF(v1_FloorMod_evaluate, + OV_PP_ET_LIST(bf16, f16, f32, i8, i32, i64, u8, u32, u64), + floor_mod::Evaluate, + inputs[0].get_element_type(), + inputs[0], + inputs[1], + outputs[0], + inputs[0].get_shape(), + inputs[1].get_shape(), + get_autob()); } bool FloorMod::has_evaluate() const { diff --git a/src/core/src/op/gelu.cpp b/src/core/src/op/gelu.cpp index cc261ca1650..c8264e2ac76 100644 --- a/src/core/src/op/gelu.cpp +++ b/src/core/src/op/gelu.cpp @@ -104,11 +104,14 @@ bool Gelu::evaluate(TensorVector& outputs, const TensorVector& inputs) const { const auto count = shape_size(input_shape); outputs[0].set_shape(input_shape); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - m_approximation_mode, - count); + return IF_TYPE_OF(v7_Gelu_evaluate, + OV_PP_ET_LIST(f16, f32), + gelu::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + m_approximation_mode, + count); } bool Gelu::has_evaluate() const { diff --git a/src/core/src/op/greater.cpp b/src/core/src/op/greater.cpp index 76715745a5f..ed09e77f4b0 100644 --- a/src/core/src/op/greater.cpp +++ b/src/core/src/op/greater.cpp @@ -53,13 +53,16 @@ bool Greater::evaluate(TensorVector& outputs, const TensorVector& inputs) const outputs[0].set_shape(infer_broadcast_shape(this, inputs)); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - inputs[1], - outputs[0], - inputs[0].get_shape(), - inputs[1].get_shape(), - get_autob()); + return IF_TYPE_OF(v1_Greater_evaluate, + OV_PP_ET_LIST(boolean, f16, f32, i32, i64, u32, u64), + greater::Evaluate, + inputs[0].get_element_type(), + inputs[0], + inputs[1], + outputs[0], + inputs[0].get_shape(), + inputs[1].get_shape(), + get_autob()); } bool Greater::has_evaluate() const { diff --git a/src/core/src/op/greater_eq.cpp b/src/core/src/op/greater_eq.cpp index a3bd099262a..4f111431f8a 100644 --- a/src/core/src/op/greater_eq.cpp +++ b/src/core/src/op/greater_eq.cpp @@ -53,13 +53,16 @@ bool GreaterEqual::evaluate(TensorVector& outputs, const TensorVector& inputs) c outputs[0].set_shape(infer_broadcast_shape(this, inputs)); using namespace ov::element; - return 
IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - inputs[1], - outputs[0], - inputs[0].get_shape(), - inputs[1].get_shape(), - get_autob()); + return IF_TYPE_OF(v1_GreaterEqual_evaluate, + OV_PP_ET_LIST(boolean, f16, f32, i32, i64, u32, u64), + greater_equal::Evaluate, + inputs[0].get_element_type(), + inputs[0], + inputs[1], + outputs[0], + inputs[0].get_shape(), + inputs[1].get_shape(), + get_autob()); } bool GreaterEqual::has_evaluate() const { diff --git a/src/core/src/op/grid_sample.cpp b/src/core/src/op/grid_sample.cpp index bb81c977893..d5ed0790d4a 100644 --- a/src/core/src/op/grid_sample.cpp +++ b/src/core/src/op/grid_sample.cpp @@ -25,13 +25,16 @@ struct Evaluate : element::NoAction { const Shape& grid_shape, const GridSample::Attributes& attributes) { using namespace ov::element; - return IfTypeOf::apply(grid.get_element_type(), - output.data(), - data.data(), - grid, - data_shape, - grid_shape, - attributes); + return IF_TYPE_OF(eval_by_grid_type, + OV_PP_ET_LIST(f32), + EvalByGridType, + grid.get_element_type(), + output.data(), + data.data(), + grid, + data_shape, + grid_shape, + attributes); } private: @@ -100,13 +103,16 @@ bool GridSample::evaluate(TensorVector& outputs, const TensorVector& inputs) con outputs[0].set_shape(out_shape); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - outputs[0], - inputs[0], - inputs[1], - inputs[0].get_shape(), - inputs[1].get_shape(), - m_attributes); + return IF_TYPE_OF(v9_GridSample_evaluate, + OV_PP_ET_LIST(f32), + Evaluate, + inputs[0].get_element_type(), + outputs[0], + inputs[0], + inputs[1], + inputs[0].get_shape(), + inputs[1].get_shape(), + m_attributes); } bool GridSample::has_evaluate() const { diff --git a/src/core/src/op/hsigmoid.cpp b/src/core/src/op/hsigmoid.cpp index 2abc4c02c5d..9ed9ac3aa79 100644 --- a/src/core/src/op/hsigmoid.cpp +++ b/src/core/src/op/hsigmoid.cpp @@ -42,11 +42,15 @@ bool HSigmoid::evaluate(TensorVector& outputs, const TensorVector& inputs) const const auto& input_shape = inputs[0].get_shape(); const auto count = shape_size(input_shape); outputs[0].set_shape(input_shape); + using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - count); + return IF_TYPE_OF(v5_HSigmoid_evaluate, + OV_PP_ET_LIST(bf16, f16, f32), + hsigmoid::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + count); } bool HSigmoid::has_evaluate() const { diff --git a/src/core/src/op/hswish.cpp b/src/core/src/op/hswish.cpp index fd2d89896c0..dbb7e744cad 100644 --- a/src/core/src/op/hswish.cpp +++ b/src/core/src/op/hswish.cpp @@ -43,10 +43,13 @@ bool HSwish::evaluate(TensorVector& outputs, const TensorVector& inputs) const { const auto count = shape_size(input_shape); outputs[0].set_shape(input_shape); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - count); + return IF_TYPE_OF(v4_HSwish_evaluate, + OV_PP_ET_LIST(bf16, f16, f32), + hswish::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + count); } bool HSwish::has_evaluate() const { diff --git a/src/core/src/op/less.cpp b/src/core/src/op/less.cpp index 910876c3a58..1f7f27dc850 100644 --- a/src/core/src/op/less.cpp +++ b/src/core/src/op/less.cpp @@ -52,13 +52,16 @@ bool Less::evaluate(TensorVector& outputs, const TensorVector& inputs) const { outputs[0].set_shape(infer_broadcast_shape(this, inputs)); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - 
inputs[0], - inputs[1], - outputs[0], - inputs[0].get_shape(), - inputs[1].get_shape(), - get_autob()); + return IF_TYPE_OF(v1_Less_evaluate, + OV_PP_ET_LIST(boolean, f16, f32, i32, i64, u32, u64), + less::Evaluate, + inputs[0].get_element_type(), + inputs[0], + inputs[1], + outputs[0], + inputs[0].get_shape(), + inputs[1].get_shape(), + get_autob()); } bool Less::has_evaluate() const { diff --git a/src/core/src/op/less_eq.cpp b/src/core/src/op/less_eq.cpp index 76c94ad91cb..5bc3837885f 100644 --- a/src/core/src/op/less_eq.cpp +++ b/src/core/src/op/less_eq.cpp @@ -53,13 +53,16 @@ bool LessEqual::evaluate(TensorVector& outputs, const TensorVector& inputs) cons outputs[0].set_shape(infer_broadcast_shape(this, inputs)); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - inputs[1], - outputs[0], - inputs[0].get_shape(), - inputs[1].get_shape(), - get_autob()); + return IF_TYPE_OF(v1_LessEqual_evaluate, + OV_PP_ET_LIST(boolean, f16, f32, i32, i64, u32, u64), + less_equal::Evaluate, + inputs[0].get_element_type(), + inputs[0], + inputs[1], + outputs[0], + inputs[0].get_shape(), + inputs[1].get_shape(), + get_autob()); } bool LessEqual::has_evaluate() const { diff --git a/src/core/src/op/log.cpp b/src/core/src/op/log.cpp index dacde7087e9..0bbaa1d250d 100644 --- a/src/core/src/op/log.cpp +++ b/src/core/src/op/log.cpp @@ -42,10 +42,13 @@ bool Log::evaluate(TensorVector& outputs, const TensorVector& inputs) const { const auto count = shape_size(input_shape); outputs[0].set_shape(input_shape); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - count); + return IF_TYPE_OF(v0_Log_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + log::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + count); } bool Log::has_evaluate() const { diff --git a/src/core/src/op/logical_not.cpp b/src/core/src/op/logical_not.cpp index db9f9394636..e3aab7c64a3 100644 --- a/src/core/src/op/logical_not.cpp +++ b/src/core/src/op/logical_not.cpp @@ -51,11 +51,13 @@ bool LogicalNot::evaluate(TensorVector& outputs, const TensorVector& inputs) con outputs[0].set_shape(inputs[0].get_shape()); using namespace ov::element; - return IfTypeOf::apply( - inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(inputs[0].get_shape())); + return IF_TYPE_OF(v1_LogicalNot_evaluate, + OV_PP_ET_LIST(boolean, i32, i64, u32, u64, f16, f32), + logical_not::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(inputs[0].get_shape())); } bool LogicalNot::has_evaluate() const { diff --git a/src/core/src/op/matmul.cpp b/src/core/src/op/matmul.cpp index 06fd0a9f33e..130b8a793d6 100644 --- a/src/core/src/op/matmul.cpp +++ b/src/core/src/op/matmul.cpp @@ -68,15 +68,18 @@ bool MatMul::evaluate(TensorVector& outputs, const TensorVector& inputs) const { outputs[0].set_shape(out_shape); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - inputs[1], - outputs[0], - inputs[0].get_shape(), - inputs[1].get_shape(), - out_shape, - m_transpose_a, - m_transpose_b); + return IF_TYPE_OF(v0_MatMul_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + matmul::Evaluate, + inputs[0].get_element_type(), + inputs[0], + inputs[1], + outputs[0], + inputs[0].get_shape(), + inputs[1].get_shape(), + out_shape, + m_transpose_a, + m_transpose_b); } bool MatMul::has_evaluate() const { diff --git a/src/core/src/op/max_pool.cpp b/src/core/src/op/max_pool.cpp 
index 4dab91221f9..c7afdb77187 100644 --- a/src/core/src/op/max_pool.cpp +++ b/src/core/src/op/max_pool.cpp @@ -93,15 +93,18 @@ bool MaxPool::evaluate(TensorVector& outputs, const TensorVector& inputs) const outputs[0].set_shape(output_shape.get_shape()); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - inputs[0].get_shape(), - outputs[0].get_shape(), - get_kernel(), - get_strides(), - get_pads_begin(), - get_pads_end()); + return IF_TYPE_OF(v1_MaxPool_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + maxpool::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + inputs[0].get_shape(), + outputs[0].get_shape(), + get_kernel(), + get_strides(), + get_pads_begin(), + get_pads_end()); } bool MaxPool::has_evaluate() const { @@ -208,18 +211,21 @@ struct Evaluate : element::NoAction { const Shape& pads_end, const int64_t axis) { using namespace ov::element; - return IfTypeOf::apply(out_indices.get_element_type(), - in.data(), - out_values.data(), - out_indices, - in_shape, - out_shape, - kernel, - strides, - dilations, - pads_begin, - pads_end, - axis); + return IF_TYPE_OF(maxpool_eval_by_idx_type, + OV_PP_ET_LIST(i32, i64), + EvalByIdxType, + out_indices.get_element_type(), + in.data(), + out_values.data(), + out_indices, + in_shape, + out_shape, + kernel, + strides, + dilations, + pads_begin, + pads_end, + axis); } private: @@ -265,18 +271,21 @@ bool MaxPool::evaluate(TensorVector& outputs, const TensorVector& inputs) const outputs[0].set_shape(output_shape.get_shape()); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - outputs[1], - inputs[0].get_shape(), - outputs[0].get_shape(), - get_kernel(), - get_strides(), - get_dilations(), - get_pads_begin(), - get_pads_end(), - get_axis()); + return IF_TYPE_OF(v8_MaxPool_evaluate, + OV_PP_ET_LIST(f16, f32, i8, i32, i64, u8, u32, u64), + maxpool::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + outputs[1], + inputs[0].get_shape(), + outputs[0].get_shape(), + get_kernel(), + get_strides(), + get_dilations(), + get_pads_begin(), + get_pads_end(), + get_axis()); } bool MaxPool::has_evaluate() const { diff --git a/src/core/src/op/maximum.cpp b/src/core/src/op/maximum.cpp index 90a038d0b54..5a9e832bbb2 100644 --- a/src/core/src/op/maximum.cpp +++ b/src/core/src/op/maximum.cpp @@ -48,13 +48,16 @@ bool Maximum::evaluate(TensorVector& outputs, const TensorVector& inputs) const outputs[0].set_shape(infer_broadcast_shape(this, inputs)); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - inputs[1], - outputs[0], - inputs[0].get_shape(), - inputs[1].get_shape(), - get_autob()); + return IF_TYPE_OF(v1_Maximum_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + maximum::Evaluate, + inputs[0].get_element_type(), + inputs[0], + inputs[1], + outputs[0], + inputs[0].get_shape(), + inputs[1].get_shape(), + get_autob()); } bool Maximum::has_evaluate() const { diff --git a/src/core/src/op/minimum.cpp b/src/core/src/op/minimum.cpp index 1844c6e5b25..544f33e5055 100644 --- a/src/core/src/op/minimum.cpp +++ b/src/core/src/op/minimum.cpp @@ -49,13 +49,16 @@ bool Minimum::evaluate(TensorVector& outputs, const TensorVector& inputs) const outputs[0].set_shape(infer_broadcast_shape(this, inputs)); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - inputs[1], - outputs[0], - inputs[0].get_shape(), - 
inputs[1].get_shape(), - get_autob()); + return IF_TYPE_OF(v1_Minimum_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u8, u16, u32, u64), + minimum::Evaluate, + inputs[0].get_element_type(), + inputs[0], + inputs[1], + outputs[0], + inputs[0].get_shape(), + inputs[1].get_shape(), + get_autob()); } bool Minimum::has_evaluate() const { diff --git a/src/core/src/op/mish.cpp b/src/core/src/op/mish.cpp index 606ff6239f5..406692baccd 100644 --- a/src/core/src/op/mish.cpp +++ b/src/core/src/op/mish.cpp @@ -58,10 +58,13 @@ bool Mish::evaluate(TensorVector& outputs, const TensorVector& inputs) const { outputs[0].set_shape(in_shape); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(in_shape)); + return IF_TYPE_OF(v4_Mish_evaluate, + OV_PP_ET_LIST(f16, f32), + mish::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(in_shape)); } bool Mish::has_evaluate() const { diff --git a/src/core/src/op/mod.cpp b/src/core/src/op/mod.cpp index 69ac9493052..b321b58d4c5 100644 --- a/src/core/src/op/mod.cpp +++ b/src/core/src/op/mod.cpp @@ -244,13 +244,16 @@ bool Mod::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) co outputs[0].set_shape(infer_broadcast_shape(this, inputs)); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - inputs[1], - outputs[0], - inputs[0].get_shape(), - inputs[1].get_shape(), - get_autob()); + return IF_TYPE_OF(v1_Mod_evaluate, + OV_PP_ET_LIST(i8, i16, i32, i64, u8, u16, u32, u64), + mod::Evaluate, + inputs[0].get_element_type(), + inputs[0], + inputs[1], + outputs[0], + inputs[0].get_shape(), + inputs[1].get_shape(), + get_autob()); } bool Mod::evaluate_lower(TensorVector& outputs) const { diff --git a/src/core/src/op/multiply.cpp b/src/core/src/op/multiply.cpp index 2ae5f4304cf..c1c47df1abd 100644 --- a/src/core/src/op/multiply.cpp +++ b/src/core/src/op/multiply.cpp @@ -47,13 +47,16 @@ bool Multiply::evaluate(TensorVector& outputs, const TensorVector& inputs) const outputs[0].set_shape(infer_broadcast_shape(this, inputs)); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - inputs[1], - outputs[0], - inputs[0].get_shape(), - inputs[1].get_shape(), - get_autob()); + return IF_TYPE_OF(v1_Multiply_evaluate, + OV_PP_ET_LIST(bf16, f16, f32, f64, i32, i64, u32, u64), + multiply::Evaluate, + inputs[0].get_element_type(), + inputs[0], + inputs[1], + outputs[0], + inputs[0].get_shape(), + inputs[1].get_shape(), + get_autob()); } bool Multiply::has_evaluate() const { diff --git a/src/core/src/op/negative.cpp b/src/core/src/op/negative.cpp index a34d29a479d..8f8f6a2cc51 100644 --- a/src/core/src/op/negative.cpp +++ b/src/core/src/op/negative.cpp @@ -42,10 +42,13 @@ bool Negative::evaluate(TensorVector& outputs, const TensorVector& inputs) const outputs[0].set_shape(inputs[0].get_shape()); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(inputs[0].get_shape())); + return IF_TYPE_OF(v0_Negative_evaluate, + OV_PP_ET_LIST(bf16, f16, f32, i32, i64), + negative::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(inputs[0].get_shape())); } bool Negative::has_evaluate() const { diff --git a/src/core/src/op/non_zero.cpp b/src/core/src/op/non_zero.cpp index 8257c05924f..06c0df37d58 100644 --- a/src/core/src/op/non_zero.cpp +++ b/src/core/src/op/non_zero.cpp @@ -26,7 +26,13 @@ struct Evaluate 
: public element::NoAction { out.set_shape(out_shape); using namespace ov::element; - return IfTypeOf::apply(out.get_element_type(), in_data, out, in_shape); + return IF_TYPE_OF(non_zero_out_type, + OV_PP_ET_LIST(i32, i64), + EvalByOutType, + out.get_element_type(), + in_data, + out, + in_shape); } private: @@ -114,12 +120,14 @@ bool NonZero::evaluate(TensorVector& outputs, const TensorVector& inputs) const auto& output = outputs[0]; using namespace ov::element; const auto& input_shape = input.get_shape(); - return IfTypeOf::apply( - input.get_element_type(), - input, - input_shape, - input_shape.size(), - output); + return IF_TYPE_OF(v3_NonZero_evaluate, + OV_PP_ET_LIST(boolean, bf16, f16, f32, f64, i8, i16, i32, i64, u8, u16, u32, u64), + non_zero::Evaluate, + input.get_element_type(), + input, + input_shape, + input_shape.size(), + output); } bool NonZero::has_evaluate() const { diff --git a/src/core/src/op/not_equal.cpp b/src/core/src/op/not_equal.cpp index 55c0f5a3d9f..920b79064cb 100644 --- a/src/core/src/op/not_equal.cpp +++ b/src/core/src/op/not_equal.cpp @@ -51,13 +51,16 @@ bool NotEqual::evaluate(TensorVector& outputs, const TensorVector& inputs) const outputs[0].set_shape(infer_broadcast_shape(this, inputs)); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - inputs[1], - outputs[0], - inputs[0].get_shape(), - inputs[1].get_shape(), - get_autob()); + return IF_TYPE_OF(v1_NotEqual_evaluate, + OV_PP_ET_LIST(boolean, f16, f32, i32, i64, u32, u64), + not_equal::Evaluate, + inputs[0].get_element_type(), + inputs[0], + inputs[1], + outputs[0], + inputs[0].get_shape(), + inputs[1].get_shape(), + get_autob()); } bool NotEqual::has_evaluate() const { diff --git a/src/core/src/op/one_hot.cpp b/src/core/src/op/one_hot.cpp index 8a3cd26e6fd..ac9ba016a70 100644 --- a/src/core/src/op/one_hot.cpp +++ b/src/core/src/op/one_hot.cpp @@ -118,15 +118,18 @@ bool OneHot::evaluate(TensorVector& outputs, const TensorVector& inputs) const { auto& output = outputs[0]; output.set_shape(output_shape); using namespace ov::element; - return IfTypeOf::apply(indices.get_element_type(), - indices, - indices_shape, - static_cast(output.data()), - output.get_element_type().size(), - output.get_shape()[axis], - on_value, - off_value, - axis); + return IF_TYPE_OF(v1_OneHot_evaluate, + OV_PP_ET_LIST(i32, i64), + one_hot::Evaluate, + indices.get_element_type(), + indices, + indices_shape, + static_cast(output.data()), + output.get_element_type().size(), + output.get_shape()[axis], + on_value, + off_value, + axis); } bool OneHot::has_evaluate() const { diff --git a/src/core/src/op/power.cpp b/src/core/src/op/power.cpp index 7eda718951e..88460b36e33 100644 --- a/src/core/src/op/power.cpp +++ b/src/core/src/op/power.cpp @@ -49,13 +49,16 @@ bool Power::evaluate(TensorVector& outputs, const TensorVector& inputs) const { out.set_shape(infer_broadcast_shape(this, inputs)); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - inputs[1], - out, - inputs[0].get_shape(), - inputs[1].get_shape(), - get_autob()); + return IF_TYPE_OF(v1_Power_evaluate, + OV_PP_ET_LIST(bf16, f16, f32, i32, i64, u32, u64), + power::Evaluate, + inputs[0].get_element_type(), + inputs[0], + inputs[1], + out, + inputs[0].get_shape(), + inputs[1].get_shape(), + get_autob()); } bool Power::has_evaluate() const { diff --git a/src/core/src/op/prelu.cpp b/src/core/src/op/prelu.cpp index ee417602cf0..7c65f5c15cf 100644 --- a/src/core/src/op/prelu.cpp +++ 
b/src/core/src/op/prelu.cpp @@ -55,12 +55,15 @@ bool PRelu::evaluate(TensorVector& outputs, const TensorVector& inputs) const { out.set_shape(arg_shape); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - inputs[1], - out, - arg_shape, - inputs[1].get_shape()); + return IF_TYPE_OF(v0_PRelu_evaluate, + OV_PP_ET_LIST(bf16, f16, f32, i8), + prelu::Evaluate, + inputs[0].get_element_type(), + inputs[0], + inputs[1], + out, + arg_shape, + inputs[1].get_shape()); } bool PRelu::has_evaluate() const { diff --git a/src/core/src/op/range.cpp b/src/core/src/op/range.cpp index f048f778c31..fef5f15aa4f 100644 --- a/src/core/src/op/range.cpp +++ b/src/core/src/op/range.cpp @@ -119,11 +119,14 @@ bool Range::evaluate(TensorVector& outputs, const TensorVector& inputs) const { const auto step = get_tensor_data_as(inputs[2])[0]; using namespace ov::element; - return IfTypeOf::apply(out.get_element_type(), - start, - step, - shape_size(out_shape), - out); + return IF_TYPE_OF(v4_Range_evaluate, + RANGE_ET_LIST, + range::Evaluate, + out.get_element_type(), + start, + step, + shape_size(out_shape), + out); } bool Range::has_evaluate() const { @@ -199,11 +202,14 @@ bool Range::evaluate(TensorVector& outputs, const TensorVector& inputs) const { out.set_shape(out_shape); using namespace ov::element; - return IfTypeOf::apply(out.get_element_type(), - start, - step, - shape_size(out_shape), - out); + return IF_TYPE_OF(v0_Range_evaluate, + RANGE_ET_LIST, + range::Evaluate, + out.get_element_type(), + start, + step, + shape_size(out_shape), + out); } bool Range::has_evaluate() const { diff --git a/src/core/src/op/reduce_l1.cpp b/src/core/src/op/reduce_l1.cpp index 75f8a000580..fb44d686764 100644 --- a/src/core/src/op/reduce_l1.cpp +++ b/src/core/src/op/reduce_l1.cpp @@ -48,10 +48,13 @@ bool ReduceL1::evaluate(TensorVector& outputs, const TensorVector& inputs) const outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims())); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - reduction_axes); + return IF_TYPE_OF(v4_ReduceL1_evaluate, + OV_PP_ET_LIST(bf16, f16, f32, i32, i64), + reduce_l1::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + reduction_axes); } bool ReduceL1::has_evaluate() const { diff --git a/src/core/src/op/reduce_l2.cpp b/src/core/src/op/reduce_l2.cpp index 5477a56986b..9f3c48a2144 100644 --- a/src/core/src/op/reduce_l2.cpp +++ b/src/core/src/op/reduce_l2.cpp @@ -47,10 +47,13 @@ bool ReduceL2::evaluate(TensorVector& outputs, const TensorVector& inputs) const outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims())); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - reduction_axes); + return IF_TYPE_OF(v4_ReduceL2_evaluate, + OV_PP_ET_LIST(bf16, f16, f32), + reduce_l2::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + reduction_axes); } bool ReduceL2::has_evaluate() const { diff --git a/src/core/src/op/reduce_logical_and.cpp b/src/core/src/op/reduce_logical_and.cpp index adcfed43626..0178917aae3 100644 --- a/src/core/src/op/reduce_logical_and.cpp +++ b/src/core/src/op/reduce_logical_and.cpp @@ -47,10 +47,13 @@ bool ReduceLogicalAnd::evaluate(TensorVector& outputs, const TensorVector& input outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims())); using namespace ov::element; - return 
IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - reduction_axes); + return IF_TYPE_OF(v1_ReduceLogicalAnd_evaluate, + boolean, + reduce_and::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + reduction_axes); } bool ReduceLogicalAnd::has_evaluate() const { diff --git a/src/core/src/op/reduce_logical_or.cpp b/src/core/src/op/reduce_logical_or.cpp index a2e84d420e6..b41c78fa859 100644 --- a/src/core/src/op/reduce_logical_or.cpp +++ b/src/core/src/op/reduce_logical_or.cpp @@ -48,10 +48,13 @@ bool ReduceLogicalOr::evaluate(TensorVector& outputs, const TensorVector& inputs outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims())); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - reduction_axes); + return IF_TYPE_OF(v1_ReduceLogicalOr_evaluate, + boolean, + reduce_or::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + reduction_axes); } bool ReduceLogicalOr::has_evaluate() const { diff --git a/src/core/src/op/reduce_max.cpp b/src/core/src/op/reduce_max.cpp index 989f0a771f2..f64b7b1e6ec 100644 --- a/src/core/src/op/reduce_max.cpp +++ b/src/core/src/op/reduce_max.cpp @@ -47,10 +47,13 @@ bool ReduceMax::evaluate(TensorVector& outputs, const TensorVector& inputs) cons outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims())); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - reduction_axes); + return IF_TYPE_OF(v1_ReduceMax_evaluate, + OV_PP_ET_LIST(f16, f32, i8, i32, i64, u8, u32, u64), + reduce_max::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + reduction_axes); } bool ReduceMax::has_evaluate() const { diff --git a/src/core/src/op/reduce_mean.cpp b/src/core/src/op/reduce_mean.cpp index 762bc1c0971..25f30d0235d 100644 --- a/src/core/src/op/reduce_mean.cpp +++ b/src/core/src/op/reduce_mean.cpp @@ -45,10 +45,13 @@ bool ReduceMean::evaluate(TensorVector& outputs, const TensorVector& inputs) con outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims())); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - reduction_axes); + return IF_TYPE_OF(v1_ReduceMean_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + reduce_mean::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + reduction_axes); } bool ReduceMean::has_evaluate() const { diff --git a/src/core/src/op/reduce_min.cpp b/src/core/src/op/reduce_min.cpp index 3334b02d5fa..b60b0e9889e 100644 --- a/src/core/src/op/reduce_min.cpp +++ b/src/core/src/op/reduce_min.cpp @@ -45,10 +45,13 @@ bool ReduceMin::evaluate(TensorVector& outputs, const TensorVector& inputs) cons outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims())); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - reduction_axes); + return IF_TYPE_OF(v1_ReduceMin_evaluate, + OV_PP_ET_LIST(f16, f32, i8, i32, i64, u8, u32, u64), + reduce_min::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + reduction_axes); } bool ReduceMin::has_evaluate() const { diff --git a/src/core/src/op/reduce_prod.cpp b/src/core/src/op/reduce_prod.cpp index 9d2c4dee4a8..d80f040e5ef 100644 --- a/src/core/src/op/reduce_prod.cpp +++ b/src/core/src/op/reduce_prod.cpp @@ -57,10 +57,13 @@ bool 
ReduceProd::evaluate(TensorVector& outputs, const TensorVector& inputs) con outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims())); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - reduction_axes); + return IF_TYPE_OF(v1_ReduceProd_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + reduce_prod::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + reduction_axes); } bool ReduceProd::has_evaluate() const { diff --git a/src/core/src/op/reduce_sum.cpp b/src/core/src/op/reduce_sum.cpp index 33e7ced8204..b661fbc1ea4 100644 --- a/src/core/src/op/reduce_sum.cpp +++ b/src/core/src/op/reduce_sum.cpp @@ -45,10 +45,13 @@ bool ReduceSum::evaluate(TensorVector& outputs, const TensorVector& inputs) cons outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims())); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - reduction_axes); + return IF_TYPE_OF(v1_ReduceSum_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + reduce_sum::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + reduction_axes); } bool ReduceSum::has_evaluate() const { diff --git a/src/core/src/op/relu.cpp b/src/core/src/op/relu.cpp index ebc8aa46d39..eff1ecc0f75 100644 --- a/src/core/src/op/relu.cpp +++ b/src/core/src/op/relu.cpp @@ -42,10 +42,13 @@ bool Relu::evaluate(TensorVector& outputs, const TensorVector& inputs) const { outputs[0].set_shape(in_shape); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(in_shape)); + return IF_TYPE_OF(v0_Relu_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + relu::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(in_shape)); } bool Relu::has_evaluate() const { diff --git a/src/core/src/op/round.cpp b/src/core/src/op/round.cpp index 16643086e71..24adc7e9bfe 100644 --- a/src/core/src/op/round.cpp +++ b/src/core/src/op/round.cpp @@ -59,12 +59,14 @@ bool Round::evaluate(TensorVector& outputs, const TensorVector& inputs) const { auto& out = outputs.front(); using namespace ov::element; - return IfTypeOf::apply( - arg0.get_element_type(), - arg0, - out, - shape_size(arg0.get_shape()), - get_mode()); + return IF_TYPE_OF(v5_Round_evaluate, + OV_PP_ET_LIST(boolean, i8, i16, i32, i64, u8, u16, u32, u64, bf16, f16, f32), + round::Evaluate, + arg0.get_element_type(), + arg0, + out, + shape_size(arg0.get_shape()), + get_mode()); } bool Round::has_evaluate() const { diff --git a/src/core/src/op/scatter_elements_update.cpp b/src/core/src/op/scatter_elements_update.cpp index c9dabd1a91d..2cec3ff0762 100644 --- a/src/core/src/op/scatter_elements_update.cpp +++ b/src/core/src/op/scatter_elements_update.cpp @@ -101,16 +101,19 @@ struct Evaluate : public element::NoAction { ) { using namespace ov::element; - return IfTypeOf::apply(indices.get_element_type(), - data.data(), - indices, - updates.data(), - output.data
(), - data_shape, - indices_shape, - axis, - reduction, - use_init_value); + return IF_TYPE_OF(scatter_el_update_idx_type, + OV_PP_ET_LIST(i8, i16, i32, i64, u8, u16, u32, u64), + EvaluateByIndicesType, + indices.get_element_type(), + data.data(), + indices, + updates.data(), + output.data
(), + data_shape, + indices_shape, + axis, + reduction, + use_init_value); } private: @@ -156,18 +159,21 @@ bool evaluate(TensorVector& outputs, const auto& data_shape = data.get_shape(); const auto& indices_shape = indices.get_shape(); output.set_shape(data_shape); + using namespace ov::element; - return IfTypeOf::apply( - data.get_element_type(), - data, - indices, - updates, - output, - data_shape, - indices_shape, - axis, - reduction, - use_init_value); + return IF_TYPE_OF(scatter_evaluate, + OV_PP_ET_LIST(boolean, f16, f32, i16, i32, i64, u32, u64), + scatter_elements_update::Evaluate, + data.get_element_type(), + data, + indices, + updates, + output, + data_shape, + indices_shape, + axis, + reduction, + use_init_value); } } // namespace } // namespace scatter_elements_update diff --git a/src/core/src/op/scatter_nd_update.cpp b/src/core/src/op/scatter_nd_update.cpp index 3ca0d83686a..e6d79f99366 100644 --- a/src/core/src/op/scatter_nd_update.cpp +++ b/src/core/src/op/scatter_nd_update.cpp @@ -24,14 +24,17 @@ struct Evaluate : public element::NoAction { const Shape& indices_shape, const Shape& updates_shape) { using namespace ov::element; - return IfTypeOf::apply(indices.get_element_type(), - data.data(), - indices, - updates.data(), - output.data
(), - data_shape, - indices_shape, - updates_shape); + return IF_TYPE_OF(sctter_nd_eval_idx_type, + OV_PP_ET_LIST(i32, i64), + EvaluateByIndicesType, + indices.get_element_type(), + data.data(), + indices, + updates.data(), + output.data
(), + data_shape, + indices_shape, + updates_shape); } private: @@ -82,14 +85,17 @@ bool ScatterNDUpdate::evaluate(TensorVector& outputs, const TensorVector& inputs const auto& updates_shape = updates.get_shape(); output.set_shape(data_shape); using namespace ov::element; - return IfTypeOf::apply(data.get_element_type(), - data, - indices, - updates, - output, - data_shape, - indices_shape, - updates_shape); + return IF_TYPE_OF(v3_ScatterNDUpdate_evaluate, + OV_PP_ET_LIST(boolean, f16, f32, i32, i64, u32, u64), + scatter_nd_update::Evaluate, + data.get_element_type(), + data, + indices, + updates, + output, + data_shape, + indices_shape, + updates_shape); } bool ScatterNDUpdate::has_evaluate() const { diff --git a/src/core/src/op/sigmoid.cpp b/src/core/src/op/sigmoid.cpp index a4ce31db1e3..abe6105b45d 100644 --- a/src/core/src/op/sigmoid.cpp +++ b/src/core/src/op/sigmoid.cpp @@ -44,10 +44,13 @@ bool Sigmoid::evaluate(TensorVector& outputs, const TensorVector& inputs) const outputs[0].set_shape(in_shape); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(in_shape)); + return IF_TYPE_OF(v0_Sigmoid_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + sigmoid::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(in_shape)); } bool Sigmoid::has_evaluate() const { diff --git a/src/core/src/op/sign.cpp b/src/core/src/op/sign.cpp index f22798bfcdc..10aafb5f29a 100644 --- a/src/core/src/op/sign.cpp +++ b/src/core/src/op/sign.cpp @@ -43,10 +43,13 @@ bool Sign::evaluate(TensorVector& outputs, const TensorVector& inputs) const { outputs[0].set_shape(in_shape); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(in_shape)); + return IF_TYPE_OF(v0_Sign_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + sign::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(in_shape)); } bool Sign::has_evaluate() const { diff --git a/src/core/src/op/sin.cpp b/src/core/src/op/sin.cpp index dc224ada7e0..083b14645f2 100644 --- a/src/core/src/op/sin.cpp +++ b/src/core/src/op/sin.cpp @@ -47,10 +47,13 @@ bool Sin::evaluate(TensorVector& outputs, const TensorVector& inputs) const { outputs[0].set_shape(inputs[0].get_shape()); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(inputs[0].get_shape())); + return IF_TYPE_OF(v0_Sin_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + sin::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(inputs[0].get_shape())); } bool Sin::has_evaluate() const { diff --git a/src/core/src/op/sinh.cpp b/src/core/src/op/sinh.cpp index f678118a0fa..43cf622afcf 100644 --- a/src/core/src/op/sinh.cpp +++ b/src/core/src/op/sinh.cpp @@ -45,10 +45,13 @@ bool Sinh::evaluate(TensorVector& outputs, const TensorVector& inputs) const { outputs[0].set_shape(inputs[0].get_shape()); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(inputs[0].get_shape())); + return IF_TYPE_OF(v0_Sinh_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + sinh::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(inputs[0].get_shape())); } bool Sinh::has_evaluate() const { diff --git a/src/core/src/op/softmax.cpp b/src/core/src/op/softmax.cpp index d2e1326d76c..5166a42446f 100644 --- 
a/src/core/src/op/softmax.cpp +++ b/src/core/src/op/softmax.cpp @@ -68,11 +68,14 @@ bool Softmax::evaluate(TensorVector& outputs, const TensorVector& inputs) const const auto& input_shape = inputs[0].get_shape(); outputs[0].set_shape(input_shape); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - input_shape, - AxisSet{m_axis}); + return IF_TYPE_OF(v1_Softmax_evaluate, + OV_PP_ET_LIST(bf16, f16, f32, f64), + softmax::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + input_shape, + AxisSet{m_axis}); } bool Softmax::has_evaluate() const { @@ -140,11 +143,14 @@ bool Softmax::evaluate(TensorVector& outputs, const TensorVector& inputs) const outputs[0].set_shape(input_shape); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - input_shape, - AxisSet{axis}); + return IF_TYPE_OF(v8_Softmax_evaluate, + OV_PP_ET_LIST(bf16, f16, f32, f64), + softmax::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + input_shape, + AxisSet{axis}); } bool Softmax::has_evaluate() const { diff --git a/src/core/src/op/softplus.cpp b/src/core/src/op/softplus.cpp index a5896c00795..39fe5eb257c 100644 --- a/src/core/src/op/softplus.cpp +++ b/src/core/src/op/softplus.cpp @@ -57,10 +57,13 @@ bool SoftPlus::evaluate(TensorVector& outputs, const TensorVector& inputs) const const auto count = shape_size(input_shape); outputs[0].set_shape(input_shape); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - count); + return IF_TYPE_OF(v4_SoftPlus_evaluate, + OV_PP_ET_LIST(bf16, f16, f32), + softplus::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + count); } bool SoftPlus::has_evaluate() const { diff --git a/src/core/src/op/softsign.cpp b/src/core/src/op/softsign.cpp index 733b193b724..37183363c78 100644 --- a/src/core/src/op/softsign.cpp +++ b/src/core/src/op/softsign.cpp @@ -75,10 +75,13 @@ bool SoftSign::evaluate(TensorVector& outputs, const auto& input_shape = inputs[0].get_shape(); outputs[0].set_shape(input_shape); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(input_shape)); + return IF_TYPE_OF(v9_SoftSign_evaluate, + OV_PP_ET_LIST(bf16, f16, f32, f64), + softsign::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(input_shape)); } } // namespace v9 } // namespace op diff --git a/src/core/src/op/sqrt.cpp b/src/core/src/op/sqrt.cpp index 0c05c6833bf..1a8c7501d7c 100644 --- a/src/core/src/op/sqrt.cpp +++ b/src/core/src/op/sqrt.cpp @@ -41,10 +41,13 @@ bool Sqrt::evaluate(TensorVector& outputs, const TensorVector& inputs) const { const auto& in_shape = inputs[0].get_shape(); outputs[0].set_shape(in_shape); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(in_shape)); + return IF_TYPE_OF(v0_Sqrt_evaluate, + OV_PP_ET_LIST(f16, f32, f64, i32, i64, u32, u64), + sqrt::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(in_shape)); } bool Sqrt::has_evaluate() const { diff --git a/src/core/src/op/subtract.cpp b/src/core/src/op/subtract.cpp index 6b21fa00483..22de7fa20bf 100644 --- a/src/core/src/op/subtract.cpp +++ b/src/core/src/op/subtract.cpp @@ -48,13 +48,16 @@ bool Subtract::evaluate(TensorVector& outputs, const TensorVector& inputs) const 
outputs[0].set_shape(infer_broadcast_shape(this, inputs)); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - inputs[1], - outputs[0], - inputs[0].get_shape(), - inputs[1].get_shape(), - get_autob()); + return IF_TYPE_OF(v1_Subtract_evaluate, + OV_PP_ET_LIST(bf16, f16, f32, i8, i32, i64, u8, u32, u64), + subtract::Evaluate, + inputs[0].get_element_type(), + inputs[0], + inputs[1], + outputs[0], + inputs[0].get_shape(), + inputs[1].get_shape(), + get_autob()); } bool Subtract::has_evaluate() const { diff --git a/src/core/src/op/swish.cpp b/src/core/src/op/swish.cpp index ccc3d82b682..448aaa67e51 100644 --- a/src/core/src/op/swish.cpp +++ b/src/core/src/op/swish.cpp @@ -88,11 +88,14 @@ bool Swish::evaluate(TensorVector& outputs, const TensorVector& inputs) const { const auto& arg1 = inputs.size() == 2 ? inputs[1] : Tensor(); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - arg1, - outputs[0], - shape_size(inputs[0].get_shape())); + return IF_TYPE_OF(v4_Swish_evaluate, + OV_PP_ET_LIST(f16, f32), + swish::Evaluate, + inputs[0].get_element_type(), + inputs[0], + arg1, + outputs[0], + shape_size(inputs[0].get_shape())); } bool Swish::has_evaluate() const { diff --git a/src/core/src/op/tan.cpp b/src/core/src/op/tan.cpp index 5fac393b360..2d3f2b5fa1e 100644 --- a/src/core/src/op/tan.cpp +++ b/src/core/src/op/tan.cpp @@ -45,10 +45,13 @@ bool Tan::evaluate(TensorVector& outputs, const TensorVector& inputs) const { outputs[0].set_shape(inputs[0].get_shape()); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(inputs[0].get_shape())); + return IF_TYPE_OF(v0_Tan_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + tan::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(inputs[0].get_shape())); } bool Tan::has_evaluate() const { diff --git a/src/core/src/op/tanh.cpp b/src/core/src/op/tanh.cpp index 6be5ef3e2bf..26613d6818a 100644 --- a/src/core/src/op/tanh.cpp +++ b/src/core/src/op/tanh.cpp @@ -46,10 +46,13 @@ bool Tanh::evaluate(TensorVector& outputs, const TensorVector& inputs) const { outputs[0].set_shape(inputs[0].get_shape()); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - shape_size(inputs[0].get_shape())); + return IF_TYPE_OF(v0_Tanh_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + tanh::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(inputs[0].get_shape())); } bool Tanh::has_evaluate() const { diff --git a/src/core/src/op/topk.cpp b/src/core/src/op/topk.cpp index a84d0490d9b..3d11e5967d2 100644 --- a/src/core/src/op/topk.cpp +++ b/src/core/src/op/topk.cpp @@ -59,16 +59,19 @@ struct Evaluate : public element::NoAction { const bool compute_max, const TopKSortType sort) { using namespace ov::element; - return IfTypeOf::apply(out_indices.get_element_type(), - in.data(), - out_values.data(), - out_indices, - in.get_shape(), - out_shape, - axis, - out_shape[axis], - compute_max, - sort); + return IF_TYPE_OF(topk_eval_by_idx_type, + OV_PP_ET_LIST(i32, i64), + EvalByIdxType, + out_indices.get_element_type(), + in.data(), + out_values.data(), + out_indices, + in.get_shape(), + out_shape, + axis, + out_shape[axis], + compute_max, + sort); } private: @@ -116,14 +119,17 @@ bool evaluate(const util::TopKBase* const node, TensorVector& outputs, const Ten } using namespace ov::element; - return 
IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - outputs[0], - outputs[1], - output_shape, - axis, - (node->get_mode() == ov::op::TopKMode::MAX), - node->get_sort_type()); + return IF_TYPE_OF(topk_evaluate, + OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64), + topk::Evaluate, + inputs[0].get_element_type(), + inputs[0], + outputs[0], + outputs[1], + output_shape, + axis, + (node->get_mode() == ov::op::TopKMode::MAX), + node->get_sort_type()); } } // namespace } // namespace topk diff --git a/src/core/src/op/xor.cpp b/src/core/src/op/xor.cpp index c96599d9de3..df5286d72b6 100644 --- a/src/core/src/op/xor.cpp +++ b/src/core/src/op/xor.cpp @@ -45,13 +45,16 @@ bool evaluate(const Node* const op, TensorVector& outputs, const TensorVector& i outputs[0].set_shape(infer_broadcast_shape(op, inputs)); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), - inputs[0], - inputs[1], - outputs[0], - inputs[0].get_shape(), - inputs[1].get_shape(), - op->get_autob()); + return IF_TYPE_OF(Xor_evaluate, + boolean, + logxor::Evaluate, + inputs[0].get_element_type(), + inputs[0], + inputs[1], + outputs[0], + inputs[0].get_shape(), + inputs[1].get_shape(), + op->get_autob()); } } // namespace } // namespace logxor
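
Every hunk above makes the same mechanical substitution: the template-based ov::element::IfTypeOf<...>::apply dispatch is replaced by the IF_TYPE_OF macro, which takes a region name (e.g. v0_Abs_evaluate), the supported element-type list (OV_PP_ET_LIST(...)), the evaluation functor, the runtime element type, and the functor arguments, so that conditional-compilation (CC) builds can again strip per-type branches an operator does not need. Below is a minimal, self-contained C++ sketch of that dispatch shape, under stated assumptions: the names DISPATCH_ON_TYPE, CC_SCOPE, AbsEvaluate and ElementType are illustrative stand-ins, not OpenVINO's real IF_TYPE_OF / OV_PP_ET_LIST / ov::element machinery.

// Minimal sketch of the "dispatch through a named region" pattern restored by this patch.
// All identifiers here are hypothetical stand-ins for illustration only.
#include <cstddef>
#include <iostream>

enum class ElementType { f32, i32 };

// Per-type evaluation functor, analogous to the abs::Evaluate functor in the hunks above.
struct AbsEvaluate {
    template <typename T>
    static bool visit(const T* in, T* out, std::size_t count) {
        for (std::size_t i = 0; i < count; ++i)
            out[i] = in[i] < T{0} ? static_cast<T>(-in[i]) : in[i];
        return true;
    }
};

// Stand-in for a conditional-compilation scope: a CC-enabled build could drop the branch
// based on the region name; this sketch always keeps the body.
#define CC_SCOPE(region_name, ...) __VA_ARGS__

// Stand-in for IF_TYPE_OF: switch on the runtime element type and run the functor's typed
// visit() inside a named region; unsupported types fall through and return false.
#define DISPATCH_ON_TYPE(region_name, functor, et, in, out, count)             \
    [&]() -> bool {                                                            \
        switch (et) {                                                          \
        case ElementType::f32:                                                 \
            CC_SCOPE(region_name##_f32,                                        \
                     return functor::visit(static_cast<const float*>(in),      \
                                           static_cast<float*>(out), count);)  \
        case ElementType::i32:                                                 \
            CC_SCOPE(region_name##_i32,                                        \
                     return functor::visit(static_cast<const int*>(in),        \
                                           static_cast<int*>(out), count);)    \
        }                                                                      \
        return false;                                                          \
    }()

int main() {
    const float in[3] = {-1.5f, 2.0f, -3.0f};
    float out[3] = {};
    const void* in_p = in;   // type-erased buffers, as in the operators' Tensor::data()
    void* out_p = out;
    const bool ok = DISPATCH_ON_TYPE(v0_Abs_evaluate, AbsEvaluate, ElementType::f32, in_p, out_p, 3);
    std::cout << ok << ' ' << out[0] << ' ' << out[1] << ' ' << out[2] << '\n';  // prints: 1 1.5 2 3
    return 0;
}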