Restore CC feature in operators evaluate (#21446)

Pawel Raasz 2023-12-05 08:23:19 +01:00 committed by GitHub
parent f9d20d5aa0
commit 24209239bf
73 changed files with 718 additions and 475 deletions
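
Every hunk below makes the same transformation: a direct call to the ov::element::IfTypeOf<...>::apply<Evaluator>(...) dispatcher is replaced by the IF_TYPE_OF macro, whose first argument names a conditional-compilation (CC) region (v0_Abs_evaluate, v1_Add_evaluate, ...) so selective builds can strip per-type branches of operators a given model never uses. The toy model below is only a sketch of that dispatch shape, not the real OpenVINO definitions; the Type_t enum, the minimal IfTypeOf, and the simplified IF_TYPE_OF/OV_PP_ET_LIST here are assumptions made for illustration (the real macro additionally records the region name for the CC/selective-build tooling).

// Toy model (assumption): a minimal stand-in for the dispatch pattern
// touched by this commit; NOT the real OpenVINO implementation.
#include <iostream>
#include <utility>

enum class Type_t { f32, i32 };  // stand-in for ov::element::Type_t

template <Type_t... ETs>
struct IfTypeOf;

template <>
struct IfTypeOf<> {
    // End of the type list: no matching element type, do nothing.
    template <class Evaluator, class... Args>
    static bool apply(Type_t, Args&&...) {
        return false;
    }
};

template <Type_t ET, Type_t... ETs>
struct IfTypeOf<ET, ETs...> {
    // Walk the compile-time list; call Evaluator::visit<ET> on a match.
    template <class Evaluator, class... Args>
    static bool apply(Type_t et, Args&&... args) {
        return et == ET ? Evaluator::template visit<ET>(std::forward<Args>(args)...)
                        : IfTypeOf<ETs...>::template apply<Evaluator>(et, std::forward<Args>(args)...);
    }
};

// Packs an element-type list into a single macro argument, as in the hunks.
#define OV_PP_ET_LIST(...) __VA_ARGS__

// Toy IF_TYPE_OF: the region name is only a marker here; the real macro
// uses it to open a named conditional-compilation scope.
#define IF_TYPE_OF(region, et_list, Evaluator, et, ...) \
    IfTypeOf<et_list>::apply<Evaluator>((et), __VA_ARGS__)

struct Evaluate {
    template <Type_t ET>
    static bool visit(int x) {
        std::cout << "branch ET=" << static_cast<int>(ET) << " x=" << x << '\n';
        return true;
    }
};

int main() {
    // Mirrors e.g. the Abs hunk: a runtime element type dispatched over a
    // compile-time list through the named-region macro.
    return IF_TYPE_OF(toy_Abs_evaluate, OV_PP_ET_LIST(Type_t::f32, Type_t::i32), Evaluate, Type_t::i32, 42) ? 0 : 1;
}

Under this reading, each IF_TYPE_OF call in the diff is behavior-preserving relative to the removed IfTypeOf<...>::apply<...> call, while giving the selective-build tooling a stable region name per operator version.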


@@ -42,10 +42,13 @@ bool Abs::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<bf16, f16, f32, i32, i64, u32, u64>::apply<abs::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
return IF_TYPE_OF(v0_Abs_evaluate,
OV_PP_ET_LIST(bf16, f16, f32, i32, i64, u32, u64),
abs::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool Abs::has_evaluate() const {


@@ -41,10 +41,13 @@ bool ov::op::v0::Acos::evaluate(TensorVector& outputs, const TensorVector& input
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<acos::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
return IF_TYPE_OF(v0_Acos_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
acos::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool ov::op::v0::Acos::has_evaluate() const {


@@ -41,10 +41,13 @@ bool ov::op::v3::Acosh::evaluate(TensorVector& outputs, const TensorVector& inpu
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<acosh::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
return IF_TYPE_OF(v3_Acosh_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
acosh::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool ov::op::v3::Acosh::has_evaluate() const {


@@ -48,14 +48,16 @@ bool Add::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) co
outputs[0].set_shape(infer_broadcast_shape(this, inputs));
using namespace ov::element;
return IfTypeOf<bf16, f16, f32, i8, i16, i32, i64, u8, u16, u32, u64>::apply<add::Evaluate>(
inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
return IF_TYPE_OF(v1_Add_evaluate,
OV_PP_ET_LIST(bf16, f16, f32, i8, i16, i32, i64, u8, u16, u32, u64),
add::Evaluate,
inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
}
bool Add::has_evaluate() const {


@@ -41,10 +41,13 @@ bool Asin::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<asin::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
return IF_TYPE_OF(v0_Asin_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
asin::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool Asin::has_evaluate() const {


@@ -40,10 +40,13 @@ bool Asinh::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<asinh::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
return IF_TYPE_OF(v3_Asinh_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
asinh::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool Asinh::has_evaluate() const {


@@ -43,10 +43,13 @@ bool Atan::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<atan::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
return IF_TYPE_OF(v0_Atan_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
atan::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool Atan::has_evaluate() const {


@@ -40,10 +40,13 @@ bool op::v3::Atanh::evaluate(TensorVector& outputs, const TensorVector& inputs)
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<atanh::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
return IF_TYPE_OF(v3_Atanh_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
atanh::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool op::v3::Atanh::has_evaluate() const {


@@ -44,11 +44,13 @@ bool Ceiling::evaluate(TensorVector& outputs, const TensorVector& inputs) const
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<f16, f32, i8, i16, i32, i64, u8, u16, u32, u64>::apply<ceiling::Evaluate>(
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
return IF_TYPE_OF(v0_Ceiling_evaluate,
OV_PP_ET_LIST(f16, f32, i8, i16, i32, i64, u8, u16, u32, u64),
ceiling::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool Ceiling::has_evaluate() const {


@@ -68,13 +68,15 @@ bool Clamp::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
outputs[0].set_shape(in_shape);
using namespace ov::element;
return IfTypeOf<bf16, f16, f32, i8, i16, i32, i64, u8, u16, u32, u64>::apply<clamp::Evaluate>(
inputs[0].get_element_type(),
inputs[0],
outputs[0],
get_min(),
get_max(),
shape_size(in_shape));
return IF_TYPE_OF(v0_Clamp_evaluate,
OV_PP_ET_LIST(bf16, f16, f32, i8, i16, i32, i64, u8, u16, u32, u64),
clamp::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
get_min(),
get_max(),
shape_size(in_shape));
}
bool Clamp::has_evaluate() const {


@@ -47,10 +47,13 @@ bool Cos::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<cos::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
return IF_TYPE_OF(v0_Cos_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
cos::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool Cos::has_evaluate() const {


@@ -47,10 +47,13 @@ bool Cosh::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<cosh::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
return IF_TYPE_OF(v0_Cosh_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
cosh::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool Cosh::has_evaluate() const {


@@ -35,12 +35,15 @@ bool evaluate(TensorVector& outputs, const TensorVector& inputs, const bool excl
const auto axis = ov::get_tensor_data_as<int64_t>(inputs[1]).front();
using namespace ov::element;
return IfTypeOf<f32>::apply<cumsum::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
axis,
exclusive,
reverse);
return IF_TYPE_OF(CumSum_evaluate,
f32,
cumsum::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
axis,
exclusive,
reverse);
}
} // namespace
} // namespace cumsum


@@ -242,14 +242,17 @@ bool Divide::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
outputs[0].set_shape(infer_broadcast_shape(this, inputs));
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, bf16, f32>::apply<divide::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob(),
is_pythondiv());
return IF_TYPE_OF(v1_Divide_evaluate,
OV_PP_ET_LIST(f16, bf16, f32, i32, i64, u32, u64),
divide::Evaluate,
inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob(),
is_pythondiv());
return true;
}


@@ -101,14 +101,16 @@ bool Equal::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
outputs[0].set_shape(ov::op::infer_broadcast_shape(this, inputs));
using namespace ov::element;
return IfTypeOf<boolean, bf16, f16, f32, f64, i8, i16, i32, i64, u8, u16, u32, u64>::apply<equal::Evaluate>(
inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
return IF_TYPE_OF(v1_Equal_evaluate,
OV_PP_ET_LIST(boolean, bf16, f16, f32, f64, i8, i16, i32, i64, u8, u16, u32, u64),
equal::Evaluate,
inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
}
bool Equal::evaluate_lower(TensorVector& output_values) const {


@@ -43,10 +43,13 @@ bool Erf::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
outputs[0].set_shape(in_shape);
using namespace ov::element;
return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<erf::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(in_shape));
return IF_TYPE_OF(v0_Erf_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
erf::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool Erf::has_evaluate() const {


@@ -44,10 +44,13 @@ bool Exp::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
outputs[0].set_shape(in_shape);
using namespace ov::element;
return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<exp::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(in_shape));
return IF_TYPE_OF(v0_Exp_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
exp::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(in_shape));
}
bool Exp::has_evaluate() const {


@@ -123,10 +123,13 @@ bool Eye::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
outputs[0].set_shape(output_shape);
using namespace ov::element;
return IfTypeOf<bf16, f16, f32, f64, i8, i32, i64, u8>::apply<eye::Evaluate>(outputs[0].get_element_type(),
outputs[0],
output_shape,
diagonal_index);
return IF_TYPE_OF(v9_Eye_evaluate,
OV_PP_ET_LIST(bf16, f16, f32, f64, i8, i32, i64, u8),
eye::Evaluate,
outputs[0].get_element_type(),
outputs[0],
output_shape,
diagonal_index);
}
} // namespace v9
} // namespace op


@@ -135,10 +135,15 @@ bool FakeConvert::evaluate(ov::TensorVector& outputs, const ov::TensorVector& in
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<bf16, f16, f32>::apply<fake_convert_details::Evaluate>(inputs[0].get_element_type(),
outputs,
inputs,
get_destination_type());
return IF_TYPE_OF(v13_FakeConvert_evaluate,
OV_PP_ET_LIST(bf16, f16, f32),
fake_convert_details::Evaluate,
inputs[0].get_element_type(),
outputs,
inputs,
get_destination_type());
return true;
}
} // namespace v13
} // namespace op


@@ -113,20 +113,23 @@ bool FakeQuantize::evaluate(TensorVector& outputs, const TensorVector& inputs) c
outputs[0].set_shape(shape0);
using namespace ov::element;
return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<fake_quantize::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
inputs[2],
inputs[3],
inputs[4],
outputs[0],
shape0,
inputs[1].get_shape(),
inputs[2].get_shape(),
inputs[3].get_shape(),
inputs[4].get_shape(),
get_levels(),
get_auto_broadcast());
return IF_TYPE_OF(v0_FakeQuantize_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
fake_quantize::Evaluate,
inputs[0].get_element_type(),
inputs[0],
inputs[1],
inputs[2],
inputs[3],
inputs[4],
outputs[0],
shape0,
inputs[1].get_shape(),
inputs[2].get_shape(),
inputs[3].get_shape(),
inputs[4].get_shape(),
get_levels(),
get_auto_broadcast());
}
bool FakeQuantize::has_evaluate() const {


@@ -44,11 +44,13 @@ bool Floor::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
outputs[0].set_shape(in_shape);
using namespace ov::element;
return IfTypeOf<f16, f32, i8, i16, i32, i64, u8, u16, u32, u64>::apply<floor::Evaluate>(
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(in_shape));
return IF_TYPE_OF(v0_Floor_evaluate,
OV_PP_ET_LIST(f16, f32, i8, i16, i32, i64, u8, u16, u32, u64),
floor::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(in_shape));
}
bool Floor::has_evaluate() const {


@@ -48,14 +48,16 @@ bool FloorMod::evaluate(TensorVector& outputs, const TensorVector& inputs) const
outputs[0].set_shape(infer_broadcast_shape(this, inputs));
using namespace ov::element;
return IfTypeOf<bf16, f16, f32, i8, i32, i64, u8, u32, u64>::apply<floor_mod::Evaluate>(
inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
return IF_TYPE_OF(v1_FloorMod_evaluate,
OV_PP_ET_LIST(bf16, f16, f32, i8, i32, i64, u8, u32, u64),
floor_mod::Evaluate,
inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
}
bool FloorMod::has_evaluate() const {


@@ -104,11 +104,14 @@ bool Gelu::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
const auto count = shape_size(input_shape);
outputs[0].set_shape(input_shape);
using namespace ov::element;
return IfTypeOf<f16, f32>::apply<gelu::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
m_approximation_mode,
count);
return IF_TYPE_OF(v7_Gelu_evaluate,
OV_PP_ET_LIST(f16, f32),
gelu::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
m_approximation_mode,
count);
}
bool Gelu::has_evaluate() const {


@@ -53,13 +53,16 @@ bool Greater::evaluate(TensorVector& outputs, const TensorVector& inputs) const
outputs[0].set_shape(infer_broadcast_shape(this, inputs));
using namespace ov::element;
return IfTypeOf<boolean, f16, f32, i32, i64, u32, u64>::apply<greater::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
return IF_TYPE_OF(v1_Greater_evaluate,
OV_PP_ET_LIST(boolean, f16, f32, i32, i64, u32, u64),
greater::Evaluate,
inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
}
bool Greater::has_evaluate() const {


@@ -53,13 +53,16 @@ bool GreaterEqual::evaluate(TensorVector& outputs, const TensorVector& inputs) c
outputs[0].set_shape(infer_broadcast_shape(this, inputs));
using namespace ov::element;
return IfTypeOf<boolean, f16, f32, i32, i64, u32, u64>::apply<greater_equal::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
return IF_TYPE_OF(v1_GreaterEqual_evaluate,
OV_PP_ET_LIST(boolean, f16, f32, i32, i64, u32, u64),
greater_equal::Evaluate,
inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
}
bool GreaterEqual::has_evaluate() const {


@@ -25,13 +25,16 @@ struct Evaluate : element::NoAction<bool> {
const Shape& grid_shape,
const GridSample::Attributes& attributes) {
using namespace ov::element;
return IfTypeOf<f32>::apply<EvalByGridType>(grid.get_element_type(),
output.data<T>(),
data.data<const T>(),
grid,
data_shape,
grid_shape,
attributes);
return IF_TYPE_OF(eval_by_grid_type,
OV_PP_ET_LIST(f32),
EvalByGridType,
grid.get_element_type(),
output.data<T>(),
data.data<const T>(),
grid,
data_shape,
grid_shape,
attributes);
}
private:
@@ -100,13 +103,16 @@ bool GridSample::evaluate(TensorVector& outputs, const TensorVector& inputs) con
outputs[0].set_shape(out_shape);
using namespace ov::element;
return IfTypeOf<f32>::apply<Evaluate>(inputs[0].get_element_type(),
outputs[0],
inputs[0],
inputs[1],
inputs[0].get_shape(),
inputs[1].get_shape(),
m_attributes);
return IF_TYPE_OF(v9_GridSample_evaluate,
OV_PP_ET_LIST(f32),
Evaluate,
inputs[0].get_element_type(),
outputs[0],
inputs[0],
inputs[1],
inputs[0].get_shape(),
inputs[1].get_shape(),
m_attributes);
}
bool GridSample::has_evaluate() const {
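
The GridSample hunks above show a second recurring shape (repeated later for MaxPool-v8, ScatterElementsUpdate, ScatterNDUpdate, and TopK): the operator's evaluate dispatches on the data element type, and the selected Evaluate::visit then dispatches again, through a second named region, on the index element type. Reusing the toy Type_t / IfTypeOf / OV_PP_ET_LIST / IF_TYPE_OF definitions sketched after the commit header (still assumptions, with simplified signatures), the nesting looks like:

// Inner evaluator: chosen by the second, nested dispatch on the index type.
struct EvalByIdxType {
    template <Type_t IDX_ET>
    static bool visit(int payload) {
        return payload >= 0;  // stand-in for the index-type-specific kernel
    }
};

// Outer evaluator: chosen by the dispatch on the data type; its visit()
// immediately dispatches again, as the grid_sample/maxpool/topk hunks do
// with regions like eval_by_grid_type and maxpool_eval_by_idx_type.
struct OuterEvaluate {
    template <Type_t DATA_ET>
    static bool visit(Type_t idx_et, int payload) {
        return IF_TYPE_OF(toy_eval_by_idx_type, OV_PP_ET_LIST(Type_t::i32), EvalByIdxType, idx_et, payload);
    }
};

// Outer call, as from an evaluate() body:
//   IF_TYPE_OF(toy_GridSample_evaluate, OV_PP_ET_LIST(Type_t::f32), OuterEvaluate, data_et, idx_et, 7);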


@@ -42,11 +42,15 @@ bool HSigmoid::evaluate(TensorVector& outputs, const TensorVector& inputs) const
const auto& input_shape = inputs[0].get_shape();
const auto count = shape_size(input_shape);
outputs[0].set_shape(input_shape);
using namespace ov::element;
return IfTypeOf<bf16, f16, f32>::apply<hsigmoid::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
count);
return IF_TYPE_OF(v5_HSigmoid_evaluate,
OV_PP_ET_LIST(bf16, f16, f32),
hsigmoid::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
count);
}
bool HSigmoid::has_evaluate() const {


@@ -43,10 +43,13 @@ bool HSwish::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
const auto count = shape_size(input_shape);
outputs[0].set_shape(input_shape);
using namespace ov::element;
return IfTypeOf<bf16, f16, f32>::apply<hswish::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
count);
return IF_TYPE_OF(v4_HSwish_evaluate,
OV_PP_ET_LIST(bf16, f16, f32),
hswish::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
count);
}
bool HSwish::has_evaluate() const {


@@ -52,13 +52,16 @@ bool Less::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
outputs[0].set_shape(infer_broadcast_shape(this, inputs));
using namespace ov::element;
return IfTypeOf<boolean, f16, f32, i32, i64, u32, u64>::apply<less::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
return IF_TYPE_OF(v1_Less_evaluate,
OV_PP_ET_LIST(boolean, f16, f32, i32, i64, u32, u64),
less::Evaluate,
inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
}
bool Less::has_evaluate() const {


@@ -53,13 +53,16 @@ bool LessEqual::evaluate(TensorVector& outputs, const TensorVector& inputs) cons
outputs[0].set_shape(infer_broadcast_shape(this, inputs));
using namespace ov::element;
return IfTypeOf<boolean, f16, f32, i32, i64, u32, u64>::apply<less_equal::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
return IF_TYPE_OF(v1_LessEqual_evaluate,
OV_PP_ET_LIST(boolean, f16, f32, i32, i64, u32, u64),
less_equal::Evaluate,
inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
}
bool LessEqual::has_evaluate() const {


@@ -42,10 +42,13 @@ bool Log::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
const auto count = shape_size(input_shape);
outputs[0].set_shape(input_shape);
using namespace ov::element;
return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<log::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
count);
return IF_TYPE_OF(v0_Log_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
log::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
count);
}
bool Log::has_evaluate() const {


@@ -51,11 +51,13 @@ bool LogicalNot::evaluate(TensorVector& outputs, const TensorVector& inputs) con
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<boolean, i32, i64, u32, u64, f16, f32>::apply<logical_not::Evaluate>(
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
return IF_TYPE_OF(v1_LogicalNot_evaluate,
OV_PP_ET_LIST(boolean, i32, i64, u32, u64, f16, f32),
logical_not::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool LogicalNot::has_evaluate() const {


@@ -68,15 +68,18 @@ bool MatMul::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
outputs[0].set_shape(out_shape);
using namespace ov::element;
return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<matmul::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
out_shape,
m_transpose_a,
m_transpose_b);
return IF_TYPE_OF(v0_MatMul_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
matmul::Evaluate,
inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
out_shape,
m_transpose_a,
m_transpose_b);
}
bool MatMul::has_evaluate() const {


@@ -93,15 +93,18 @@ bool MaxPool::evaluate(TensorVector& outputs, const TensorVector& inputs) const
outputs[0].set_shape(output_shape.get_shape());
using namespace ov::element;
return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<maxpool::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
inputs[0].get_shape(),
outputs[0].get_shape(),
get_kernel(),
get_strides(),
get_pads_begin(),
get_pads_end());
return IF_TYPE_OF(v1_MaxPool_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
maxpool::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
inputs[0].get_shape(),
outputs[0].get_shape(),
get_kernel(),
get_strides(),
get_pads_begin(),
get_pads_end());
}
bool MaxPool::has_evaluate() const {
@@ -208,18 +211,21 @@ struct Evaluate : element::NoAction<bool> {
const Shape& pads_end,
const int64_t axis) {
using namespace ov::element;
return IfTypeOf<i32, i64>::apply<EvalByIdxType>(out_indices.get_element_type(),
in.data<const T>(),
out_values.data<T>(),
out_indices,
in_shape,
out_shape,
kernel,
strides,
dilations,
pads_begin,
pads_end,
axis);
return IF_TYPE_OF(maxpool_eval_by_idx_type,
OV_PP_ET_LIST(i32, i64),
EvalByIdxType,
out_indices.get_element_type(),
in.data<const T>(),
out_values.data<T>(),
out_indices,
in_shape,
out_shape,
kernel,
strides,
dilations,
pads_begin,
pads_end,
axis);
}
private:
@@ -265,18 +271,21 @@ bool MaxPool::evaluate(TensorVector& outputs, const TensorVector& inputs) const
outputs[0].set_shape(output_shape.get_shape());
using namespace ov::element;
return IfTypeOf<f16, f32, i8, i32, i64, u8, u32, u64>::apply<maxpool::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
outputs[1],
inputs[0].get_shape(),
outputs[0].get_shape(),
get_kernel(),
get_strides(),
get_dilations(),
get_pads_begin(),
get_pads_end(),
get_axis());
return IF_TYPE_OF(v8_MaxPool_evaluate,
OV_PP_ET_LIST(f16, f32, i8, i32, i64, u8, u32, u64),
maxpool::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
outputs[1],
inputs[0].get_shape(),
outputs[0].get_shape(),
get_kernel(),
get_strides(),
get_dilations(),
get_pads_begin(),
get_pads_end(),
get_axis());
}
bool MaxPool::has_evaluate() const {


@@ -48,13 +48,16 @@ bool Maximum::evaluate(TensorVector& outputs, const TensorVector& inputs) const
outputs[0].set_shape(infer_broadcast_shape(this, inputs));
using namespace ov::element;
return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<maximum::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
return IF_TYPE_OF(v1_Maximum_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
maximum::Evaluate,
inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
}
bool Maximum::has_evaluate() const {


@@ -49,13 +49,16 @@ bool Minimum::evaluate(TensorVector& outputs, const TensorVector& inputs) const
outputs[0].set_shape(infer_broadcast_shape(this, inputs));
using namespace ov::element;
return IfTypeOf<f16, f32, i32, i64, u8, u16, u32, u64>::apply<minimum::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
return IF_TYPE_OF(v1_Minimum_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u8, u16, u32, u64),
minimum::Evaluate,
inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
}
bool Minimum::has_evaluate() const {


@@ -58,10 +58,13 @@ bool Mish::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
outputs[0].set_shape(in_shape);
using namespace ov::element;
return IfTypeOf<f16, f32>::apply<mish::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(in_shape));
return IF_TYPE_OF(v4_Mish_evaluate,
OV_PP_ET_LIST(f16, f32),
mish::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(in_shape));
}
bool Mish::has_evaluate() const {


@@ -244,13 +244,16 @@ bool Mod::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) co
outputs[0].set_shape(infer_broadcast_shape(this, inputs));
using namespace ov::element;
return IfTypeOf<i8, i16, i32, i64, u8, u16, u32, u64>::apply<mod::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
return IF_TYPE_OF(v1_Mod_evaluate,
OV_PP_ET_LIST(i8, i16, i32, i64, u8, u16, u32, u64),
mod::Evaluate,
inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
}
bool Mod::evaluate_lower(TensorVector& outputs) const {


@@ -47,13 +47,16 @@ bool Multiply::evaluate(TensorVector& outputs, const TensorVector& inputs) const
outputs[0].set_shape(infer_broadcast_shape(this, inputs));
using namespace ov::element;
return IfTypeOf<bf16, f16, f32, f64, i32, i64, u32, u64>::apply<multiply::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
return IF_TYPE_OF(v1_Multiply_evaluate,
OV_PP_ET_LIST(bf16, f16, f32, f64, i32, i64, u32, u64),
multiply::Evaluate,
inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
}
bool Multiply::has_evaluate() const {


@@ -42,10 +42,13 @@ bool Negative::evaluate(TensorVector& outputs, const TensorVector& inputs) const
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<bf16, f16, f32, i32, i64>::apply<negative::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
return IF_TYPE_OF(v0_Negative_evaluate,
OV_PP_ET_LIST(bf16, f16, f32, i32, i64),
negative::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool Negative::has_evaluate() const {


@@ -26,7 +26,13 @@ struct Evaluate : public element::NoAction<bool> {
out.set_shape(out_shape);
using namespace ov::element;
return IfTypeOf<i32, i64>::apply<EvalByOutType>(out.get_element_type(), in_data, out, in_shape);
return IF_TYPE_OF(non_zero_out_type,
OV_PP_ET_LIST(i32, i64),
EvalByOutType,
out.get_element_type(),
in_data,
out,
in_shape);
}
private:
@@ -114,12 +120,14 @@ bool NonZero::evaluate(TensorVector& outputs, const TensorVector& inputs) const
auto& output = outputs[0];
using namespace ov::element;
const auto& input_shape = input.get_shape();
return IfTypeOf<boolean, bf16, f16, f32, f64, i8, i16, i32, i64, u8, u16, u32, u64>::apply<non_zero::Evaluate>(
input.get_element_type(),
input,
input_shape,
input_shape.size(),
output);
return IF_TYPE_OF(v3_NonZero_evaluate,
OV_PP_ET_LIST(boolean, bf16, f16, f32, f64, i8, i16, i32, i64, u8, u16, u32, u64),
non_zero::Evaluate,
input.get_element_type(),
input,
input_shape,
input_shape.size(),
output);
}
bool NonZero::has_evaluate() const {


@@ -51,13 +51,16 @@ bool NotEqual::evaluate(TensorVector& outputs, const TensorVector& inputs) const
outputs[0].set_shape(infer_broadcast_shape(this, inputs));
using namespace ov::element;
return IfTypeOf<boolean, f16, f32, i32, i64, u32, u64>::apply<not_equal::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
return IF_TYPE_OF(v1_NotEqual_evaluate,
OV_PP_ET_LIST(boolean, f16, f32, i32, i64, u32, u64),
not_equal::Evaluate,
inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
}
bool NotEqual::has_evaluate() const {


@@ -118,15 +118,18 @@ bool OneHot::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
auto& output = outputs[0];
output.set_shape(output_shape);
using namespace ov::element;
return IfTypeOf<i32, i64>::apply<one_hot::Evaluate>(indices.get_element_type(),
indices,
indices_shape,
static_cast<char*>(output.data()),
output.get_element_type().size(),
output.get_shape()[axis],
on_value,
off_value,
axis);
return IF_TYPE_OF(v1_OneHot_evaluate,
OV_PP_ET_LIST(i32, i64),
one_hot::Evaluate,
indices.get_element_type(),
indices,
indices_shape,
static_cast<char*>(output.data()),
output.get_element_type().size(),
output.get_shape()[axis],
on_value,
off_value,
axis);
}
bool OneHot::has_evaluate() const {


@@ -49,13 +49,16 @@ bool Power::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
out.set_shape(infer_broadcast_shape(this, inputs));
using namespace ov::element;
return IfTypeOf<bf16, f16, f32, i32, i64, u32, u64>::apply<power::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
out,
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
return IF_TYPE_OF(v1_Power_evaluate,
OV_PP_ET_LIST(bf16, f16, f32, i32, i64, u32, u64),
power::Evaluate,
inputs[0].get_element_type(),
inputs[0],
inputs[1],
out,
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
}
bool Power::has_evaluate() const {


@@ -55,12 +55,15 @@ bool PRelu::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
out.set_shape(arg_shape);
using namespace ov::element;
return IfTypeOf<bf16, f16, f32, i8>::apply<prelu::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
out,
arg_shape,
inputs[1].get_shape());
return IF_TYPE_OF(v0_PRelu_evaluate,
OV_PP_ET_LIST(bf16, f16, f32, i8),
prelu::Evaluate,
inputs[0].get_element_type(),
inputs[0],
inputs[1],
out,
arg_shape,
inputs[1].get_shape());
}
bool PRelu::has_evaluate() const {


@@ -119,11 +119,14 @@ bool Range::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
const auto step = get_tensor_data_as<double>(inputs[2])[0];
using namespace ov::element;
return IfTypeOf<RANGE_ET_LIST>::apply<range::Evaluate>(out.get_element_type(),
start,
step,
shape_size(out_shape),
out);
return IF_TYPE_OF(v4_Range_evaluate,
RANGE_ET_LIST,
range::Evaluate,
out.get_element_type(),
start,
step,
shape_size(out_shape),
out);
}
bool Range::has_evaluate() const {
@@ -199,11 +202,14 @@ bool Range::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
out.set_shape(out_shape);
using namespace ov::element;
return IfTypeOf<RANGE_ET_LIST>::apply<range::Evaluate>(out.get_element_type(),
start,
step,
shape_size(out_shape),
out);
return IF_TYPE_OF(v0_Range_evaluate,
RANGE_ET_LIST,
range::Evaluate,
out.get_element_type(),
start,
step,
shape_size(out_shape),
out);
}
bool Range::has_evaluate() const {


@@ -48,10 +48,13 @@ bool ReduceL1::evaluate(TensorVector& outputs, const TensorVector& inputs) const
outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));
using namespace ov::element;
return IfTypeOf<bf16, f16, f32, i32, i64>::apply<reduce_l1::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
return IF_TYPE_OF(v4_ReduceL1_evaluate,
OV_PP_ET_LIST(bf16, f16, f32, i32, i64),
reduce_l1::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
}
bool ReduceL1::has_evaluate() const {


@@ -47,10 +47,13 @@ bool ReduceL2::evaluate(TensorVector& outputs, const TensorVector& inputs) const
outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));
using namespace ov::element;
return IfTypeOf<bf16, f16, f32>::apply<reduce_l2::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
return IF_TYPE_OF(v4_ReduceL2_evaluate,
OV_PP_ET_LIST(bf16, f16, f32),
reduce_l2::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
}
bool ReduceL2::has_evaluate() const {


@@ -47,10 +47,13 @@ bool ReduceLogicalAnd::evaluate(TensorVector& outputs, const TensorVector& input
outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));
using namespace ov::element;
return IfTypeOf<boolean>::apply<reduce_and::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
return IF_TYPE_OF(v1_ReduceLogicalAnd_evaluate,
boolean,
reduce_and::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
}
bool ReduceLogicalAnd::has_evaluate() const {


@@ -48,10 +48,13 @@ bool ReduceLogicalOr::evaluate(TensorVector& outputs, const TensorVector& inputs
outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));
using namespace ov::element;
return IfTypeOf<boolean>::apply<reduce_or::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
return IF_TYPE_OF(v1_ReduceLogicalOr_evaluate,
boolean,
reduce_or::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
}
bool ReduceLogicalOr::has_evaluate() const {


@@ -47,10 +47,13 @@ bool ReduceMax::evaluate(TensorVector& outputs, const TensorVector& inputs) cons
outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));
using namespace ov::element;
return IfTypeOf<f16, f32, i8, i32, i64, u8, u32, u64>::apply<reduce_max::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
return IF_TYPE_OF(v1_ReduceMax_evaluate,
OV_PP_ET_LIST(f16, f32, i8, i32, i64, u8, u32, u64),
reduce_max::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
}
bool ReduceMax::has_evaluate() const {


@@ -45,10 +45,13 @@ bool ReduceMean::evaluate(TensorVector& outputs, const TensorVector& inputs) con
outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));
using namespace ov::element;
return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<reduce_mean::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
return IF_TYPE_OF(v1_ReduceMean_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
reduce_mean::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
}
bool ReduceMean::has_evaluate() const {


@@ -45,10 +45,13 @@ bool ReduceMin::evaluate(TensorVector& outputs, const TensorVector& inputs) cons
outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));
using namespace ov::element;
return IfTypeOf<f16, f32, i8, i32, i64, u8, u32, u64>::apply<reduce_min::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
return IF_TYPE_OF(v1_ReduceMin_evaluate,
OV_PP_ET_LIST(f16, f32, i8, i32, i64, u8, u32, u64),
reduce_min::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
}
bool ReduceMin::has_evaluate() const {


@@ -57,10 +57,13 @@ bool ReduceProd::evaluate(TensorVector& outputs, const TensorVector& inputs) con
outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));
using namespace ov::element;
return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<reduce_prod::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
return IF_TYPE_OF(v1_ReduceProd_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
reduce_prod::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
}
bool ReduceProd::has_evaluate() const {


@@ -45,10 +45,13 @@ bool ReduceSum::evaluate(TensorVector& outputs, const TensorVector& inputs) cons
outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));
using namespace ov::element;
return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<reduce_sum::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
return IF_TYPE_OF(v1_ReduceSum_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
reduce_sum::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
}
bool ReduceSum::has_evaluate() const {


@@ -42,10 +42,13 @@ bool Relu::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
outputs[0].set_shape(in_shape);
using namespace ov::element;
return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<relu::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(in_shape));
return IF_TYPE_OF(v0_Relu_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
relu::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(in_shape));
}
bool Relu::has_evaluate() const {


@@ -59,12 +59,14 @@ bool Round::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
auto& out = outputs.front();
using namespace ov::element;
return IfTypeOf<boolean, i8, i16, i32, i64, u8, u16, u32, u64, bf16, f16, f32>::apply<round::Evaluate>(
arg0.get_element_type(),
arg0,
out,
shape_size(arg0.get_shape()),
get_mode());
return IF_TYPE_OF(v5_Round_evaluate,
OV_PP_ET_LIST(boolean, i8, i16, i32, i64, u8, u16, u32, u64, bf16, f16, f32),
round::Evaluate,
arg0.get_element_type(),
arg0,
out,
shape_size(arg0.get_shape()),
get_mode());
}
bool Round::has_evaluate() const {


@@ -101,16 +101,19 @@ struct Evaluate : public element::NoAction<bool> {
) {
using namespace ov::element;
return IfTypeOf<i8, i16, i32, i64, u8, u16, u32, u64>::apply<EvaluateByIndicesType>(indices.get_element_type(),
data.data<const DT>(),
indices,
updates.data<const DT>(),
output.data<DT>(),
data_shape,
indices_shape,
axis,
reduction,
use_init_value);
return IF_TYPE_OF(scatter_el_update_idx_type,
OV_PP_ET_LIST(i8, i16, i32, i64, u8, u16, u32, u64),
EvaluateByIndicesType,
indices.get_element_type(),
data.data<const DT>(),
indices,
updates.data<const DT>(),
output.data<DT>(),
data_shape,
indices_shape,
axis,
reduction,
use_init_value);
}
private:
@@ -156,18 +159,21 @@ bool evaluate(TensorVector& outputs,
const auto& data_shape = data.get_shape();
const auto& indices_shape = indices.get_shape();
output.set_shape(data_shape);
using namespace ov::element;
return IfTypeOf<boolean, f16, f32, i16, i32, i64, u32, u64>::apply<scatter_elements_update::Evaluate>(
data.get_element_type(),
data,
indices,
updates,
output,
data_shape,
indices_shape,
axis,
reduction,
use_init_value);
return IF_TYPE_OF(scatter_evaluate,
OV_PP_ET_LIST(boolean, f16, f32, i16, i32, i64, u32, u64),
scatter_elements_update::Evaluate,
data.get_element_type(),
data,
indices,
updates,
output,
data_shape,
indices_shape,
axis,
reduction,
use_init_value);
}
} // namespace
} // namespace scatter_elements_update


@@ -24,14 +24,17 @@ struct Evaluate : public element::NoAction<bool> {
const Shape& indices_shape,
const Shape& updates_shape) {
using namespace ov::element;
return IfTypeOf<i32, i64>::apply<EvaluateByIndicesType>(indices.get_element_type(),
data.data<const DT>(),
indices,
updates.data<const DT>(),
output.data<DT>(),
data_shape,
indices_shape,
updates_shape);
return IF_TYPE_OF(sctter_nd_eval_idx_type,
OV_PP_ET_LIST(i32, i64),
EvaluateByIndicesType,
indices.get_element_type(),
data.data<const DT>(),
indices,
updates.data<const DT>(),
output.data<DT>(),
data_shape,
indices_shape,
updates_shape);
}
private:
@@ -82,14 +85,17 @@ bool ScatterNDUpdate::evaluate(TensorVector& outputs, const TensorVector& inputs
const auto& updates_shape = updates.get_shape();
output.set_shape(data_shape);
using namespace ov::element;
return IfTypeOf<boolean, f16, f32, i32, i64, u32, u64>::apply<scatter_nd_update::Evaluate>(data.get_element_type(),
data,
indices,
updates,
output,
data_shape,
indices_shape,
updates_shape);
return IF_TYPE_OF(v3_ScatterNDUpdate_evaluate,
OV_PP_ET_LIST(boolean, f16, f32, i32, i64, u32, u64),
scatter_nd_update::Evaluate,
data.get_element_type(),
data,
indices,
updates,
output,
data_shape,
indices_shape,
updates_shape);
}
bool ScatterNDUpdate::has_evaluate() const {


@@ -44,10 +44,13 @@ bool Sigmoid::evaluate(TensorVector& outputs, const TensorVector& inputs) const
outputs[0].set_shape(in_shape);
using namespace ov::element;
return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<sigmoid::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(in_shape));
return IF_TYPE_OF(v0_Sigmoid_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
sigmoid::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(in_shape));
}
bool Sigmoid::has_evaluate() const {


@@ -43,10 +43,13 @@ bool Sign::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
outputs[0].set_shape(in_shape);
using namespace ov::element;
return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<sign::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(in_shape));
return IF_TYPE_OF(v0_Sign_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
sign::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(in_shape));
}
bool Sign::has_evaluate() const {


@@ -47,10 +47,13 @@ bool Sin::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<sin::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
return IF_TYPE_OF(v0_Sin_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
sin::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool Sin::has_evaluate() const {


@@ -45,10 +45,13 @@ bool Sinh::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<sinh::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
return IF_TYPE_OF(v0_Sinh_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
sinh::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool Sinh::has_evaluate() const {


@@ -68,11 +68,14 @@ bool Softmax::evaluate(TensorVector& outputs, const TensorVector& inputs) const
const auto& input_shape = inputs[0].get_shape();
outputs[0].set_shape(input_shape);
using namespace ov::element;
return IfTypeOf<bf16, f16, f32, f64>::apply<softmax::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
input_shape,
AxisSet{m_axis});
return IF_TYPE_OF(v1_Softmax_evaluate,
OV_PP_ET_LIST(bf16, f16, f32, f64),
softmax::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
input_shape,
AxisSet{m_axis});
}
bool Softmax::has_evaluate() const {
@@ -140,11 +143,14 @@ bool Softmax::evaluate(TensorVector& outputs, const TensorVector& inputs) const
outputs[0].set_shape(input_shape);
using namespace ov::element;
return IfTypeOf<bf16, f16, f32, f64>::apply<softmax::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
input_shape,
AxisSet{axis});
return IF_TYPE_OF(v8_Softmax_evaluate,
OV_PP_ET_LIST(bf16, f16, f32, f64),
softmax::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
input_shape,
AxisSet{axis});
}
bool Softmax::has_evaluate() const {


@@ -57,10 +57,13 @@ bool SoftPlus::evaluate(TensorVector& outputs, const TensorVector& inputs) const
const auto count = shape_size(input_shape);
outputs[0].set_shape(input_shape);
using namespace ov::element;
return IfTypeOf<bf16, f16, f32>::apply<softplus::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
count);
return IF_TYPE_OF(v4_SoftPlus_evaluate,
OV_PP_ET_LIST(bf16, f16, f32),
softplus::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
count);
}
bool SoftPlus::has_evaluate() const {


@@ -75,10 +75,13 @@ bool SoftSign::evaluate(TensorVector& outputs,
const auto& input_shape = inputs[0].get_shape();
outputs[0].set_shape(input_shape);
using namespace ov::element;
return IfTypeOf<bf16, f16, f32, f64>::apply<softsign::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(input_shape));
return IF_TYPE_OF(v9_SoftSign_evaluate,
OV_PP_ET_LIST(bf16, f16, f32, f64),
softsign::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(input_shape));
}
} // namespace v9
} // namespace op


@@ -41,10 +41,13 @@ bool Sqrt::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
const auto& in_shape = inputs[0].get_shape();
outputs[0].set_shape(in_shape);
using namespace ov::element;
return IfTypeOf<f16, f32, f64, i32, i64, u32, u64>::apply<sqrt::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(in_shape));
return IF_TYPE_OF(v0_Sqrt_evaluate,
OV_PP_ET_LIST(f16, f32, f64, i32, i64, u32, u64),
sqrt::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(in_shape));
}
bool Sqrt::has_evaluate() const {


@@ -48,13 +48,16 @@ bool Subtract::evaluate(TensorVector& outputs, const TensorVector& inputs) const
outputs[0].set_shape(infer_broadcast_shape(this, inputs));
using namespace ov::element;
return IfTypeOf<bf16, f16, f32, i8, i32, i64, u8, u32, u64>::apply<subtract::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
return IF_TYPE_OF(v1_Subtract_evaluate,
OV_PP_ET_LIST(bf16, f16, f32, i8, i32, i64, u8, u32, u64),
subtract::Evaluate,
inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
}
bool Subtract::has_evaluate() const {


@@ -88,11 +88,14 @@ bool Swish::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
const auto& arg1 = inputs.size() == 2 ? inputs[1] : Tensor();
using namespace ov::element;
return IfTypeOf<f16, f32>::apply<swish::Evaluate>(inputs[0].get_element_type(),
inputs[0],
arg1,
outputs[0],
shape_size(inputs[0].get_shape()));
return IF_TYPE_OF(v4_Swish_evaluate,
OV_PP_ET_LIST(f16, f32),
swish::Evaluate,
inputs[0].get_element_type(),
inputs[0],
arg1,
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool Swish::has_evaluate() const {


@@ -45,10 +45,13 @@ bool Tan::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<tan::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
return IF_TYPE_OF(v0_Tan_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
tan::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool Tan::has_evaluate() const {


@@ -46,10 +46,13 @@ bool Tanh::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
outputs[0].set_shape(inputs[0].get_shape());
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<tanh::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
return IF_TYPE_OF(v0_Tanh_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
tanh::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
shape_size(inputs[0].get_shape()));
}
bool Tanh::has_evaluate() const {


@@ -59,16 +59,19 @@ struct Evaluate : public element::NoAction<bool> {
const bool compute_max,
const TopKSortType sort) {
using namespace ov::element;
return IfTypeOf<i32, i64>::apply<EvalByIdxType>(out_indices.get_element_type(),
in.data<const T>(),
out_values.data<T>(),
out_indices,
in.get_shape(),
out_shape,
axis,
out_shape[axis],
compute_max,
sort);
return IF_TYPE_OF(topk_eval_by_idx_type,
OV_PP_ET_LIST(i32, i64),
EvalByIdxType,
out_indices.get_element_type(),
in.data<const T>(),
out_values.data<T>(),
out_indices,
in.get_shape(),
out_shape,
axis,
out_shape[axis],
compute_max,
sort);
}
private:
@@ -116,14 +119,17 @@ bool evaluate(const util::TopKBase* const node, TensorVector& outputs, const Ten
}
using namespace ov::element;
return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<topk::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
outputs[1],
output_shape,
axis,
(node->get_mode() == ov::op::TopKMode::MAX),
node->get_sort_type());
return IF_TYPE_OF(topk_evaluate,
OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
topk::Evaluate,
inputs[0].get_element_type(),
inputs[0],
outputs[0],
outputs[1],
output_shape,
axis,
(node->get_mode() == ov::op::TopKMode::MAX),
node->get_sort_type());
}
} // namespace
} // namespace topk


@@ -45,13 +45,16 @@ bool evaluate(const Node* const op, TensorVector& outputs, const TensorVector& i
outputs[0].set_shape(infer_broadcast_shape(op, inputs));
using namespace ov::element;
return IfTypeOf<boolean>::apply<logxor::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
op->get_autob());
return IF_TYPE_OF(Xor_evaluate,
boolean,
logxor::Evaluate,
inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
op->get_autob());
}
} // namespace
} // namespace logxor