Restore CC feature in operators evaluate (#21446)
commit 24209239bf
parent f9d20d5aa0
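Every hunk below applies the same mechanical change: the templated dispatch helper IfTypeOf<...>::apply<Evaluator>(...) is replaced by the IF_TYPE_OF macro, which additionally takes a region name (presumably consumed by the conditional-compilation / selective-build machinery named in the commit title) and the supported element types wrapped in OV_PP_ET_LIST, followed by the original arguments unchanged. A minimal sketch of the pattern, taken from the Abs hunk below; the comments are explanatory and not part of the commit:

    // Before: plain element-type dispatch, invisible to selective build
    return IfTypeOf<bf16, f16, f32, i32, i64, u32, u64>::apply<abs::Evaluate>(
        inputs[0].get_element_type(), inputs[0], outputs[0], shape_size(inputs[0].get_shape()));

    // After: the same dispatch behind a named CC region
    return IF_TYPE_OF(v0_Abs_evaluate,                                    // CC region name
                      OV_PP_ET_LIST(bf16, f16, f32, i32, i64, u32, u64),  // supported element types
                      abs::Evaluate,                                      // evaluator functor
                      inputs[0].get_element_type(),                       // runtime dispatch key
                      inputs[0],
                      outputs[0],
                      shape_size(inputs[0].get_shape()));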
@@ -42,10 +42,13 @@ bool Abs::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     outputs[0].set_shape(inputs[0].get_shape());

     using namespace ov::element;
-    return IfTypeOf<bf16, f16, f32, i32, i64, u32, u64>::apply<abs::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(inputs[0].get_shape()));
+    return IF_TYPE_OF(v0_Abs_evaluate,
+                      OV_PP_ET_LIST(bf16, f16, f32, i32, i64, u32, u64),
+                      abs::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(inputs[0].get_shape()));
 }

 bool Abs::has_evaluate() const {
@@ -41,10 +41,13 @@ bool ov::op::v0::Acos::evaluate(TensorVector& outputs, const TensorVector& input
     outputs[0].set_shape(inputs[0].get_shape());

     using namespace ov::element;
-    return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<acos::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(inputs[0].get_shape()));
+    return IF_TYPE_OF(v0_Acos_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      acos::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(inputs[0].get_shape()));
 }

 bool ov::op::v0::Acos::has_evaluate() const {
@@ -41,10 +41,13 @@ bool ov::op::v3::Acosh::evaluate(TensorVector& outputs, const TensorVector& inpu
     outputs[0].set_shape(inputs[0].get_shape());

     using namespace ov::element;
-    return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<acosh::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(inputs[0].get_shape()));
+    return IF_TYPE_OF(v3_Acosh_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      acosh::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(inputs[0].get_shape()));
 }

 bool ov::op::v3::Acosh::has_evaluate() const {
@@ -48,14 +48,16 @@ bool Add::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) co

     outputs[0].set_shape(infer_broadcast_shape(this, inputs));
     using namespace ov::element;
-    return IfTypeOf<bf16, f16, f32, i8, i16, i32, i64, u8, u16, u32, u64>::apply<add::Evaluate>(
-                      inputs[0].get_element_type(),
-                      inputs[0],
-                      inputs[1],
-                      outputs[0],
-                      inputs[0].get_shape(),
-                      inputs[1].get_shape(),
-                      get_autob());
+    return IF_TYPE_OF(v1_Add_evaluate,
+                      OV_PP_ET_LIST(bf16, f16, f32, i8, i16, i32, i64, u8, u16, u32, u64),
+                      add::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      inputs[1],
+                      outputs[0],
+                      inputs[0].get_shape(),
+                      inputs[1].get_shape(),
+                      get_autob());
 }

 bool Add::has_evaluate() const {
@@ -41,10 +41,13 @@ bool Asin::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     outputs[0].set_shape(inputs[0].get_shape());

     using namespace ov::element;
-    return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<asin::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(inputs[0].get_shape()));
+    return IF_TYPE_OF(v0_Asin_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      asin::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(inputs[0].get_shape()));
 }

 bool Asin::has_evaluate() const {
@@ -40,10 +40,13 @@ bool Asinh::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     outputs[0].set_shape(inputs[0].get_shape());

     using namespace ov::element;
-    return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<asinh::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(inputs[0].get_shape()));
+    return IF_TYPE_OF(v3_Asinh_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      asinh::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(inputs[0].get_shape()));
 }

 bool Asinh::has_evaluate() const {
@@ -43,10 +43,13 @@ bool Atan::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     outputs[0].set_shape(inputs[0].get_shape());

     using namespace ov::element;
-    return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<atan::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(inputs[0].get_shape()));
+    return IF_TYPE_OF(v0_Atan_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      atan::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(inputs[0].get_shape()));
 }

 bool Atan::has_evaluate() const {
@@ -40,10 +40,13 @@ bool op::v3::Atanh::evaluate(TensorVector& outputs, const TensorVector& inputs)
     outputs[0].set_shape(inputs[0].get_shape());

     using namespace ov::element;
-    return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<atanh::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(inputs[0].get_shape()));
+    return IF_TYPE_OF(v3_Atanh_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      atanh::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(inputs[0].get_shape()));
 }

 bool op::v3::Atanh::has_evaluate() const {
@@ -44,11 +44,13 @@ bool Ceiling::evaluate(TensorVector& outputs, const TensorVector& inputs) const
     outputs[0].set_shape(inputs[0].get_shape());

     using namespace ov::element;
-    return IfTypeOf<f16, f32, i8, i16, i32, i64, u8, u16, u32, u64>::apply<ceiling::Evaluate>(
-                      inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(inputs[0].get_shape()));
+    return IF_TYPE_OF(v0_Ceiling_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i8, i16, i32, i64, u8, u16, u32, u64),
+                      ceiling::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(inputs[0].get_shape()));
 }

 bool Ceiling::has_evaluate() const {
@@ -68,13 +68,15 @@ bool Clamp::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     outputs[0].set_shape(in_shape);

     using namespace ov::element;
-    return IfTypeOf<bf16, f16, f32, i8, i16, i32, i64, u8, u16, u32, u64>::apply<clamp::Evaluate>(
-                      inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      get_min(),
-                      get_max(),
-                      shape_size(in_shape));
+    return IF_TYPE_OF(v0_Clamp_evaluate,
+                      OV_PP_ET_LIST(bf16, f16, f32, i8, i16, i32, i64, u8, u16, u32, u64),
+                      clamp::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      get_min(),
+                      get_max(),
+                      shape_size(in_shape));
 }

 bool Clamp::has_evaluate() const {
@@ -47,10 +47,13 @@ bool Cos::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     outputs[0].set_shape(inputs[0].get_shape());

     using namespace ov::element;
-    return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<cos::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(inputs[0].get_shape()));
+    return IF_TYPE_OF(v0_Cos_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      cos::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(inputs[0].get_shape()));
 }

 bool Cos::has_evaluate() const {
@@ -47,10 +47,13 @@ bool Cosh::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     outputs[0].set_shape(inputs[0].get_shape());

     using namespace ov::element;
-    return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<cosh::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(inputs[0].get_shape()));
+    return IF_TYPE_OF(v0_Cosh_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      cosh::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(inputs[0].get_shape()));
 }

 bool Cosh::has_evaluate() const {
@@ -35,12 +35,15 @@ bool evaluate(TensorVector& outputs, const TensorVector& inputs, const bool excl
     const auto axis = ov::get_tensor_data_as<int64_t>(inputs[1]).front();

     using namespace ov::element;
-    return IfTypeOf<f32>::apply<cumsum::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      axis,
-                      exclusive,
-                      reverse);
+    return IF_TYPE_OF(CumSum_evaluate,
+                      f32,
+                      cumsum::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      axis,
+                      exclusive,
+                      reverse);
 }
 } // namespace
 } // namespace cumsum
@@ -242,14 +242,17 @@ bool Divide::evaluate(TensorVector& outputs, const TensorVector& inputs) const {

     outputs[0].set_shape(infer_broadcast_shape(this, inputs));
     using namespace ov::element;
-    return IfTypeOf<i32, i64, u32, u64, f16, bf16, f32>::apply<divide::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      inputs[1],
-                      outputs[0],
-                      inputs[0].get_shape(),
-                      inputs[1].get_shape(),
-                      get_autob(),
-                      is_pythondiv());
+    return IF_TYPE_OF(v1_Divide_evaluate,
+                      OV_PP_ET_LIST(f16, bf16, f32, i32, i64, u32, u64),
+                      divide::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      inputs[1],
+                      outputs[0],
+                      inputs[0].get_shape(),
+                      inputs[1].get_shape(),
+                      get_autob(),
+                      is_pythondiv());
     return true;
 }

@@ -101,14 +101,16 @@ bool Equal::evaluate(TensorVector& outputs, const TensorVector& inputs) const {

     outputs[0].set_shape(ov::op::infer_broadcast_shape(this, inputs));
     using namespace ov::element;
-    return IfTypeOf<boolean, bf16, f16, f32, f64, i8, i16, i32, i64, u8, u16, u32, u64>::apply<equal::Evaluate>(
-                      inputs[0].get_element_type(),
-                      inputs[0],
-                      inputs[1],
-                      outputs[0],
-                      inputs[0].get_shape(),
-                      inputs[1].get_shape(),
-                      get_autob());
+    return IF_TYPE_OF(v1_Equal_evaluate,
+                      OV_PP_ET_LIST(boolean, bf16, f16, f32, f64, i8, i16, i32, i64, u8, u16, u32, u64),
+                      equal::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      inputs[1],
+                      outputs[0],
+                      inputs[0].get_shape(),
+                      inputs[1].get_shape(),
+                      get_autob());
 }

 bool Equal::evaluate_lower(TensorVector& output_values) const {
@@ -43,10 +43,13 @@ bool Erf::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     outputs[0].set_shape(in_shape);

     using namespace ov::element;
-    return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<erf::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(in_shape));
+    return IF_TYPE_OF(v0_Erf_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      erf::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(inputs[0].get_shape()));
 }

 bool Erf::has_evaluate() const {
@@ -44,10 +44,13 @@ bool Exp::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     outputs[0].set_shape(in_shape);

     using namespace ov::element;
-    return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<exp::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(in_shape));
+    return IF_TYPE_OF(v0_Exp_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      exp::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(in_shape));
 }

 bool Exp::has_evaluate() const {
@@ -123,10 +123,13 @@ bool Eye::evaluate(TensorVector& outputs, const TensorVector& inputs) const {

     outputs[0].set_shape(output_shape);
     using namespace ov::element;
-    return IfTypeOf<bf16, f16, f32, f64, i8, i32, i64, u8>::apply<eye::Evaluate>(outputs[0].get_element_type(),
-                      outputs[0],
-                      output_shape,
-                      diagonal_index);
+    return IF_TYPE_OF(v9_Eye_evaluate,
+                      OV_PP_ET_LIST(bf16, f16, f32, f64, i8, i32, i64, u8),
+                      eye::Evaluate,
+                      outputs[0].get_element_type(),
+                      outputs[0],
+                      output_shape,
+                      diagonal_index);
 }
 } // namespace v9
 } // namespace op
@@ -135,10 +135,15 @@ bool FakeConvert::evaluate(ov::TensorVector& outputs, const ov::TensorVector& in
     outputs[0].set_shape(inputs[0].get_shape());

     using namespace ov::element;
-    return IfTypeOf<bf16, f16, f32>::apply<fake_convert_details::Evaluate>(inputs[0].get_element_type(),
-                      outputs,
-                      inputs,
-                      get_destination_type());
+    return IF_TYPE_OF(v13_FakeConvert_evaluate,
+                      OV_PP_ET_LIST(bf16, f16, f32),
+                      fake_convert_details::Evaluate,
+                      inputs[0].get_element_type(),
+                      outputs,
+                      inputs,
+                      get_destination_type());
+
+    return true;
 }
 } // namespace v13
 } // namespace op
@@ -113,20 +113,23 @@ bool FakeQuantize::evaluate(TensorVector& outputs, const TensorVector& inputs) c
     outputs[0].set_shape(shape0);

     using namespace ov::element;
-    return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<fake_quantize::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      inputs[1],
-                      inputs[2],
-                      inputs[3],
-                      inputs[4],
-                      outputs[0],
-                      shape0,
-                      inputs[1].get_shape(),
-                      inputs[2].get_shape(),
-                      inputs[3].get_shape(),
-                      inputs[4].get_shape(),
-                      get_levels(),
-                      get_auto_broadcast());
+    return IF_TYPE_OF(v0_FakeQuantize_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      fake_quantize::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      inputs[1],
+                      inputs[2],
+                      inputs[3],
+                      inputs[4],
+                      outputs[0],
+                      shape0,
+                      inputs[1].get_shape(),
+                      inputs[2].get_shape(),
+                      inputs[3].get_shape(),
+                      inputs[4].get_shape(),
+                      get_levels(),
+                      get_auto_broadcast());
 }

 bool FakeQuantize::has_evaluate() const {
@@ -44,11 +44,13 @@ bool Floor::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     outputs[0].set_shape(in_shape);

     using namespace ov::element;
-    return IfTypeOf<f16, f32, i8, i16, i32, i64, u8, u16, u32, u64>::apply<floor::Evaluate>(
-                      inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(in_shape));
+    return IF_TYPE_OF(v0_Floor_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i8, i16, i32, i64, u8, u16, u32, u64),
+                      floor::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(in_shape));
 }

 bool Floor::has_evaluate() const {
@@ -48,14 +48,16 @@ bool FloorMod::evaluate(TensorVector& outputs, const TensorVector& inputs) const
     outputs[0].set_shape(infer_broadcast_shape(this, inputs));

     using namespace ov::element;
-    return IfTypeOf<bf16, f16, f32, i8, i32, i64, u8, u32, u64>::apply<floor_mod::Evaluate>(
-                      inputs[0].get_element_type(),
-                      inputs[0],
-                      inputs[1],
-                      outputs[0],
-                      inputs[0].get_shape(),
-                      inputs[1].get_shape(),
-                      get_autob());
+    return IF_TYPE_OF(v1_FloorMod_evaluate,
+                      OV_PP_ET_LIST(bf16, f16, f32, i8, i32, i64, u8, u32, u64),
+                      floor_mod::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      inputs[1],
+                      outputs[0],
+                      inputs[0].get_shape(),
+                      inputs[1].get_shape(),
+                      get_autob());
 }

 bool FloorMod::has_evaluate() const {
@@ -104,11 +104,14 @@ bool Gelu::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     const auto count = shape_size(input_shape);
     outputs[0].set_shape(input_shape);
     using namespace ov::element;
-    return IfTypeOf<f16, f32>::apply<gelu::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      m_approximation_mode,
-                      count);
+    return IF_TYPE_OF(v7_Gelu_evaluate,
+                      OV_PP_ET_LIST(f16, f32),
+                      gelu::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      m_approximation_mode,
+                      count);
 }

 bool Gelu::has_evaluate() const {
@@ -53,13 +53,16 @@ bool Greater::evaluate(TensorVector& outputs, const TensorVector& inputs) const

     outputs[0].set_shape(infer_broadcast_shape(this, inputs));
     using namespace ov::element;
-    return IfTypeOf<boolean, f16, f32, i32, i64, u32, u64>::apply<greater::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      inputs[1],
-                      outputs[0],
-                      inputs[0].get_shape(),
-                      inputs[1].get_shape(),
-                      get_autob());
+    return IF_TYPE_OF(v1_Greater_evaluate,
+                      OV_PP_ET_LIST(boolean, f16, f32, i32, i64, u32, u64),
+                      greater::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      inputs[1],
+                      outputs[0],
+                      inputs[0].get_shape(),
+                      inputs[1].get_shape(),
+                      get_autob());
 }

 bool Greater::has_evaluate() const {
@@ -53,13 +53,16 @@ bool GreaterEqual::evaluate(TensorVector& outputs, const TensorVector& inputs) c

     outputs[0].set_shape(infer_broadcast_shape(this, inputs));
     using namespace ov::element;
-    return IfTypeOf<boolean, f16, f32, i32, i64, u32, u64>::apply<greater_equal::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      inputs[1],
-                      outputs[0],
-                      inputs[0].get_shape(),
-                      inputs[1].get_shape(),
-                      get_autob());
+    return IF_TYPE_OF(v1_GreaterEqual_evaluate,
+                      OV_PP_ET_LIST(boolean, f16, f32, i32, i64, u32, u64),
+                      greater_equal::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      inputs[1],
+                      outputs[0],
+                      inputs[0].get_shape(),
+                      inputs[1].get_shape(),
+                      get_autob());
 }

 bool GreaterEqual::has_evaluate() const {
@@ -25,13 +25,16 @@ struct Evaluate : element::NoAction<bool> {
                              const Shape& grid_shape,
                              const GridSample::Attributes& attributes) {
         using namespace ov::element;
-        return IfTypeOf<f32>::apply<EvalByGridType>(grid.get_element_type(),
-                          output.data<T>(),
-                          data.data<const T>(),
-                          grid,
-                          data_shape,
-                          grid_shape,
-                          attributes);
+        return IF_TYPE_OF(eval_by_grid_type,
+                          OV_PP_ET_LIST(f32),
+                          EvalByGridType,
+                          grid.get_element_type(),
+                          output.data<T>(),
+                          data.data<const T>(),
+                          grid,
+                          data_shape,
+                          grid_shape,
+                          attributes);
     }

 private:
@@ -100,13 +103,16 @@ bool GridSample::evaluate(TensorVector& outputs, const TensorVector& inputs) con
     outputs[0].set_shape(out_shape);

     using namespace ov::element;
-    return IfTypeOf<f32>::apply<Evaluate>(inputs[0].get_element_type(),
-                      outputs[0],
-                      inputs[0],
-                      inputs[1],
-                      inputs[0].get_shape(),
-                      inputs[1].get_shape(),
-                      m_attributes);
+    return IF_TYPE_OF(v9_GridSample_evaluate,
+                      OV_PP_ET_LIST(f32),
+                      Evaluate,
+                      inputs[0].get_element_type(),
+                      outputs[0],
+                      inputs[0],
+                      inputs[1],
+                      inputs[0].get_shape(),
+                      inputs[1].get_shape(),
+                      m_attributes);
 }

 bool GridSample::has_evaluate() const {
@@ -42,11 +42,15 @@ bool HSigmoid::evaluate(TensorVector& outputs, const TensorVector& inputs) const
     const auto& input_shape = inputs[0].get_shape();
     const auto count = shape_size(input_shape);
     outputs[0].set_shape(input_shape);
+
     using namespace ov::element;
-    return IfTypeOf<bf16, f16, f32>::apply<hsigmoid::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      count);
+    return IF_TYPE_OF(v5_HSigmoid_evaluate,
+                      OV_PP_ET_LIST(bf16, f16, f32),
+                      hsigmoid::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      count);
 }

 bool HSigmoid::has_evaluate() const {
@@ -43,10 +43,13 @@ bool HSwish::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     const auto count = shape_size(input_shape);
     outputs[0].set_shape(input_shape);
     using namespace ov::element;
-    return IfTypeOf<bf16, f16, f32>::apply<hswish::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      count);
+    return IF_TYPE_OF(v4_HSwish_evaluate,
+                      OV_PP_ET_LIST(bf16, f16, f32),
+                      hswish::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      count);
 }

 bool HSwish::has_evaluate() const {
@@ -52,13 +52,16 @@ bool Less::evaluate(TensorVector& outputs, const TensorVector& inputs) const {

     outputs[0].set_shape(infer_broadcast_shape(this, inputs));
     using namespace ov::element;
-    return IfTypeOf<boolean, f16, f32, i32, i64, u32, u64>::apply<less::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      inputs[1],
-                      outputs[0],
-                      inputs[0].get_shape(),
-                      inputs[1].get_shape(),
-                      get_autob());
+    return IF_TYPE_OF(v1_Less_evaluate,
+                      OV_PP_ET_LIST(boolean, f16, f32, i32, i64, u32, u64),
+                      less::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      inputs[1],
+                      outputs[0],
+                      inputs[0].get_shape(),
+                      inputs[1].get_shape(),
+                      get_autob());
 }

 bool Less::has_evaluate() const {
@@ -53,13 +53,16 @@ bool LessEqual::evaluate(TensorVector& outputs, const TensorVector& inputs) cons

     outputs[0].set_shape(infer_broadcast_shape(this, inputs));
     using namespace ov::element;
-    return IfTypeOf<boolean, f16, f32, i32, i64, u32, u64>::apply<less_equal::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      inputs[1],
-                      outputs[0],
-                      inputs[0].get_shape(),
-                      inputs[1].get_shape(),
-                      get_autob());
+    return IF_TYPE_OF(v1_LessEqual_evaluate,
+                      OV_PP_ET_LIST(boolean, f16, f32, i32, i64, u32, u64),
+                      less_equal::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      inputs[1],
+                      outputs[0],
+                      inputs[0].get_shape(),
+                      inputs[1].get_shape(),
+                      get_autob());
 }

 bool LessEqual::has_evaluate() const {
@@ -42,10 +42,13 @@ bool Log::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     const auto count = shape_size(input_shape);
     outputs[0].set_shape(input_shape);
     using namespace ov::element;
-    return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<log::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      count);
+    return IF_TYPE_OF(v0_Log_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      log::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      count);
 }

 bool Log::has_evaluate() const {
@@ -51,11 +51,13 @@ bool LogicalNot::evaluate(TensorVector& outputs, const TensorVector& inputs) con
     outputs[0].set_shape(inputs[0].get_shape());

     using namespace ov::element;
-    return IfTypeOf<boolean, i32, i64, u32, u64, f16, f32>::apply<logical_not::Evaluate>(
-                      inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(inputs[0].get_shape()));
+    return IF_TYPE_OF(v1_LogicalNot_evaluate,
+                      OV_PP_ET_LIST(boolean, i32, i64, u32, u64, f16, f32),
+                      logical_not::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(inputs[0].get_shape()));
 }

 bool LogicalNot::has_evaluate() const {
@@ -68,15 +68,18 @@ bool MatMul::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     outputs[0].set_shape(out_shape);

     using namespace ov::element;
-    return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<matmul::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      inputs[1],
-                      outputs[0],
-                      inputs[0].get_shape(),
-                      inputs[1].get_shape(),
-                      out_shape,
-                      m_transpose_a,
-                      m_transpose_b);
+    return IF_TYPE_OF(v0_MatMul_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      matmul::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      inputs[1],
+                      outputs[0],
+                      inputs[0].get_shape(),
+                      inputs[1].get_shape(),
+                      out_shape,
+                      m_transpose_a,
+                      m_transpose_b);
 }

 bool MatMul::has_evaluate() const {
@@ -93,15 +93,18 @@ bool MaxPool::evaluate(TensorVector& outputs, const TensorVector& inputs) const

     outputs[0].set_shape(output_shape.get_shape());
     using namespace ov::element;
-    return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<maxpool::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      inputs[0].get_shape(),
-                      outputs[0].get_shape(),
-                      get_kernel(),
-                      get_strides(),
-                      get_pads_begin(),
-                      get_pads_end());
+    return IF_TYPE_OF(v1_MaxPool_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      maxpool::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      inputs[0].get_shape(),
+                      outputs[0].get_shape(),
+                      get_kernel(),
+                      get_strides(),
+                      get_pads_begin(),
+                      get_pads_end());
 }

 bool MaxPool::has_evaluate() const {
@@ -208,18 +211,21 @@ struct Evaluate : element::NoAction<bool> {
                              const Shape& pads_end,
                              const int64_t axis) {
         using namespace ov::element;
-        return IfTypeOf<i32, i64>::apply<EvalByIdxType>(out_indices.get_element_type(),
-                          in.data<const T>(),
-                          out_values.data<T>(),
-                          out_indices,
-                          in_shape,
-                          out_shape,
-                          kernel,
-                          strides,
-                          dilations,
-                          pads_begin,
-                          pads_end,
-                          axis);
+        return IF_TYPE_OF(maxpool_eval_by_idx_type,
+                          OV_PP_ET_LIST(i32, i64),
+                          EvalByIdxType,
+                          out_indices.get_element_type(),
+                          in.data<const T>(),
+                          out_values.data<T>(),
+                          out_indices,
+                          in_shape,
+                          out_shape,
+                          kernel,
+                          strides,
+                          dilations,
+                          pads_begin,
+                          pads_end,
+                          axis);
     }

 private:
@@ -265,18 +271,21 @@ bool MaxPool::evaluate(TensorVector& outputs, const TensorVector& inputs) const

     outputs[0].set_shape(output_shape.get_shape());
     using namespace ov::element;
-    return IfTypeOf<f16, f32, i8, i32, i64, u8, u32, u64>::apply<maxpool::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      outputs[1],
-                      inputs[0].get_shape(),
-                      outputs[0].get_shape(),
-                      get_kernel(),
-                      get_strides(),
-                      get_dilations(),
-                      get_pads_begin(),
-                      get_pads_end(),
-                      get_axis());
+    return IF_TYPE_OF(v8_MaxPool_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i8, i32, i64, u8, u32, u64),
+                      maxpool::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      outputs[1],
+                      inputs[0].get_shape(),
+                      outputs[0].get_shape(),
+                      get_kernel(),
+                      get_strides(),
+                      get_dilations(),
+                      get_pads_begin(),
+                      get_pads_end(),
+                      get_axis());
 }

 bool MaxPool::has_evaluate() const {
@@ -48,13 +48,16 @@ bool Maximum::evaluate(TensorVector& outputs, const TensorVector& inputs) const

     outputs[0].set_shape(infer_broadcast_shape(this, inputs));
     using namespace ov::element;
-    return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<maximum::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      inputs[1],
-                      outputs[0],
-                      inputs[0].get_shape(),
-                      inputs[1].get_shape(),
-                      get_autob());
+    return IF_TYPE_OF(v1_Maximum_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      maximum::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      inputs[1],
+                      outputs[0],
+                      inputs[0].get_shape(),
+                      inputs[1].get_shape(),
+                      get_autob());
 }

 bool Maximum::has_evaluate() const {
@@ -49,13 +49,16 @@ bool Minimum::evaluate(TensorVector& outputs, const TensorVector& inputs) const

     outputs[0].set_shape(infer_broadcast_shape(this, inputs));
     using namespace ov::element;
-    return IfTypeOf<f16, f32, i32, i64, u8, u16, u32, u64>::apply<minimum::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      inputs[1],
-                      outputs[0],
-                      inputs[0].get_shape(),
-                      inputs[1].get_shape(),
-                      get_autob());
+    return IF_TYPE_OF(v1_Minimum_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u8, u16, u32, u64),
+                      minimum::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      inputs[1],
+                      outputs[0],
+                      inputs[0].get_shape(),
+                      inputs[1].get_shape(),
+                      get_autob());
 }

 bool Minimum::has_evaluate() const {
@@ -58,10 +58,13 @@ bool Mish::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     outputs[0].set_shape(in_shape);

     using namespace ov::element;
-    return IfTypeOf<f16, f32>::apply<mish::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(in_shape));
+    return IF_TYPE_OF(v4_Mish_evaluate,
+                      OV_PP_ET_LIST(f16, f32),
+                      mish::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(in_shape));
 }

 bool Mish::has_evaluate() const {
@@ -244,13 +244,16 @@ bool Mod::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) co

     outputs[0].set_shape(infer_broadcast_shape(this, inputs));
     using namespace ov::element;
-    return IfTypeOf<i8, i16, i32, i64, u8, u16, u32, u64>::apply<mod::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      inputs[1],
-                      outputs[0],
-                      inputs[0].get_shape(),
-                      inputs[1].get_shape(),
-                      get_autob());
+    return IF_TYPE_OF(v1_Mod_evaluate,
+                      OV_PP_ET_LIST(i8, i16, i32, i64, u8, u16, u32, u64),
+                      mod::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      inputs[1],
+                      outputs[0],
+                      inputs[0].get_shape(),
+                      inputs[1].get_shape(),
+                      get_autob());
 }

 bool Mod::evaluate_lower(TensorVector& outputs) const {
@@ -47,13 +47,16 @@ bool Multiply::evaluate(TensorVector& outputs, const TensorVector& inputs) const
     outputs[0].set_shape(infer_broadcast_shape(this, inputs));

     using namespace ov::element;
-    return IfTypeOf<bf16, f16, f32, f64, i32, i64, u32, u64>::apply<multiply::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      inputs[1],
-                      outputs[0],
-                      inputs[0].get_shape(),
-                      inputs[1].get_shape(),
-                      get_autob());
+    return IF_TYPE_OF(v1_Multiply_evaluate,
+                      OV_PP_ET_LIST(bf16, f16, f32, f64, i32, i64, u32, u64),
+                      multiply::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      inputs[1],
+                      outputs[0],
+                      inputs[0].get_shape(),
+                      inputs[1].get_shape(),
+                      get_autob());
 }

 bool Multiply::has_evaluate() const {
@@ -42,10 +42,13 @@ bool Negative::evaluate(TensorVector& outputs, const TensorVector& inputs) const

     outputs[0].set_shape(inputs[0].get_shape());
     using namespace ov::element;
-    return IfTypeOf<bf16, f16, f32, i32, i64>::apply<negative::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(inputs[0].get_shape()));
+    return IF_TYPE_OF(v0_Negative_evaluate,
+                      OV_PP_ET_LIST(bf16, f16, f32, i32, i64),
+                      negative::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(inputs[0].get_shape()));
 }

 bool Negative::has_evaluate() const {
@@ -26,7 +26,13 @@ struct Evaluate : public element::NoAction<bool> {
         out.set_shape(out_shape);

         using namespace ov::element;
-        return IfTypeOf<i32, i64>::apply<EvalByOutType>(out.get_element_type(), in_data, out, in_shape);
+        return IF_TYPE_OF(non_zero_out_type,
+                          OV_PP_ET_LIST(i32, i64),
+                          EvalByOutType,
+                          out.get_element_type(),
+                          in_data,
+                          out,
+                          in_shape);
     }

 private:
@@ -114,12 +120,14 @@ bool NonZero::evaluate(TensorVector& outputs, const TensorVector& inputs) const
     auto& output = outputs[0];
     using namespace ov::element;
     const auto& input_shape = input.get_shape();
-    return IfTypeOf<boolean, bf16, f16, f32, f64, i8, i16, i32, i64, u8, u16, u32, u64>::apply<non_zero::Evaluate>(
-                      input.get_element_type(),
-                      input,
-                      input_shape,
-                      input_shape.size(),
-                      output);
+    return IF_TYPE_OF(v3_NonZero_evaluate,
+                      OV_PP_ET_LIST(boolean, bf16, f16, f32, f64, i8, i16, i32, i64, u8, u16, u32, u64),
+                      non_zero::Evaluate,
+                      input.get_element_type(),
+                      input,
+                      input_shape,
+                      input_shape.size(),
+                      output);
 }

 bool NonZero::has_evaluate() const {
@@ -51,13 +51,16 @@ bool NotEqual::evaluate(TensorVector& outputs, const TensorVector& inputs) const

     outputs[0].set_shape(infer_broadcast_shape(this, inputs));
     using namespace ov::element;
-    return IfTypeOf<boolean, f16, f32, i32, i64, u32, u64>::apply<not_equal::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      inputs[1],
-                      outputs[0],
-                      inputs[0].get_shape(),
-                      inputs[1].get_shape(),
-                      get_autob());
+    return IF_TYPE_OF(v1_NotEqual_evaluate,
+                      OV_PP_ET_LIST(boolean, f16, f32, i32, i64, u32, u64),
+                      not_equal::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      inputs[1],
+                      outputs[0],
+                      inputs[0].get_shape(),
+                      inputs[1].get_shape(),
+                      get_autob());
 }

 bool NotEqual::has_evaluate() const {
@@ -118,15 +118,18 @@ bool OneHot::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     auto& output = outputs[0];
     output.set_shape(output_shape);
     using namespace ov::element;
-    return IfTypeOf<i32, i64>::apply<one_hot::Evaluate>(indices.get_element_type(),
-                      indices,
-                      indices_shape,
-                      static_cast<char*>(output.data()),
-                      output.get_element_type().size(),
-                      output.get_shape()[axis],
-                      on_value,
-                      off_value,
-                      axis);
+    return IF_TYPE_OF(v1_OneHot_evaluate,
+                      OV_PP_ET_LIST(i32, i64),
+                      one_hot::Evaluate,
+                      indices.get_element_type(),
+                      indices,
+                      indices_shape,
+                      static_cast<char*>(output.data()),
+                      output.get_element_type().size(),
+                      output.get_shape()[axis],
+                      on_value,
+                      off_value,
+                      axis);
 }

 bool OneHot::has_evaluate() const {
@@ -49,13 +49,16 @@ bool Power::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     out.set_shape(infer_broadcast_shape(this, inputs));

     using namespace ov::element;
-    return IfTypeOf<bf16, f16, f32, i32, i64, u32, u64>::apply<power::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      inputs[1],
-                      out,
-                      inputs[0].get_shape(),
-                      inputs[1].get_shape(),
-                      get_autob());
+    return IF_TYPE_OF(v1_Power_evaluate,
+                      OV_PP_ET_LIST(bf16, f16, f32, i32, i64, u32, u64),
+                      power::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      inputs[1],
+                      out,
+                      inputs[0].get_shape(),
+                      inputs[1].get_shape(),
+                      get_autob());
 }

 bool Power::has_evaluate() const {
@@ -55,12 +55,15 @@ bool PRelu::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     out.set_shape(arg_shape);

     using namespace ov::element;
-    return IfTypeOf<bf16, f16, f32, i8>::apply<prelu::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      inputs[1],
-                      out,
-                      arg_shape,
-                      inputs[1].get_shape());
+    return IF_TYPE_OF(v0_PRelu_evaluate,
+                      OV_PP_ET_LIST(bf16, f16, f32, i8),
+                      prelu::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      inputs[1],
+                      out,
+                      arg_shape,
+                      inputs[1].get_shape());
 }

 bool PRelu::has_evaluate() const {
@@ -119,11 +119,14 @@ bool Range::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     const auto step = get_tensor_data_as<double>(inputs[2])[0];

     using namespace ov::element;
-    return IfTypeOf<RANGE_ET_LIST>::apply<range::Evaluate>(out.get_element_type(),
-                      start,
-                      step,
-                      shape_size(out_shape),
-                      out);
+    return IF_TYPE_OF(v4_Range_evaluate,
+                      RANGE_ET_LIST,
+                      range::Evaluate,
+                      out.get_element_type(),
+                      start,
+                      step,
+                      shape_size(out_shape),
+                      out);
 }

 bool Range::has_evaluate() const {
@@ -199,11 +202,14 @@ bool Range::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     out.set_shape(out_shape);

     using namespace ov::element;
-    return IfTypeOf<RANGE_ET_LIST>::apply<range::Evaluate>(out.get_element_type(),
-                      start,
-                      step,
-                      shape_size(out_shape),
-                      out);
+    return IF_TYPE_OF(v0_Range_evaluate,
+                      RANGE_ET_LIST,
+                      range::Evaluate,
+                      out.get_element_type(),
+                      start,
+                      step,
+                      shape_size(out_shape),
+                      out);
 }

 bool Range::has_evaluate() const {
@@ -48,10 +48,13 @@ bool ReduceL1::evaluate(TensorVector& outputs, const TensorVector& inputs) const
     outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));

     using namespace ov::element;
-    return IfTypeOf<bf16, f16, f32, i32, i64>::apply<reduce_l1::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      reduction_axes);
+    return IF_TYPE_OF(v4_ReduceL1_evaluate,
+                      OV_PP_ET_LIST(bf16, f16, f32, i32, i64),
+                      reduce_l1::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      reduction_axes);
 }

 bool ReduceL1::has_evaluate() const {
@@ -47,10 +47,13 @@ bool ReduceL2::evaluate(TensorVector& outputs, const TensorVector& inputs) const
     outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));

     using namespace ov::element;
-    return IfTypeOf<bf16, f16, f32>::apply<reduce_l2::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      reduction_axes);
+    return IF_TYPE_OF(v4_ReduceL2_evaluate,
+                      OV_PP_ET_LIST(bf16, f16, f32),
+                      reduce_l2::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      reduction_axes);
 }

 bool ReduceL2::has_evaluate() const {
@@ -47,10 +47,13 @@ bool ReduceLogicalAnd::evaluate(TensorVector& outputs, const TensorVector& input
     outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));

     using namespace ov::element;
-    return IfTypeOf<boolean>::apply<reduce_and::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      reduction_axes);
+    return IF_TYPE_OF(v1_ReduceLogicalAnd_evaluate,
+                      boolean,
+                      reduce_and::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      reduction_axes);
 }

 bool ReduceLogicalAnd::has_evaluate() const {
@@ -48,10 +48,13 @@ bool ReduceLogicalOr::evaluate(TensorVector& outputs, const TensorVector& inputs
     outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));

     using namespace ov::element;
-    return IfTypeOf<boolean>::apply<reduce_or::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      reduction_axes);
+    return IF_TYPE_OF(v1_ReduceLogicalOr_evaluate,
+                      boolean,
+                      reduce_or::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      reduction_axes);
 }

 bool ReduceLogicalOr::has_evaluate() const {
@@ -47,10 +47,13 @@ bool ReduceMax::evaluate(TensorVector& outputs, const TensorVector& inputs) cons
     outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));

     using namespace ov::element;
-    return IfTypeOf<f16, f32, i8, i32, i64, u8, u32, u64>::apply<reduce_max::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      reduction_axes);
+    return IF_TYPE_OF(v1_ReduceMax_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i8, i32, i64, u8, u32, u64),
+                      reduce_max::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      reduction_axes);
 }

 bool ReduceMax::has_evaluate() const {
@@ -45,10 +45,13 @@ bool ReduceMean::evaluate(TensorVector& outputs, const TensorVector& inputs) con
     outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));

     using namespace ov::element;
-    return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<reduce_mean::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      reduction_axes);
+    return IF_TYPE_OF(v1_ReduceMean_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      reduce_mean::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      reduction_axes);
 }

 bool ReduceMean::has_evaluate() const {
@@ -45,10 +45,13 @@ bool ReduceMin::evaluate(TensorVector& outputs, const TensorVector& inputs) cons
    outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));

     using namespace ov::element;
-    return IfTypeOf<f16, f32, i8, i32, i64, u8, u32, u64>::apply<reduce_min::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      reduction_axes);
+    return IF_TYPE_OF(v1_ReduceMin_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i8, i32, i64, u8, u32, u64),
+                      reduce_min::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      reduction_axes);
 }

 bool ReduceMin::has_evaluate() const {
@@ -57,10 +57,13 @@ bool ReduceProd::evaluate(TensorVector& outputs, const TensorVector& inputs) con
     outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));

     using namespace ov::element;
-    return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<reduce_prod::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      reduction_axes);
+    return IF_TYPE_OF(v1_ReduceProd_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      reduce_prod::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      reduction_axes);
 }

 bool ReduceProd::has_evaluate() const {
@@ -45,10 +45,13 @@ bool ReduceSum::evaluate(TensorVector& outputs, const TensorVector& inputs) cons
     outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));

     using namespace ov::element;
-    return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<reduce_sum::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      reduction_axes);
+    return IF_TYPE_OF(v1_ReduceSum_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      reduce_sum::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      reduction_axes);
 }

 bool ReduceSum::has_evaluate() const {
@@ -42,10 +42,13 @@ bool Relu::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     outputs[0].set_shape(in_shape);

     using namespace ov::element;
-    return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<relu::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(in_shape));
+    return IF_TYPE_OF(v0_Relu_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      relu::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(in_shape));
 }

 bool Relu::has_evaluate() const {
@@ -59,12 +59,14 @@ bool Round::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     auto& out = outputs.front();

     using namespace ov::element;
-    return IfTypeOf<boolean, i8, i16, i32, i64, u8, u16, u32, u64, bf16, f16, f32>::apply<round::Evaluate>(
-                      arg0.get_element_type(),
-                      arg0,
-                      out,
-                      shape_size(arg0.get_shape()),
-                      get_mode());
+    return IF_TYPE_OF(v5_Round_evaluate,
+                      OV_PP_ET_LIST(boolean, i8, i16, i32, i64, u8, u16, u32, u64, bf16, f16, f32),
+                      round::Evaluate,
+                      arg0.get_element_type(),
+                      arg0,
+                      out,
+                      shape_size(arg0.get_shape()),
+                      get_mode());
 }

 bool Round::has_evaluate() const {
@@ -101,16 +101,19 @@ struct Evaluate : public element::NoAction<bool> {

                              ) {
         using namespace ov::element;
-        return IfTypeOf<i8, i16, i32, i64, u8, u16, u32, u64>::apply<EvaluateByIndicesType>(indices.get_element_type(),
-                          data.data<const DT>(),
-                          indices,
-                          updates.data<const DT>(),
-                          output.data<DT>(),
-                          data_shape,
-                          indices_shape,
-                          axis,
-                          reduction,
-                          use_init_value);
+        return IF_TYPE_OF(scatter_el_update_idx_type,
+                          OV_PP_ET_LIST(i8, i16, i32, i64, u8, u16, u32, u64),
+                          EvaluateByIndicesType,
+                          indices.get_element_type(),
+                          data.data<const DT>(),
+                          indices,
+                          updates.data<const DT>(),
+                          output.data<DT>(),
+                          data_shape,
+                          indices_shape,
+                          axis,
+                          reduction,
+                          use_init_value);
     }

 private:
@@ -156,18 +159,21 @@ bool evaluate(TensorVector& outputs,
     const auto& data_shape = data.get_shape();
     const auto& indices_shape = indices.get_shape();
     output.set_shape(data_shape);
+
     using namespace ov::element;
-    return IfTypeOf<boolean, f16, f32, i16, i32, i64, u32, u64>::apply<scatter_elements_update::Evaluate>(
-                      data.get_element_type(),
-                      data,
-                      indices,
-                      updates,
-                      output,
-                      data_shape,
-                      indices_shape,
-                      axis,
-                      reduction,
-                      use_init_value);
+    return IF_TYPE_OF(scatter_evaluate,
+                      OV_PP_ET_LIST(boolean, f16, f32, i16, i32, i64, u32, u64),
+                      scatter_elements_update::Evaluate,
+                      data.get_element_type(),
+                      data,
+                      indices,
+                      updates,
+                      output,
+                      data_shape,
+                      indices_shape,
+                      axis,
+                      reduction,
+                      use_init_value);
 }
 } // namespace
 } // namespace scatter_elements_update
@@ -24,14 +24,17 @@ struct Evaluate : public element::NoAction<bool> {
                       const Shape& indices_shape,
                       const Shape& updates_shape) {
     using namespace ov::element;
-    return IfTypeOf<i32, i64>::apply<EvaluateByIndicesType>(indices.get_element_type(),
-                      data.data<const DT>(),
-                      indices,
-                      updates.data<const DT>(),
-                      output.data<DT>(),
-                      data_shape,
-                      indices_shape,
-                      updates_shape);
+    return IF_TYPE_OF(sctter_nd_eval_idx_type,
+                      OV_PP_ET_LIST(i32, i64),
+                      EvaluateByIndicesType,
+                      indices.get_element_type(),
+                      data.data<const DT>(),
+                      indices,
+                      updates.data<const DT>(),
+                      output.data<DT>(),
+                      data_shape,
+                      indices_shape,
+                      updates_shape);
 }
 
 private:
@@ -82,14 +85,17 @@ bool ScatterNDUpdate::evaluate(TensorVector& outputs, const TensorVector& inputs
     const auto& updates_shape = updates.get_shape();
     output.set_shape(data_shape);
     using namespace ov::element;
-    return IfTypeOf<boolean, f16, f32, i32, i64, u32, u64>::apply<scatter_nd_update::Evaluate>(data.get_element_type(),
-                      data,
-                      indices,
-                      updates,
-                      output,
-                      data_shape,
-                      indices_shape,
-                      updates_shape);
+    return IF_TYPE_OF(v3_ScatterNDUpdate_evaluate,
+                      OV_PP_ET_LIST(boolean, f16, f32, i32, i64, u32, u64),
+                      scatter_nd_update::Evaluate,
+                      data.get_element_type(),
+                      data,
+                      indices,
+                      updates,
+                      output,
+                      data_shape,
+                      indices_shape,
+                      updates_shape);
 }
 
 bool ScatterNDUpdate::has_evaluate() const {
@@ -44,10 +44,13 @@ bool Sigmoid::evaluate(TensorVector& outputs, const TensorVector& inputs) const
     outputs[0].set_shape(in_shape);
 
     using namespace ov::element;
-    return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<sigmoid::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(in_shape));
+    return IF_TYPE_OF(v0_Sigmoid_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      sigmoid::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(in_shape));
 }
 
 bool Sigmoid::has_evaluate() const {
@@ -43,10 +43,13 @@ bool Sign::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     outputs[0].set_shape(in_shape);
 
     using namespace ov::element;
-    return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<sign::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(in_shape));
+    return IF_TYPE_OF(v0_Sign_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      sign::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(in_shape));
 }
 
 bool Sign::has_evaluate() const {
@@ -47,10 +47,13 @@ bool Sin::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     outputs[0].set_shape(inputs[0].get_shape());
 
     using namespace ov::element;
-    return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<sin::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(inputs[0].get_shape()));
+    return IF_TYPE_OF(v0_Sin_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      sin::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(inputs[0].get_shape()));
 }
 
 bool Sin::has_evaluate() const {
@@ -45,10 +45,13 @@ bool Sinh::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     outputs[0].set_shape(inputs[0].get_shape());
 
     using namespace ov::element;
-    return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<sinh::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(inputs[0].get_shape()));
+    return IF_TYPE_OF(v0_Sinh_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      sinh::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(inputs[0].get_shape()));
 }
 
 bool Sinh::has_evaluate() const {
@@ -68,11 +68,14 @@ bool Softmax::evaluate(TensorVector& outputs, const TensorVector& inputs) const
     const auto& input_shape = inputs[0].get_shape();
     outputs[0].set_shape(input_shape);
     using namespace ov::element;
-    return IfTypeOf<bf16, f16, f32, f64>::apply<softmax::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      input_shape,
-                      AxisSet{m_axis});
+    return IF_TYPE_OF(v1_Softmax_evaluate,
+                      OV_PP_ET_LIST(bf16, f16, f32, f64),
+                      softmax::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      input_shape,
+                      AxisSet{m_axis});
 }
 
 bool Softmax::has_evaluate() const {
@@ -140,11 +143,14 @@ bool Softmax::evaluate(TensorVector& outputs, const TensorVector& inputs) const
 
     outputs[0].set_shape(input_shape);
     using namespace ov::element;
-    return IfTypeOf<bf16, f16, f32, f64>::apply<softmax::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      input_shape,
-                      AxisSet{axis});
+    return IF_TYPE_OF(v8_Softmax_evaluate,
+                      OV_PP_ET_LIST(bf16, f16, f32, f64),
+                      softmax::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      input_shape,
+                      AxisSet{axis});
 }
 
 bool Softmax::has_evaluate() const {
@@ -57,10 +57,13 @@ bool SoftPlus::evaluate(TensorVector& outputs, const TensorVector& inputs) const
     const auto count = shape_size(input_shape);
     outputs[0].set_shape(input_shape);
     using namespace ov::element;
-    return IfTypeOf<bf16, f16, f32>::apply<softplus::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      count);
+    return IF_TYPE_OF(v4_SoftPlus_evaluate,
+                      OV_PP_ET_LIST(bf16, f16, f32),
+                      softplus::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      count);
 }
 
 bool SoftPlus::has_evaluate() const {
@@ -75,10 +75,13 @@ bool SoftSign::evaluate(TensorVector& outputs,
     const auto& input_shape = inputs[0].get_shape();
     outputs[0].set_shape(input_shape);
     using namespace ov::element;
-    return IfTypeOf<bf16, f16, f32, f64>::apply<softsign::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(input_shape));
+    return IF_TYPE_OF(v9_SoftSign_evaluate,
+                      OV_PP_ET_LIST(bf16, f16, f32, f64),
+                      softsign::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(input_shape));
 }
 } // namespace v9
 } // namespace op
@@ -41,10 +41,13 @@ bool Sqrt::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     const auto& in_shape = inputs[0].get_shape();
     outputs[0].set_shape(in_shape);
     using namespace ov::element;
-    return IfTypeOf<f16, f32, f64, i32, i64, u32, u64>::apply<sqrt::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(in_shape));
+    return IF_TYPE_OF(v0_Sqrt_evaluate,
+                      OV_PP_ET_LIST(f16, f32, f64, i32, i64, u32, u64),
+                      sqrt::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(in_shape));
 }
 
 bool Sqrt::has_evaluate() const {
@@ -48,13 +48,16 @@ bool Subtract::evaluate(TensorVector& outputs, const TensorVector& inputs) const
 
     outputs[0].set_shape(infer_broadcast_shape(this, inputs));
     using namespace ov::element;
-    return IfTypeOf<bf16, f16, f32, i8, i32, i64, u8, u32, u64>::apply<subtract::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      inputs[1],
-                      outputs[0],
-                      inputs[0].get_shape(),
-                      inputs[1].get_shape(),
-                      get_autob());
+    return IF_TYPE_OF(v1_Subtract_evaluate,
+                      OV_PP_ET_LIST(bf16, f16, f32, i8, i32, i64, u8, u32, u64),
+                      subtract::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      inputs[1],
+                      outputs[0],
+                      inputs[0].get_shape(),
+                      inputs[1].get_shape(),
+                      get_autob());
 }
 
 bool Subtract::has_evaluate() const {
@@ -88,11 +88,14 @@ bool Swish::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     const auto& arg1 = inputs.size() == 2 ? inputs[1] : Tensor();
 
     using namespace ov::element;
-    return IfTypeOf<f16, f32>::apply<swish::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      arg1,
-                      outputs[0],
-                      shape_size(inputs[0].get_shape()));
+    return IF_TYPE_OF(v4_Swish_evaluate,
+                      OV_PP_ET_LIST(f16, f32),
+                      swish::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      arg1,
+                      outputs[0],
+                      shape_size(inputs[0].get_shape()));
 }
 
 bool Swish::has_evaluate() const {
@@ -45,10 +45,13 @@ bool Tan::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     outputs[0].set_shape(inputs[0].get_shape());
 
     using namespace ov::element;
-    return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<tan::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(inputs[0].get_shape()));
+    return IF_TYPE_OF(v0_Tan_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      tan::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(inputs[0].get_shape()));
 }
 
 bool Tan::has_evaluate() const {
@@ -46,10 +46,13 @@ bool Tanh::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     outputs[0].set_shape(inputs[0].get_shape());
 
    using namespace ov::element;
-    return IfTypeOf<i32, i64, u32, u64, f16, f32>::apply<tanh::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      shape_size(inputs[0].get_shape()));
+    return IF_TYPE_OF(v0_Tanh_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      tanh::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      shape_size(inputs[0].get_shape()));
 }
 
 bool Tanh::has_evaluate() const {
@@ -59,16 +59,19 @@ struct Evaluate : public element::NoAction<bool> {
                       const bool compute_max,
                      const TopKSortType sort) {
     using namespace ov::element;
-    return IfTypeOf<i32, i64>::apply<EvalByIdxType>(out_indices.get_element_type(),
-                      in.data<const T>(),
-                      out_values.data<T>(),
-                      out_indices,
-                      in.get_shape(),
-                      out_shape,
-                      axis,
-                      out_shape[axis],
-                      compute_max,
-                      sort);
+    return IF_TYPE_OF(topk_eval_by_idx_type,
+                      OV_PP_ET_LIST(i32, i64),
+                      EvalByIdxType,
+                      out_indices.get_element_type(),
+                      in.data<const T>(),
+                      out_values.data<T>(),
+                      out_indices,
+                      in.get_shape(),
+                      out_shape,
+                      axis,
+                      out_shape[axis],
+                      compute_max,
+                      sort);
 }
 
 private:
@@ -116,14 +119,17 @@ bool evaluate(const util::TopKBase* const node, TensorVector& outputs, const Ten
     }
 
     using namespace ov::element;
-    return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<topk::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      outputs[0],
-                      outputs[1],
-                      output_shape,
-                      axis,
-                      (node->get_mode() == ov::op::TopKMode::MAX),
-                      node->get_sort_type());
+    return IF_TYPE_OF(topk_evaluate,
+                      OV_PP_ET_LIST(f16, f32, i32, i64, u32, u64),
+                      topk::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      outputs[0],
+                      outputs[1],
+                      output_shape,
+                      axis,
+                      (node->get_mode() == ov::op::TopKMode::MAX),
+                      node->get_sort_type());
 }
 } // namespace
 } // namespace topk
@@ -45,13 +45,16 @@ bool evaluate(const Node* const op, TensorVector& outputs, const TensorVector& i
 
     outputs[0].set_shape(infer_broadcast_shape(op, inputs));
     using namespace ov::element;
-    return IfTypeOf<boolean>::apply<logxor::Evaluate>(inputs[0].get_element_type(),
-                      inputs[0],
-                      inputs[1],
-                      outputs[0],
-                      inputs[0].get_shape(),
-                      inputs[1].get_shape(),
-                      op->get_autob());
+    return IF_TYPE_OF(Xor_evaluate,
+                      boolean,
+                      logxor::Evaluate,
+                      inputs[0].get_element_type(),
+                      inputs[0],
+                      inputs[1],
+                      outputs[0],
+                      inputs[0].get_shape(),
+                      inputs[1].get_shape(),
+                      op->get_autob());
 }
 } // namespace
 } // namespace logxor
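For context on what every hunk above is doing: both the old IfTypeOf<...>::apply<Functor>(...) helper and the new IF_TYPE_OF(region, types, Functor, type, args...) macro dispatch on the runtime element type and call a templated Evaluate-style functor instantiated for the matching C++ type; the macro form additionally names a region (e.g. v1_ReduceSum_evaluate), which is what lets the conditional-compilation (CC) build mentioned in the commit title account for each dispatch site. The sketch below is a minimal, standalone model of that dispatch pattern only; ElementType, dispatch, and NegateEvaluate are illustrative names, and none of this is the actual OpenVINO implementation.

// Minimal standalone model (illustration only, not OpenVINO code) of
// runtime element-type dispatch to a templated Evaluate functor.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

enum class ElementType { f32, i32, i64, unsupported };  // stand-in for a runtime type tag

// Pick a C++ type from the runtime tag and forward to Functor::visit<T>(...).
template <class Functor, class... Args>
bool dispatch(ElementType et, Args&&... args) {
    switch (et) {
    case ElementType::f32:
        return Functor::template visit<float>(std::forward<Args>(args)...);
    case ElementType::i32:
        return Functor::template visit<int32_t>(std::forward<Args>(args)...);
    case ElementType::i64:
        return Functor::template visit<int64_t>(std::forward<Args>(args)...);
    default:
        return false;  // element type not in the supported list
    }
}

// Functor written in the style of the operators' Evaluate helpers above.
struct NegateEvaluate {
    template <class T>
    static bool visit(const void* in, void* out, std::size_t count) {
        const T* src = static_cast<const T*>(in);
        T* dst = static_cast<T*>(out);
        for (std::size_t i = 0; i < count; ++i) {
            dst[i] = static_cast<T>(-src[i]);
        }
        return true;
    }
};

int main() {
    std::vector<float> in{1.0f, -2.0f, 3.0f};
    std::vector<float> out(in.size());
    const bool ok = dispatch<NegateEvaluate>(ElementType::f32, in.data(), out.data(), in.size());
    std::cout << ok << ' ' << out[1] << '\n';  // prints "1 2"
    return 0;
}

Compared with the template helper, the macro call keeps the same shape but makes each dispatch site individually addressable by name, which is what allows unused type branches to be tracked and stripped by the CC build.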