[core] Migrate Minimum operator to new API (#20597)

* Migrate Minimum op to new API

* Refactor evaluates to reduce binary size
- add an infer_broadcast_shape overload that takes the input tensors and reads their shapes, reducing OPENVINO_ASSERT checks
- refactor Evaluate structures to reduce binary size

---------

Co-authored-by: Michal Lukaszewski <michal.lukaszewski@intel.com>
Pawel Raasz 2023-10-24 14:48:09 +02:00 committed by GitHub
parent e977a6ed9b
commit 5ffde7d8d6
14 changed files with 113 additions and 119 deletions
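
At a glance, the interface change this migration boils down to, taken from the first header hunk below: the deprecated HostTensor-based overload is dropped and only the ov::Tensor-based one remains.

bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;  // removed (deprecated API)
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;                // kept (new API)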

@@ -29,9 +29,7 @@ public:
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v1

@@ -4,7 +4,7 @@
#pragma once
#include <cstddef>
#include <algorithm>
#include "openvino/core/shape.hpp"
#include "openvino/op/util/attr_types.hpp"
@@ -12,11 +12,16 @@
namespace ov {
namespace reference {
namespace func {
template <class T>
T min(const T a, const T b) {
return std::min(a, b);
}
} // namespace func
template <typename T>
void minimum(const T* arg0, const T* arg1, T* out, size_t count) {
for (size_t i = 0; i < count; i++) {
out[i] = arg0[i] < arg1[i] ? arg0[i] : arg1[i];
}
std::transform(arg0, std::next(arg0, count), arg1, out, func::min<T>);
}
template <typename T>
@@ -26,9 +31,7 @@ void minimum(const T* arg0,
const Shape& arg0_shape,
const Shape& arg1_shape,
const op::AutoBroadcastSpec& broadcast_spec) {
autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, [](T x, T y) -> T {
return x < y ? x : y;
});
autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, func::min<T>);
}
} // namespace reference
} // namespace ov
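
For orientation, a minimal hypothetical call into the same-shape kernel above; the array contents are made up for illustration only:

#include "openvino/reference/minimum.hpp"

int main() {
    const float a[] = {1.0f, 5.0f, 3.0f};
    const float b[] = {4.0f, 2.0f, 3.0f};
    float out[3];
    // element-wise minimum over 3 elements, no broadcasting involved
    ov::reference::minimum(a, b, out, 3);  // out becomes {1.0f, 2.0f, 3.0f}
    return 0;
}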

@@ -419,6 +419,17 @@ ov::optional<TResult> get_input_bounds(const ov::Node* op, size_t port, const IT
* @return Result shape from inputs with applied broadcast specification.
*/
ov::Shape infer_broadcast_shape(const ov::Node* const op, const ov::Shape& first, const ov::Shape& second);
/**
* @brief Infer the broadcast shape from the input tensor shapes for an element-wise operator,
* according to the broadcast specification stored in the operator.
*
* @param op Pointer to the operator.
* @param inputs Vector of input tensors whose shapes are used.
*
* @return Result shape inferred from the input tensor shapes, with the broadcast specification applied.
*/
ov::Shape infer_broadcast_shape(const ov::Node* const op, const ov::TensorVector& inputs);
} // namespace op
/**

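
As a hypothetical illustration of the new overload (assuming node points at an element-wise op created with NUMPY auto-broadcast), the shapes are read straight from the tensors:

// {8, 1, 6, 1} broadcast with {7, 1, 5} resolves to {8, 7, 6, 5}
ov::TensorVector inputs{ov::Tensor(ov::element::f32, ov::Shape{8, 1, 6, 1}),
                        ov::Tensor(ov::element::f32, ov::Shape{7, 1, 5})};
const auto out_shape = ov::op::infer_broadcast_shape(node, inputs);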
@@ -5,6 +5,7 @@
#include "utils.hpp"
#include "eltwise_shape_inference.hpp"
#include "openvino/core/validation_util.hpp"
namespace ov {
namespace op {
@@ -12,5 +13,9 @@ namespace op {
ov::Shape infer_broadcast_shape(const ov::Node* const op, const ov::Shape& first, const ov::Shape& second) {
return eltwise_shape_infer(op, std::vector<ov::PartialShape>{first, second}).front().to_shape();
}
ov::Shape infer_broadcast_shape(const ov::Node* const op, const ov::TensorVector& inputs) {
return eltwise_shape_infer(op, ov::util::get_tensors_partial_shapes(inputs)).front().to_shape();
}
} // namespace op
} // namespace ov

@@ -19,14 +19,11 @@ struct Evaluate : element::NoAction<bool> {
static result_type visit(const Tensor& in0,
const Tensor& in1,
Tensor& out,
const Shape& shape0,
const Shape& shape1,
const AutoBroadcastSpec& broadcast_spec) {
using T = typename element_type_traits<ET>::value_type;
reference::add(in0.data<const T>(),
in1.data<const T>(),
out.data<T>(),
in0.get_shape(),
in1.get_shape(),
broadcast_spec);
reference::add(in0.data<const T>(), in1.data<const T>(), out.data<T>(), shape0, shape1, broadcast_spec);
return true;
}
};
@@ -48,15 +45,16 @@ std::shared_ptr<Node> Add::clone_with_new_inputs(const OutputVector& new_args) c
bool Add::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const {
OV_OP_SCOPE(v1_Add_evaluate);
OPENVINO_ASSERT(outputs.size() == 1);
OPENVINO_ASSERT(inputs.size() == 2);
outputs[0].set_shape(infer_broadcast_shape(this, inputs[0].get_shape(), inputs[1].get_shape()));
outputs[0].set_shape(infer_broadcast_shape(this, inputs));
using namespace ov::element;
return IfTypeOf<bf16, f16, f32, i8, i16, i32, i64, u8, u16, u32, u64>::apply<add::Evaluate>(
inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
}

@@ -25,19 +25,16 @@ std::shared_ptr<Node> LogicalAnd::clone_with_new_inputs(const OutputVector& new_
bool LogicalAnd::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v1_LogicalAnd_evaluate);
OPENVINO_ASSERT(outputs.size() == 1);
OPENVINO_ASSERT(inputs.size() == 2);
const auto& shape_0 = inputs[0].get_shape();
const auto& shape_1 = inputs[1].get_shape();
outputs[0].set_shape(infer_broadcast_shape(this, shape_0, shape_1));
outputs[0].set_shape(infer_broadcast_shape(this, inputs));
if (inputs[0].get_element_type() == element::boolean) {
using T = fundamental_type_for<element::boolean>;
reference::logical_and(inputs[0].data<const T>(),
inputs[1].data<const T>(),
outputs[0].data<T>(),
shape_0,
shape_1,
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
return true;
} else {

@@ -26,19 +26,16 @@ std::shared_ptr<Node> LogicalOr::clone_with_new_inputs(const OutputVector& new_a
bool LogicalOr::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v1_LogicalOr_evaluate);
OPENVINO_ASSERT(outputs.size() == 1);
OPENVINO_ASSERT(inputs.size() == 2);
const auto& shape_0 = inputs[0].get_shape();
const auto& shape_1 = inputs[1].get_shape();
outputs[0].set_shape(infer_broadcast_shape(this, shape_0, shape_1));
outputs[0].set_shape(infer_broadcast_shape(this, inputs));
if (inputs[0].get_element_type() == element::boolean) {
using T = fundamental_type_for<element::boolean>;
reference::logical_or(inputs[0].data<const T>(),
inputs[1].data<const T>(),
outputs[0].data<T>(),
shape_0,
shape_1,
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
return true;
} else {

@@ -2,92 +2,78 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/minimum.hpp"
#include <memory>
#include "openvino/op/minimum.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/op/convert.hpp"
#include "ngraph/op/less.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/type/element_type.hpp"
#include "openvino/reference/minimum.hpp"
#include "utils.hpp"
using namespace std;
using namespace ngraph;
namespace ov {
namespace op {
OPENVINO_SUPPRESS_DEPRECATED_START
namespace minimumop {
namespace {
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
const HostTensorPtr& arg1,
const HostTensorPtr& out,
const op::AutoBroadcastSpec& broadcast_spec) {
ov::reference::minimum(arg0->get_data_ptr<ET>(),
arg1->get_data_ptr<ET>(),
out->get_data_ptr<ET>(),
arg0->get_shape(),
arg1->get_shape(),
broadcast_spec);
return true;
}
namespace minimum {
bool evaluate_minimum(const HostTensorPtr& arg0,
const HostTensorPtr& arg1,
const HostTensorPtr& out,
const op::AutoBroadcastSpec& broadcast_spec) {
bool rc = true;
out->set_broadcast(broadcast_spec, arg0, arg1);
switch (arg0->get_element_type()) {
OPENVINO_TYPE_CASE(evaluate_minimum, i32, arg0, arg1, out, broadcast_spec);
OPENVINO_TYPE_CASE(evaluate_minimum, i64, arg0, arg1, out, broadcast_spec);
OPENVINO_TYPE_CASE(evaluate_minimum, u8, arg0, arg1, out, broadcast_spec);
OPENVINO_TYPE_CASE(evaluate_minimum, u16, arg0, arg1, out, broadcast_spec);
OPENVINO_TYPE_CASE(evaluate_minimum, u32, arg0, arg1, out, broadcast_spec);
OPENVINO_TYPE_CASE(evaluate_minimum, u64, arg0, arg1, out, broadcast_spec);
OPENVINO_TYPE_CASE(evaluate_minimum, f16, arg0, arg1, out, broadcast_spec);
OPENVINO_TYPE_CASE(evaluate_minimum, f32, arg0, arg1, out, broadcast_spec);
default:
rc = false;
break;
struct Evaluate : element::NoAction<bool> {
using element::NoAction<bool>::visit;
template <element::Type_t ET, class T = fundamental_type_for<ET>>
static result_type visit(const Tensor& arg0,
const Tensor& arg1,
Tensor& out,
const Shape& shape0,
const Shape& shape1,
const AutoBroadcastSpec& broadcast_spec) {
reference::minimum(arg0.data<const T>(), arg1.data<const T>(), out.data<T>(), shape0, shape1, broadcast_spec);
return true;
}
return rc;
}
} // namespace
} // namespace minimumop
};
} // namespace minimum
// ------------------------------ v1 -------------------------------------------
op::v1::Minimum::Minimum(const Output<Node>& arg0, const Output<Node>& arg1, const AutoBroadcastSpec& auto_broadcast)
namespace v1 {
Minimum::Minimum(const Output<Node>& arg0, const Output<Node>& arg1, const AutoBroadcastSpec& auto_broadcast)
: BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) {
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::v1::Minimum::clone_with_new_inputs(const OutputVector& new_args) const {
std::shared_ptr<Node> Minimum::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v1_Minimum_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v1::Minimum>(new_args.at(0), new_args.at(1), this->get_autob());
return std::make_shared<Minimum>(new_args.at(0), new_args.at(1), get_autob());
}
bool op::v1::Minimum::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool Minimum::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v1_Minimum_evaluate);
return minimumop::evaluate_minimum(inputs[0], inputs[1], outputs[0], get_autob());
OPENVINO_ASSERT(outputs.size() == 1);
outputs[0].set_shape(infer_broadcast_shape(this, inputs));
using namespace ov::element;
return IfTypeOf<f16, f32, i32, i64, u8, u16, u32, u64>::apply<minimum::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
}
bool op::v1::Minimum::has_evaluate() const {
bool Minimum::has_evaluate() const {
OV_OP_SCOPE(v1_Minimum_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u32:
case ngraph::element::u64:
case ngraph::element::f16:
case ngraph::element::f32:
case element::f16:
case element::f32:
case element::i32:
case element::i64:
case element::u8:
case element::u16:
case element::u32:
case element::u64:
return true;
default:
break;
return false;
}
return false;
}
} // namespace v1
} // namespace op
} // namespace ov
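
A hypothetical end-to-end sketch of the migrated API (not part of this diff): build a Minimum node from Parameter inputs and evaluate it with plain ov::Tensor objects.

#include <memory>
#include "openvino/op/minimum.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/runtime/tensor.hpp"

int main() {
    auto a = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{2, 2});
    auto b = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{2, 2});
    auto min_op = std::make_shared<ov::op::v1::Minimum>(a, b);

    ov::TensorVector inputs{ov::Tensor(ov::element::f32, ov::Shape{2, 2}),
                            ov::Tensor(ov::element::f32, ov::Shape{2, 2})};
    ov::TensorVector outputs{ov::Tensor(ov::element::f32, ov::Shape{2, 2})};
    // fill inputs[0].data<float>() / inputs[1].data<float>() before the call;
    // evaluate() sets the output shape itself via infer_broadcast_shape.
    min_op->evaluate(outputs, inputs);
    return 0;
}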

@@ -37,14 +37,11 @@ struct Evaluate : ov::element::NoAction<bool> {
static result_type visit(const Tensor& in0,
const Tensor& in1,
Tensor& out,
const Shape& shape0,
const Shape& shape1,
const AutoBroadcastSpec& broadcast_spec) {
using T = typename element_type_traits<ET>::value_type;
reference::mod(in0.data<const T>(),
in1.data<const T>(),
out.data<T>(),
in0.get_shape(),
in1.get_shape(),
broadcast_spec);
reference::mod(in0.data<const T>(), in1.data<const T>(), out.data<T>(), shape0, shape1, broadcast_spec);
return true;
}
};
@@ -244,14 +241,15 @@ std::shared_ptr<Node> Mod::clone_with_new_inputs(const OutputVector& new_args) c
bool Mod::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const {
OV_OP_SCOPE(v1_Mod_evaluate);
OPENVINO_ASSERT(outputs.size() == 1);
OPENVINO_ASSERT(inputs.size() == 2);
outputs[0].set_shape(infer_broadcast_shape(this, inputs[0].get_shape(), inputs[1].get_shape()));
outputs[0].set_shape(infer_broadcast_shape(this, inputs));
using namespace ov::element;
return IfTypeOf<i8, i16, i32, i64, u8, u16, u32, u64>::apply<mod::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
}

@@ -19,14 +19,11 @@ struct Evaluate : element::NoAction<bool> {
static result_type visit(const Tensor& in0,
const Tensor& in1,
Tensor& out,
const Shape& shape0,
const Shape& shape1,
const AutoBroadcastSpec& broadcast_spec) {
using T = typename element_type_traits<ET>::value_type;
reference::subtract(in0.data<const T>(),
in1.data<const T>(),
out.data<T>(),
in0.get_shape(),
in1.get_shape(),
broadcast_spec);
reference::subtract(in0.data<const T>(), in1.data<const T>(), out.data<T>(), shape0, shape1, broadcast_spec);
return true;
}
};
@@ -48,14 +45,15 @@ std::shared_ptr<Node> Subtract::clone_with_new_inputs(const OutputVector& new_ar
bool Subtract::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v1_Subtract_evaluate);
OPENVINO_ASSERT(outputs.size() == 1);
OPENVINO_ASSERT(inputs.size() == 2);
outputs[0].set_shape(infer_broadcast_shape(this, inputs[0].get_shape(), inputs[1].get_shape()));
outputs[0].set_shape(infer_broadcast_shape(this, inputs));
using namespace ov::element;
return IfTypeOf<bf16, f16, f32, i8, i32, i64, u8, u32, u64>::apply<subtract::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob());
}

@@ -21,13 +21,15 @@ struct Evaluate : element::NoAction<bool> {
static result_type visit(const Tensor& arg0,
const Tensor& arg1,
Tensor& out,
const Shape& shape0,
const Shape& shape1,
const AutoBroadcastSpec& broadcast_spec) {
using T = typename element_type_traits<ET>::value_type;
reference::logical_xor(arg0.data<const T>(),
arg1.data<const T>(),
out.data<T>(),
arg0.get_shape(),
arg1.get_shape(),
shape0,
shape1,
broadcast_spec);
return true;
}
@@ -40,14 +42,15 @@ bool input_supported_type(const element::Type& et) {
bool evaluate(const Node* const op, TensorVector& outputs, const TensorVector& inputs) {
OPENVINO_ASSERT(outputs.size() == 1);
OPENVINO_ASSERT(inputs.size() == 2);
outputs[0].set_shape(infer_broadcast_shape(op, inputs[0].get_shape(), inputs[1].get_shape()));
outputs[0].set_shape(infer_broadcast_shape(op, inputs));
using namespace ov::element;
return IfTypeOf<boolean>::apply<logxor::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
op->get_autob());
}
} // namespace

@@ -14,9 +14,9 @@ template <element::Type_t ET>
bool evaluate(const std::shared_ptr<ov::op::v13::BitwiseAnd>& node,
ov::TensorVector& outputs,
const ov::TensorVector& inputs) {
OPENVINO_ASSERT(inputs.size() == 2);
OPENVINO_ASSERT(outputs.size() == 1);
outputs[0].set_shape(infer_broadcast_shape(node.get(), inputs[0].get_shape(), inputs[1].get_shape()));
outputs[0].set_shape(infer_broadcast_shape(node.get(), inputs));
using T = typename ov::element_type_traits<ET>::value_type;
ov::reference::bitwise_and(inputs[0].data<const T>(),
inputs[1].data<const T>(),

@@ -14,9 +14,9 @@ template <element::Type_t ET>
bool evaluate(const std::shared_ptr<ov::op::v13::BitwiseOr>& node,
ov::TensorVector& outputs,
const ov::TensorVector& inputs) {
OPENVINO_ASSERT(inputs.size() == 2);
OPENVINO_ASSERT(outputs.size() == 1);
outputs[0].set_shape(infer_broadcast_shape(node.get(), inputs[0].get_shape(), inputs[1].get_shape()));
outputs[0].set_shape(infer_broadcast_shape(node.get(), inputs));
using T = typename ov::element_type_traits<ET>::value_type;
ov::reference::bitwise_or(inputs[0].data<const T>(),
inputs[1].data<const T>(),

@@ -14,9 +14,9 @@ template <element::Type_t ET>
bool evaluate(const std::shared_ptr<ov::op::v13::BitwiseXor>& node,
ov::TensorVector& outputs,
const ov::TensorVector& inputs) {
OPENVINO_ASSERT(inputs.size() == 2);
OPENVINO_ASSERT(outputs.size() == 1);
outputs[0].set_shape(infer_broadcast_shape(node.get(), inputs[0].get_shape(), inputs[1].get_shape()));
outputs[0].set_shape(infer_broadcast_shape(node.get(), inputs));
using T = typename ov::element_type_traits<ET>::value_type;
ov::reference::bitwise_xor(inputs[0].data<const T>(),
inputs[1].data<const T>(),