[core] Api 2.0/migrate Subtract op to new API (#20108)

* Migrate Subtract to new API

* Sync has_evaluate precision with evaluate
Pawel Raasz 2023-10-03 10:55:20 +02:00 committed by GitHub
parent 9a9c74f6c6
commit f8881dd2a4
3 changed files with 67 additions and 73 deletions


@@ -27,9 +27,7 @@ public:
const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY));
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
- OPENVINO_SUPPRESS_DEPRECATED_START
- bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
- OPENVINO_SUPPRESS_DEPRECATED_END
+ bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v1
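
The header change above swaps the deprecated HostTensorVector overload for the ov::Tensor-based one. A minimal caller-side sketch of the new signature, assuming illustrative shapes and a hypothetical helper name that is not part of this commit:

#include <algorithm>
#include <memory>

#include "openvino/op/parameter.hpp"
#include "openvino/op/subtract.hpp"
#include "openvino/runtime/tensor.hpp"

// Builds a v1::Subtract node and evaluates it on plain ov::Tensor objects,
// which is what the new evaluate(TensorVector&, const TensorVector&) expects.
bool run_subtract_reference() {
    using namespace ov;
    const auto a = std::make_shared<op::v0::Parameter>(element::f32, Shape{2, 2});
    const auto b = std::make_shared<op::v0::Parameter>(element::f32, Shape{2, 2});
    const auto sub = std::make_shared<op::v1::Subtract>(a, b);

    TensorVector inputs{Tensor(element::f32, Shape{2, 2}), Tensor(element::f32, Shape{2, 2})};
    TensorVector outputs{Tensor(element::f32, Shape{2, 2})};
    std::fill_n(inputs[0].data<float>(), 4, 5.f);
    std::fill_n(inputs[1].data<float>(), 4, 2.f);

    // has_evaluate() now reports exactly the precisions evaluate() dispatches on.
    return sub->has_evaluate() && sub->evaluate(outputs, inputs);  // every element of outputs[0] becomes 3.f
}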


@@ -4,31 +4,36 @@
#pragma once
#include <cstddef>
+ #include <algorithm>
+ #include <functional>
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/shape.hpp"
#include "openvino/reference/autobroadcast_binop.hpp"
namespace ov {
namespace reference {
- template <typename T>
+ template <class T>
void subtract(const T* arg0, const T* arg1, T* out, size_t count) {
- for (size_t i = 0; i < count; i++) {
- out[i] = arg0[i] - arg1[i];
- }
+ std::transform(arg0, std::next(arg0, count), arg1, out, std::minus<T>());
}
- template <typename T>
+ /**
+ * @brief Reference implementation of binary elementwise Subtract operator.
+ *
+ * @param arg0 Pointer to input 0 data.
+ * @param arg1 Pointer to input 1 data.
+ * @param out Pointer to output data.
+ * @param arg0_shape Input 0 shape.
+ * @param arg1_shape Input 1 shape.
+ * @param broadcast_spec Broadcast specification mode.
+ */
+ template <class T>
void subtract(const T* arg0,
const T* arg1,
T* out,
const Shape& arg0_shape,
const Shape& arg1_shape,
const op::AutoBroadcastSpec& broadcast_spec) {
- autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, [](T x, T y) -> T {
- return x - y;
- });
+ autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, std::minus<T>());
}
} // namespace reference
} // namespace ov
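
Both reference overloads shown in this hunk can be exercised directly; a small standalone sketch, with the sizes, values, and the NUMPY broadcast of {2,3} against {3} chosen purely for illustration:

#include <vector>

#include "openvino/reference/subtract.hpp"

void subtract_reference_demo() {
    using namespace ov;

    // Flat overload: out[i] = arg0[i] - arg1[i], now expressed via std::transform/std::minus.
    std::vector<float> a{5.f, 7.f, 9.f}, b{1.f, 2.f, 3.f}, out(3);
    reference::subtract(a.data(), b.data(), out.data(), a.size());  // {4, 5, 6}

    // Broadcasting overload: a {2,3} tensor minus a {3} row vector under NUMPY rules.
    std::vector<float> lhs{1.f, 2.f, 3.f, 4.f, 5.f, 6.f}, rhs{1.f, 1.f, 1.f}, res(6);
    reference::subtract(lhs.data(),
                        rhs.data(),
                        res.data(),
                        Shape{2, 3},
                        Shape{3},
                        op::AutoBroadcastSpec(op::AutoBroadcastType::NUMPY));  // {0, 1, 2, 3, 4, 5}
}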


@@ -2,89 +2,80 @@
// SPDX-License-Identifier: Apache-2.0
//
- #include "ngraph/op/subtract.hpp"
+ #include "openvino/op/subtract.hpp"
+ #include "element_visitor.hpp"
#include "itt.hpp"
- #include "ngraph/op/negative.hpp"
- #include "ngraph/runtime/host_tensor.hpp"
- #include "ngraph/validation_util.hpp"
#include "openvino/reference/subtract.hpp"
+ #include "utils.hpp"
- using namespace std;
- using namespace ngraph;
- OPENVINO_SUPPRESS_DEPRECATED_START
+ namespace ov {
+ namespace op {
namespace subtract {
- namespace {
+ struct Evaluate : element::NoAction<bool> {
+ using element::NoAction<bool>::visit;
template <element::Type_t ET>
- bool evaluate(const HostTensorPtr& arg0,
- const HostTensorPtr& arg1,
- const HostTensorPtr& out,
- const op::AutoBroadcastSpec& broadcast_spec) {
- ov::reference::subtract(arg0->get_data_ptr<ET>(),
- arg1->get_data_ptr<ET>(),
- out->get_data_ptr<ET>(),
- arg0->get_shape(),
- arg1->get_shape(),
+ static result_type visit(const Tensor& in0,
+ const Tensor& in1,
+ Tensor& out,
+ const AutoBroadcastSpec& broadcast_spec) {
+ using T = typename element_type_traits<ET>::value_type;
+ reference::subtract(in0.data<const T>(),
+ in1.data<const T>(),
+ out.data<T>(),
+ in0.get_shape(),
+ in1.get_shape(),
broadcast_spec);
return true;
}
- bool evaluate_subtract(const HostTensorPtr& arg0,
- const HostTensorPtr& arg1,
- const HostTensorPtr& out,
- const op::AutoBroadcastSpec& broadcast_spec) {
- bool rc = true;
- out->set_broadcast(broadcast_spec, arg0, arg1);
- switch (arg0->get_element_type()) {
- NGRAPH_TYPE_CASE(evaluate_subtract, i8, arg0, arg1, out, broadcast_spec);
- NGRAPH_TYPE_CASE(evaluate_subtract, i32, arg0, arg1, out, broadcast_spec);
- NGRAPH_TYPE_CASE(evaluate_subtract, i64, arg0, arg1, out, broadcast_spec);
- NGRAPH_TYPE_CASE(evaluate_subtract, u8, arg0, arg1, out, broadcast_spec);
- NGRAPH_TYPE_CASE(evaluate_subtract, u32, arg0, arg1, out, broadcast_spec);
- NGRAPH_TYPE_CASE(evaluate_subtract, u64, arg0, arg1, out, broadcast_spec);
- NGRAPH_TYPE_CASE(evaluate_subtract, f16, arg0, arg1, out, broadcast_spec);
- NGRAPH_TYPE_CASE(evaluate_subtract, f32, arg0, arg1, out, broadcast_spec);
- NGRAPH_TYPE_CASE(evaluate_subtract, bf16, arg0, arg1, out, broadcast_spec);
- default:
- rc = false;
- break;
- }
- return rc;
- }
- } // namespace
+ };
} // namespace subtract
// ------------------------------- v1 ------------------------------------------
- op::v1::Subtract::Subtract(const Output<Node>& arg0, const Output<Node>& arg1, const AutoBroadcastSpec& auto_broadcast)
+ namespace v1 {
+ Subtract::Subtract(const Output<Node>& arg0, const Output<Node>& arg1, const AutoBroadcastSpec& auto_broadcast)
: BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) {
constructor_validate_and_infer_types();
}
- shared_ptr<Node> op::v1::Subtract::clone_with_new_inputs(const OutputVector& new_args) const {
+ std::shared_ptr<Node> Subtract::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v1_Subtract_clone_with_new_inputs);
check_new_args_count(this, new_args);
- return make_shared<op::v1::Subtract>(new_args.at(0), new_args.at(1), this->get_autob());
+ return std::make_shared<Subtract>(new_args.at(0), new_args.at(1), get_autob());
}
- bool op::v1::Subtract::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
+ bool Subtract::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v1_Subtract_evaluate);
- return subtract::evaluate_subtract(inputs[0], inputs[1], outputs[0], get_autob());
+ OPENVINO_ASSERT(outputs.size() == 1);
+ OPENVINO_ASSERT(inputs.size() == 2);
+ outputs[0].set_shape(infer_broadcast_shape(this, inputs[0].get_shape(), inputs[1].get_shape()));
+ using namespace ov::element;
+ return IfTypeOf<bf16, f16, f32, i8, i32, i64, u8, u32, u64>::apply<subtract::Evaluate>(inputs[0].get_element_type(),
+ inputs[0],
+ inputs[1],
+ outputs[0],
+ get_autob());
}
- bool op::v1::Subtract::has_evaluate() const {
+ bool Subtract::has_evaluate() const {
OV_OP_SCOPE(v1_Subtract_has_evaluate);
switch (get_input_element_type(0)) {
- case ngraph::element::i32:
- case ngraph::element::i64:
- case ngraph::element::u32:
- case ngraph::element::u64:
- case ngraph::element::f16:
- case ngraph::element::f32:
- case ngraph::element::bf16:
+ case element::bf16:
+ case element::f16:
+ case element::f32:
+ case element::i8:
+ case element::i32:
+ case element::i64:
+ case element::u8:
+ case element::u32:
+ case element::u64:
return true;
default:
break;
}
return false;
}
}
+ } // namespace v1
+ } // namespace op
+ } // namespace ov
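
For readers who have not met element_visitor.hpp: IfTypeOf<...>::apply walks the listed element types and, for the one equal to the runtime element type, instantiates the functor's templated visit; the element::NoAction<bool> base supplies the return-false fallback that replaces the old default: branch. Below is a self-contained analogue of that pattern with hypothetical names (DispatchSubtract, dispatch_subtract), not the real internal API:

#include <cstddef>

#include "openvino/core/type/element_type.hpp"
#include "openvino/core/type/element_type_traits.hpp"

namespace demo {
using namespace ov;

// Plays the role of subtract::Evaluate: one templated visit per element type.
struct DispatchSubtract {
    template <element::Type_t ET>
    static bool visit(const void* arg0, const void* arg1, void* out, size_t count) {
        using T = typename element_type_traits<ET>::value_type;
        const auto* a = static_cast<const T*>(arg0);
        const auto* b = static_cast<const T*>(arg1);
        auto* o = static_cast<T*>(out);
        for (size_t i = 0; i < count; ++i)
            o[i] = a[i] - b[i];
        return true;
    }
};

// Plays the role of IfTypeOf<f32, i32, ...>::apply: pick visit<ET> by runtime type.
inline bool dispatch_subtract(element::Type_t et, const void* arg0, const void* arg1, void* out, size_t count) {
    switch (et) {
    case element::Type_t::f32:
        return DispatchSubtract::visit<element::Type_t::f32>(arg0, arg1, out, count);
    case element::Type_t::i32:
        return DispatchSubtract::visit<element::Type_t::i32>(arg0, arg1, out, count);
    default:
        return false;  // the NoAction<bool> role: unsupported type, nothing evaluated
    }
}
}  // namespace demo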