[core] Migrate Divide operator to new API (#20766)

* Use ov:: namespace

* Drop HostTensor

* Use ov::util::make_tensor_of_max_value instead of ngraph::get_constant_max_of_type

* Use ov::util::make_tensor_of_min_value instead of ngraph::get_constant_min_of_type

* Refactor get_constant_min_of_type
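
A rough before/after sketch of the helper swap described above (illustrative only, not part of the commit; `et` stands for any ov::element::Type_t value):

// Before: deprecated helper returning a Constant node, or nullptr for unsupported types.
std::shared_ptr<ov::op::v0::Constant> c = ngraph::get_constant_max_of_type(et);

// After: helper returning a scalar ov::Tensor; an empty (falsy) Tensor for unsupported types.
ov::Tensor t = ov::util::make_tensor_of_max_value(et);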
Tomasz Jankowski 2023-11-03 10:35:43 +01:00 committed by GitHub
parent f890bf7930
commit 3386b85c08
4 changed files with 171 additions and 165 deletions


@@ -58,6 +58,11 @@ OPENVINO_API std::shared_ptr<op::v0::Constant> get_constant_from_source(const Output<Node>&
/// \return Tensor with maximum value.
Tensor make_tensor_of_max_value(const element::Type_t et);
/// \brief Make scalar tensor which stores minimum value of ov::element::Type.
/// \param et Element type to get its minimum.
/// \return Tensor with minimum value.
Tensor make_tensor_of_min_value(const element::Type_t et);
/// \brief Apply auto padding to padding_above and padding_below inputs
/// if all needed information is known.
///
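
A hedged usage sketch for the make_tensor_of_min_value declaration above (assumes the helper lives in ov::util, as the call sites in this commit do):

const auto min_t = ov::util::make_tensor_of_min_value(ov::element::f32);
OPENVINO_ASSERT(min_t, "element type has no defined minimum");  // empty Tensor means unsupported type
const float min_v = *min_t.data<const float>();  // scalar tensor holding numeric_limits<float>::min()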


@@ -45,9 +45,7 @@ public:
}
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
bool evaluate_lower(TensorVector& outputs) const override;
bool evaluate_upper(TensorVector& outputs) const override;
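
For callers, the switch from HostTensorVector to ov::TensorVector looks roughly like this (a sketch, not from the commit; a, b, t_a and t_b are hypothetical inputs):

const auto div = std::make_shared<ov::op::v1::Divide>(a, b);   // a, b: Output<Node>
ov::TensorVector outputs{{ov::element::f32, ov::Shape{4}}};    // preallocated output tensor
const bool ok = div->evaluate(outputs, ov::TensorVector{t_a, t_b});  // t_a, t_b: input Tensors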


@@ -2,88 +2,72 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/divide.hpp"
#include <ngraph/validation_util.hpp>
#include "openvino/op/divide.hpp"
#include "bound_evaluate.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/op/and.hpp"
#include "ngraph/op/equal.hpp"
#include "ngraph/op/less.hpp"
#include "ngraph/op/not.hpp"
#include "ngraph/op/or.hpp"
#include "ngraph/op/select.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "openvino/core/shape_util.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/equal.hpp"
#include "openvino/op/less.hpp"
#include "openvino/op/logical_and.hpp"
#include "openvino/op/logical_or.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/select.hpp"
#include "openvino/reference/divide.hpp"
#include "utils.hpp"
#include "validation_util.hpp"
using namespace std;
using namespace ngraph;
OPENVINO_SUPPRESS_DEPRECATED_START
namespace ov {
namespace op {
namespace v1 {
namespace divide {
namespace {
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
const HostTensorPtr& arg1,
const HostTensorPtr& out,
using ov::op::v0::Constant;
using ov::op::v0::Parameter;
struct Evaluate : element::NoAction<bool> {
using element::NoAction<bool>::visit;
template <element::Type_t ET, class T = fundamental_type_for<ET>>
static result_type visit(const Tensor& in0,
const Tensor& in1,
Tensor& out,
const Shape& shape0,
const Shape& shape1,
const op::AutoBroadcastSpec& broadcast_spec,
bool pythondiv) {
ov::reference::divide(arg0->get_data_ptr<ET>(),
arg1->get_data_ptr<ET>(),
out->get_data_ptr<ET>(),
arg0->get_shape(),
arg1->get_shape(),
const bool pythondiv) {
reference::divide(in0.data<const T>(),
in1.data<const T>(),
out.data<T>(),
shape0,
shape1,
broadcast_spec,
pythondiv);
return true;
}
bool evaluate_divide(const HostTensorPtr& arg0,
const HostTensorPtr& arg1,
const HostTensorPtr& out,
const op::AutoBroadcastSpec& broadcast_spec,
bool pythondiv) {
bool rc = true;
out->set_broadcast(broadcast_spec, arg0, arg1);
switch (arg0->get_element_type()) {
OPENVINO_TYPE_CASE(evaluate_divide, i32, arg0, arg1, out, broadcast_spec, pythondiv);
OPENVINO_TYPE_CASE(evaluate_divide, i64, arg0, arg1, out, broadcast_spec, pythondiv);
OPENVINO_TYPE_CASE(evaluate_divide, u32, arg0, arg1, out, broadcast_spec, pythondiv);
OPENVINO_TYPE_CASE(evaluate_divide, u64, arg0, arg1, out, broadcast_spec, pythondiv);
OPENVINO_TYPE_CASE(evaluate_divide, f16, arg0, arg1, out, broadcast_spec, pythondiv);
OPENVINO_TYPE_CASE(evaluate_divide, f32, arg0, arg1, out, broadcast_spec, pythondiv);
OPENVINO_TYPE_CASE(evaluate_divide, bf16, arg0, arg1, out, broadcast_spec, pythondiv);
default:
rc = false;
break;
}
return rc;
}
};
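// The helpers below evaluate single ops eagerly on Tensors: build the op with
// Parameter placeholders for its inputs, then call evaluate() on a TensorVector.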
ov::Tensor equality_mask(const ov::Tensor& tensor, const shared_ptr<op::Constant>& constant) {
auto mask_out = ov::TensorVector{{element::boolean, tensor.get_shape()}};
Tensor equality_mask(const Tensor& lhs, const Tensor& rhs) {
auto mask_out = TensorVector{{element::boolean, lhs.get_shape()}};
auto c_tensor = ov::Tensor(constant->get_element_type(), constant->get_shape());
memcpy(c_tensor.data(), constant->get_data_ptr(), c_tensor.get_byte_size());
const auto& param = std::make_shared<op::Parameter>(tensor.get_element_type(), tensor.get_shape());
op::v1::Equal(param, constant).evaluate(mask_out, ov::TensorVector{tensor, c_tensor});
const auto lhs_node = std::make_shared<Parameter>(lhs.get_element_type(), lhs.get_shape());
const auto rhs_node = std::make_shared<Parameter>(rhs.get_element_type(), rhs.get_shape());
Equal(lhs_node, rhs_node).evaluate(mask_out, TensorVector{lhs, rhs});
return mask_out.front();
}
ov::Tensor or_tensor(const ov::Tensor& lhs, const ov::Tensor& rhs) {
auto logical_or = op::v1::LogicalOr(std::make_shared<op::Parameter>(lhs.get_element_type(), lhs.get_shape()),
std::make_shared<op::Parameter>(rhs.get_element_type(), rhs.get_shape()),
ngraph::op::AutoBroadcastType::NUMPY);
Tensor or_tensor(const Tensor& lhs, const Tensor& rhs) {
auto logical_or = LogicalOr(std::make_shared<Parameter>(lhs.get_element_type(), lhs.get_shape()),
std::make_shared<Parameter>(rhs.get_element_type(), rhs.get_shape()),
AutoBroadcastType::NUMPY);
auto outs = ov::TensorVector{{lhs.get_element_type(), logical_or.get_output_shape(0)}};
logical_or.evaluate(outs, ov::TensorVector{lhs, rhs});
auto outs = TensorVector{{lhs.get_element_type(), logical_or.get_output_shape(0)}};
logical_or.evaluate(outs, TensorVector{lhs, rhs});
return outs.front();
}
bool evaluate_bound(const Node* node, ov::TensorVector& output_values, bool is_upper) {
bool evaluate_bound(const Node* node, TensorVector& output_values, bool is_upper) {
// For a positive arg2, divide has limits [low/up, up/low].
// For a negative arg2, the limits are [up/low, low/up].
// For an arg2 range spanning both positive and negative values, divide can give any result in [-inf, inf].
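// e.g. arg1 in [4, 8] and arg2 in [2, 4]: lower bound = 4/4 = 1, upper bound = 8/2 = 4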
@@ -96,109 +80,102 @@ bool evaluate_bound(const Node* node, ov::TensorVector& output_values, bool is_upper)
OPENVINO_ASSERT(PartialShape::broadcast_merge_into(input_shape, input2.get_partial_shape(), node->get_autob()),
"Argument shapes in divide operation are inconsistent.");
auto input1_low = ov::evaluate_lower_bound(input1);
const auto input1_low = evaluate_lower_bound(input1);
if (!input1_low)
return false;
auto input1_up = ov::evaluate_upper_bound(input1);
const auto input1_up = evaluate_upper_bound(input1);
if (!input1_up)
return false;
auto input2_low = ov::evaluate_lower_bound(input2);
const auto input2_low = evaluate_lower_bound(input2);
if (!input2_low)
return false;
auto input2_up = ov::evaluate_upper_bound(input2);
const auto input2_up = evaluate_upper_bound(input2);
if (!input2_up)
return false;
auto zeros_const = op::Constant::create(input2.get_element_type(), {}, {0});
const auto zero_t = ov::Tensor(input2.get_element_type(), Shape{});
const auto zeros_const = Constant::create(input2.get_element_type(), {}, {0});
const auto zero_t = Tensor(input2.get_element_type(), Shape{});
memcpy(zero_t.data(), zeros_const->get_data_ptr(), zero_t.get_byte_size());
OPENVINO_SUPPRESS_DEPRECATED_START
auto max_constant = get_constant_max_of_type(input2.get_element_type());
auto dynamic_mask = or_tensor(equality_mask(input1_up, max_constant), equality_mask(input2_up, max_constant));
OPENVINO_SUPPRESS_DEPRECATED_END
const auto max_value = ov::util::make_tensor_of_max_value(input2.get_element_type());
const auto dynamic_mask = or_tensor(equality_mask(input1_up, max_value), equality_mask(input2_up, max_value));
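// bounds equal to the type's maximum are treated as unbounded (dynamic) values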
// mask marking positive values of arg2
auto less_up_outputs = ov::TensorVector{{element::boolean, input2.get_shape()}};
auto less_up_outputs = TensorVector{{element::boolean, input2.get_shape()}};
auto& input2_positive_up_mask = less_up_outputs.front();
bool status = op::v1::Less().evaluate(less_up_outputs, ov::TensorVector{zero_t, input2_up});
bool status = Less().evaluate(less_up_outputs, TensorVector{zero_t, input2_up});
if (!status)
return status;
// mask marking negative values of arg2
auto less_low_outputs = ov::TensorVector{{element::boolean, input2.get_shape()}};
auto less_low_outputs = TensorVector{{element::boolean, input2.get_shape()}};
auto& input2_negative_low_mask = less_low_outputs.front();
status = op::v1::Less().evaluate(less_low_outputs, {input2_low, zero_t});
status = Less().evaluate(less_low_outputs, {input2_low, zero_t});
if (!status)
return status;
// mask marking arg2 ranges that span 0
auto logical_and_up_outputs = ov::TensorVector{{element::boolean, input2.get_shape()}};
auto logical_and_up_outputs = TensorVector{{element::boolean, input2.get_shape()}};
auto& input2_low_negative_up_positive_mask = logical_and_up_outputs.front();
status = op::v1::LogicalAnd().evaluate(logical_and_up_outputs, {input2_negative_low_mask, input2_positive_up_mask});
status = LogicalAnd().evaluate(logical_and_up_outputs, {input2_negative_low_mask, input2_positive_up_mask});
if (!status)
return status;
auto value1_outs = ov::TensorVector{{input1.get_element_type(), input_shape.get_shape()}};
auto value1_outs = TensorVector{{input1.get_element_type(), input_shape.get_shape()}};
auto& value1 = value1_outs.front();
auto value2_outs = ov::TensorVector{{input2.get_element_type(), input2.get_shape()}};
auto value2_outs = TensorVector{{input2.get_element_type(), input2.get_shape()}};
auto& value2 = value2_outs.front();
if (!is_upper) {
status = op::v1::Select().evaluate(value1_outs, {input2_positive_up_mask, input1_low, input1_up});
status = Select().evaluate(value1_outs, {input2_positive_up_mask, input1_low, input1_up});
if (!status)
return status;
status = op::v1::Select().evaluate(value2_outs, {input2_positive_up_mask, input2_up, input2_low});
status = Select().evaluate(value2_outs, {input2_positive_up_mask, input2_up, input2_low});
if (!status)
return status;
status = node->evaluate(output_values, ov::TensorVector{value1, value2});
status = node->evaluate(output_values, TensorVector{value1, value2});
if (!status)
return status;
// where zeros fall inside the range of the second arg, replace results with the minimum value
OPENVINO_SUPPRESS_DEPRECATED_START
auto output_minimum_value = get_constant_min_of_type(output_values[0].get_element_type());
OPENVINO_SUPPRESS_DEPRECATED_END
if (output_minimum_value == nullptr)
const auto output_min_value = ov::util::make_tensor_of_min_value(output_values[0].get_element_type());
if (!output_min_value)
return false;
auto out_min_v = ov::Tensor(output_minimum_value->get_element_type(), output_minimum_value->get_shape());
memcpy(out_min_v.data(), output_minimum_value->get_data_ptr(), out_min_v.get_byte_size());
status = op::v1::Select().evaluate(output_values,
{input2_low_negative_up_positive_mask, out_min_v, output_values[0]});
status = Select().evaluate(output_values,
{input2_low_negative_up_positive_mask, output_min_value, output_values[0]});
if (!status)
return status;
status = op::v1::Select().evaluate(output_values, {dynamic_mask, zero_t, output_values[0]});
status = Select().evaluate(output_values, {dynamic_mask, zero_t, output_values[0]});
if (!status)
return status;
} else {
status = op::v1::Select().evaluate(value1_outs, {input2_positive_up_mask, input1_up, input1_low});
status = Select().evaluate(value1_outs, {input2_positive_up_mask, input1_up, input1_low});
if (!status)
return status;
status = op::v1::Select().evaluate(value2_outs, {input2_positive_up_mask, input2_low, input2_up});
status = Select().evaluate(value2_outs, {input2_positive_up_mask, input2_low, input2_up});
if (!status)
return status;
// create a mask marking zeros in the second argument
auto eq_zero_mask = ov::TensorVector{{element::boolean, input2.get_shape()}};
auto eq_zero_mask = TensorVector{{element::boolean, input2.get_shape()}};
auto& input2_zeros_mask = eq_zero_mask.front();
bool status = op::v1::Equal().evaluate(eq_zero_mask, {value2, zero_t});
bool status = Equal().evaluate(eq_zero_mask, {value2, zero_t});
if (!status)
return status;
// replace zeros with ones so divide can be evaluated for the remaining argument values
auto ones = op::Constant::create(input2.get_element_type(), input2.get_shape(), {1});
auto ones_t = ov::Tensor(ones->get_element_type(), ones->get_shape());
const auto ones = Constant::create(input2.get_element_type(), input2.get_shape(), {1});
const auto ones_t = Tensor(ones->get_element_type(), ones->get_shape());
memcpy(ones_t.data(), ones->get_data_ptr(), ones_t.get_byte_size());
status = op::v1::Select().evaluate(value2_outs, {input2_zeros_mask, ones_t, value2});
status = Select().evaluate(value2_outs, {input2_zeros_mask, ones_t, value2});
if (!status)
return status;
@@ -207,27 +184,22 @@ bool evaluate_bound(const Node* node, ov::TensorVector& output_values, bool is_upper)
return status;
// where zeros were found in the second argument, replace results with the maximum value
OPENVINO_SUPPRESS_DEPRECATED_START
auto output_maximum_value = get_constant_max_of_type(output_values[0].get_element_type());
OPENVINO_SUPPRESS_DEPRECATED_END
if (output_maximum_value == nullptr)
const auto out_max_value = ov::util::make_tensor_of_max_value(output_values[0].get_element_type());
if (!out_max_value)
return false;
auto out_max_v = ov::Tensor(output_maximum_value->get_element_type(), output_maximum_value->get_shape());
memcpy(out_max_v.data(), output_maximum_value->get_data_ptr(), out_max_v.get_byte_size());
status = op::v1::Select().evaluate(output_values, {input2_zeros_mask, out_max_v, output_values[0]});
status = Select().evaluate(output_values, {input2_zeros_mask, out_max_value, output_values[0]});
if (!status)
return status;
// where zeros fall inside the [low, up] range of the second arg, replace results with the maximum value
status = op::v1::Select().evaluate(output_values,
{input2_low_negative_up_positive_mask, out_max_v, output_values[0]});
status =
Select().evaluate(output_values, {input2_low_negative_up_positive_mask, out_max_value, output_values[0]});
if (!status)
return status;
// in case input elements were dynamic, replace them with the maximum value
status = op::v1::Select().evaluate(output_values, {dynamic_mask, out_max_v, output_values[0]});
status = Select().evaluate(output_values, {dynamic_mask, out_max_value, output_values[0]});
if (!status)
return status;
}
@@ -236,14 +208,12 @@ bool evaluate_bound(const Node* node, ov::TensorVector& output_values, bool is_upper)
} // namespace
} // namespace divide
// ------------------------------ v1 -------------------------------------------
op::v1::Divide::Divide(const Output<Node>& arg0, const Output<Node>& arg1, const AutoBroadcastSpec& auto_broadcast)
Divide::Divide(const Output<Node>& arg0, const Output<Node>& arg1, const AutoBroadcastSpec& auto_broadcast)
: BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) {
constructor_validate_and_infer_types();
}
op::v1::Divide::Divide(const Output<Node>& arg0,
Divide::Divide(const Output<Node>& arg0,
const Output<Node>& arg1,
bool pythondiv,
const AutoBroadcastSpec& auto_broadcast)
@@ -252,45 +222,60 @@ op::v1::Divide::Divide(const Output<Node>& arg0,
constructor_validate_and_infer_types();
}
bool op::v1::Divide::visit_attributes(AttributeVisitor& visitor) {
bool Divide::visit_attributes(AttributeVisitor& visitor) {
OV_OP_SCOPE(v1_Divide_visit_attributes);
BinaryElementwiseArithmetic::visit_attributes(visitor);
visitor.on_attribute("m_pythondiv", m_pythondiv);
return true;
}
shared_ptr<Node> op::v1::Divide::clone_with_new_inputs(const OutputVector& new_args) const {
std::shared_ptr<Node> Divide::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v1_Divide_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v1::Divide>(new_args.at(0), new_args.at(1), this->is_pythondiv(), this->get_autob());
return std::make_shared<Divide>(new_args.at(0), new_args.at(1), this->is_pythondiv(), this->get_autob());
}
bool op::v1::Divide::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool Divide::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v1_Divide_evaluate);
return divide::evaluate_divide(inputs[0], inputs[1], outputs[0], get_autob(), is_pythondiv());
OPENVINO_ASSERT(outputs.size() == 1);
outputs[0].set_shape(infer_broadcast_shape(this, inputs));
using namespace ov::element;
return IfTypeOf<i32, i64, u32, u64, f16, bf16, f32>::apply<divide::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
inputs[0].get_shape(),
inputs[1].get_shape(),
get_autob(),
is_pythondiv());
}
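// note: the type list in has_evaluate() below mirrors the IfTypeOf<...> list above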
bool op::v1::Divide::has_evaluate() const {
bool Divide::has_evaluate() const {
OV_OP_SCOPE(v1_Divide_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u32:
case ngraph::element::u64:
case ngraph::element::f16:
case ngraph::element::bf16:
case ngraph::element::f32:
case element::i32:
case element::i64:
case element::u32:
case element::u64:
case element::f16:
case element::bf16:
case element::f32:
return true;
default:
break;
}
return false;
}
}
bool ov::op::v1::Divide::evaluate_lower(TensorVector& outputs) const {
bool Divide::evaluate_lower(TensorVector& outputs) const {
return divide::evaluate_bound(this, outputs, false);
}
bool ov::op::v1::Divide::evaluate_upper(TensorVector& outputs) const {
bool Divide::evaluate_upper(TensorVector& outputs) const {
return divide::evaluate_bound(this, outputs, true);
}
} // namespace v1
} // namespace op
} // namespace ov
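
The migrated evaluate() above dispatches on element type through ov::element::IfTypeOf with a visitor struct (divide::Evaluate) whose templated visit() handles every supported type. Below is a minimal, self-contained sketch of how such a compile-time dispatcher can be built; it is an illustration, not OpenVINO's actual element_visitor.hpp (the enum Et, NegateEval, and the simplified NoAction are all stand-ins):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <utility>

enum class Et { i32, f32, boolean };

// Maps an element-type tag to the C++ type that stores it.
template <Et ET> struct fundamental_type_for_t;
template <> struct fundamental_type_for_t<Et::i32> { using type = int32_t; };
template <> struct fundamental_type_for_t<Et::f32> { using type = float; };

// Fallback action: hit when the runtime type is not in the dispatch list.
struct NoAction {
    static bool visit() { return false; }
};

// Visitor: one templated overload covers all supported element types.
struct NegateEval : NoAction {
    using NoAction::visit;
    template <Et ET, class T = typename fundamental_type_for_t<ET>::type>
    static bool visit(const void* in, void* out, std::size_t n) {
        auto src = static_cast<const T*>(in);
        auto dst = static_cast<T*>(out);
        for (std::size_t i = 0; i < n; ++i)
            dst[i] = -src[i];
        return true;
    }
};

// Walks the compile-time type list, comparing against the runtime tag.
template <Et... Types> struct IfTypeOf;

template <> struct IfTypeOf<> {
    template <class Action, class... Args>
    static bool apply(Et, Args&&...) { return Action::visit(); }  // list exhausted: fallback
};

template <Et Head, Et... Tail> struct IfTypeOf<Head, Tail...> {
    template <class Action, class... Args>
    static bool apply(Et et, Args&&... args) {
        return et == Head ? Action::template visit<Head>(std::forward<Args>(args)...)
                          : IfTypeOf<Tail...>::template apply<Action>(et, std::forward<Args>(args)...);
    }
};

int main() {
    float in[2] = {1.5f, -2.0f}, out[2];
    bool ok = IfTypeOf<Et::i32, Et::f32>::apply<NegateEval>(Et::f32, in, out, std::size_t{2});
    std::printf("%d %g %g\n", ok, out[0], out[1]);   // prints: 1 -1.5 2
    bool bad = IfTypeOf<Et::i32, Et::f32>::apply<NegateEval>(Et::boolean, in, out, std::size_t{2});
    std::printf("%d\n", bad);                        // prints: 0 (boolean not in the list)
}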


@@ -916,32 +916,8 @@ std::shared_ptr<op::v0::Constant> get_constant_max_of_type(element::Type_t t) {
}
std::shared_ptr<op::v0::Constant> get_constant_min_of_type(element::Type_t t) {
#define OPENVINO_TYPE_TO_MIN_CONST(t) \
case t: \
return ov::op::v0::Constant::create( \
t, \
{}, \
{std::numeric_limits<typename element_type_traits<t>::value_type>::min()}); \
break
switch (t) {
OPENVINO_TYPE_TO_MIN_CONST(element::boolean);
OPENVINO_TYPE_TO_MIN_CONST(element::bf16);
OPENVINO_TYPE_TO_MIN_CONST(element::f16);
OPENVINO_TYPE_TO_MIN_CONST(element::f32);
OPENVINO_TYPE_TO_MIN_CONST(element::f64);
OPENVINO_TYPE_TO_MIN_CONST(element::i8);
OPENVINO_TYPE_TO_MIN_CONST(element::i16);
OPENVINO_TYPE_TO_MIN_CONST(element::i32);
OPENVINO_TYPE_TO_MIN_CONST(element::i64);
OPENVINO_TYPE_TO_MIN_CONST(element::u1);
OPENVINO_TYPE_TO_MIN_CONST(element::u8);
OPENVINO_TYPE_TO_MIN_CONST(element::u16);
OPENVINO_TYPE_TO_MIN_CONST(element::u32);
OPENVINO_TYPE_TO_MIN_CONST(element::u64);
default:
return nullptr;
}
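// Thin wrapper over the Tensor-based helper; still returns nullptr for unsupported types.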
auto tensor = ov::util::make_tensor_of_min_value(t);
return tensor ? std::make_shared<op::v0::Constant>(tensor) : nullptr;
}
std::shared_ptr<op::v0::Constant> get_constant_lowest_of_type(element::Type_t t) {
@@ -1407,6 +1383,48 @@ Tensor make_tensor_of_max_value(const element::Type_t et) {
}
}
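// Note: for floating-point T, std::numeric_limits<T>::min() is the smallest positive
// normal value, not the most negative one; get_constant_lowest_of_type() covers that case.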
template <class T>
Tensor make_tensor_of_min_value(const element::Type_t et) {
Tensor t{et, Shape{}};
*t.data<T>() = std::numeric_limits<T>::min();
return t;
}
Tensor make_tensor_of_min_value(const element::Type_t et) {
switch (et) {
case element::boolean:
return make_tensor_of_min_value<ov::fundamental_type_for<element::boolean>>(et);
case element::bf16:
return make_tensor_of_min_value<ov::fundamental_type_for<element::bf16>>(et);
case element::f16:
return make_tensor_of_min_value<ov::fundamental_type_for<element::f16>>(et);
case element::f32:
return make_tensor_of_min_value<ov::fundamental_type_for<element::f32>>(et);
case element::f64:
return make_tensor_of_min_value<ov::fundamental_type_for<element::f64>>(et);
case element::i8:
return make_tensor_of_min_value<ov::fundamental_type_for<element::i8>>(et);
case element::i16:
return make_tensor_of_min_value<ov::fundamental_type_for<element::i16>>(et);
case element::i32:
return make_tensor_of_min_value<ov::fundamental_type_for<element::i32>>(et);
case element::i64:
return make_tensor_of_min_value<ov::fundamental_type_for<element::i64>>(et);
case element::u1:
return make_tensor_of_min_value<ov::fundamental_type_for<element::u1>>(et);
case element::u8:
return make_tensor_of_min_value<ov::fundamental_type_for<element::u8>>(et);
case element::u16:
return make_tensor_of_min_value<ov::fundamental_type_for<element::u16>>(et);
case element::u32:
return make_tensor_of_min_value<ov::fundamental_type_for<element::u32>>(et);
case element::u64:
return make_tensor_of_min_value<ov::fundamental_type_for<element::u64>>(et);
default:
return {};
}
}
std::vector<PartialShape> get_tensors_partial_shapes(const TensorVector& tensors) {
std::vector<PartialShape> shapes;
shapes.reserve(tensors.size());