Migrate mod op evaluate (#19687)

Pawel Raasz 2023-09-12 13:27:51 +02:00 committed by GitHub
parent 693c6d7a11
commit e3f1ff7f2a
5 changed files with 141 additions and 88 deletions


@@ -27,10 +27,55 @@ OPENVINO_API Shape make_dynamic_shape();
OPENVINO_DEPRECATED("This function is deprecated and will be removed soon.")
OPENVINO_API bool is_dynamic_shape(const Shape& s);
/**
* @brief Creates reduced shape from input by removing dimensions.
*
* @param input Input shape for reduce calculation.
* @param axes Reduction axes.
* @return Reduced shape.
*/
OPENVINO_API Shape reduce(const Shape& input, const AxisSet& axes);
/**
 * @brief Creates reduced shape from input by removing or replacing reduced dimensions.
 *
 * The reduction behavior depends on the `keep_dims` flag. If it is set to true, each reduced dimension is replaced by `1`;
 * otherwise it is removed.
*
* @param input Input shape for reduce calculation.
* @param axes Reduction axes.
* @param keep_dims Flag to keep reduced dimension.
* @return Reduced shape.
*/
OPENVINO_API Shape reduce(const Shape& input, const AxisSet& axes, const bool keep_dims);
/**
* @brief Creates reduced vector from input by removing elements.
*
* @param input Input vector for reduce calculation.
* @param axes Reduction axes.
 * @return Reduced vector.
*/
OPENVINO_API std::vector<size_t> reduce(const std::vector<size_t>& input, const AxisSet& axes);
/**
* @brief Creates reduced shape from input by replacing reduced dimension with `1`.
*
* @param input Input shape for reduce calculation.
* @param axes Reduction axes.
* @return Reduced shape.
*/
OPENVINO_API Shape reduce_keep_dims(const Shape& input, const AxisSet& axes);
/**
 * @brief Get the broadcast shape by merging the second shape into the first according to the broadcast specification.
*
* @param first First input shape.
* @param second Second input shape.
* @param broadcast_spec Broadcast specification.
*
* @return Result shape from inputs with applied broadcast specification.
*/
Shape get_broadcast_shape(const Shape& first, const Shape& second, const op::AutoBroadcastSpec& broadcast_spec);
} // namespace util
} // namespace ov
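
A minimal usage sketch (not part of the commit) for the reduce helpers declared above; the include path mirrors the one used later in this commit's mod.cpp, and the concrete shapes and axes are illustrative assumptions.

#include "shape_util.hpp"

#include <cassert>

int main() {
    const ov::Shape input{2, 3, 4};
    const ov::AxisSet axes{1};

    // The reduced dimension is removed entirely.
    assert(ov::util::reduce(input, axes) == ov::Shape({2, 4}));

    // The reduced dimension is kept and replaced by 1.
    assert(ov::util::reduce_keep_dims(input, axes) == ov::Shape({2, 1, 4}));

    // The keep_dims overload selects between the two behaviors.
    assert(ov::util::reduce(input, axes, true) == ov::Shape({2, 1, 4}));
    return 0;
}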


@@ -8,19 +8,41 @@
#include <cstddef>
#include "openvino/reference/autobroadcast_binop.hpp"
#include "openvino/reference/utils/type_util.hpp"
namespace ov {
namespace reference {
template <typename T>
void mod(const T* arg0,
const T* arg1,
T* out,
namespace func {
template <class T, typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
constexpr T mod(const T x, const T y) {
return x % y;
}
template <class T, typename std::enable_if<ov::is_floating_point<T>()>::type* = nullptr>
T mod(const T x, const T y) {
return x - (std::trunc(x / y) * y);
}
} // namespace func
/**
* @brief Reference implementation of binary elementwise Mod operator.
*
* @param arg0 Iterator to input 0 data.
* @param arg1 Iterator to input 1 data.
* @param out Iterator to output data.
* @param arg_shape0 Input 0 shape.
* @param arg_shape1 Input 1 shape.
* @param broadcast_spec Broadcast specification mode.
*/
template <class InputIt, class OutputIt>
void mod(InputIt arg0,
InputIt arg1,
OutputIt out,
const Shape& arg_shape0,
const Shape& arg_shape1,
const op::AutoBroadcastSpec& broadcast_spec) {
autobroadcast_binop(arg0, arg1, out, arg_shape0, arg_shape1, broadcast_spec, [](T x, T y) -> T {
return static_cast<T>(x - std::truncf(static_cast<float>(x / y)) * y);
});
using T = typename std::iterator_traits<OutputIt>::value_type;
autobroadcast_binop(arg0, arg1, out, arg_shape0, arg_shape1, broadcast_spec, &func::mod<T>);
}
} // namespace reference
} // namespace ov
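
A short sketch (assumed shapes and values) exercising the refactored reference kernel above. Floating-point inputs take the x - trunc(x / y) * y branch of func::mod, so the result keeps the sign of the dividend, matching C++ integer % (truncated division).

#include <cassert>

#include "openvino/reference/mod.hpp"

int main() {
    const float a[]{7.f, -7.f, 7.f};
    const float b[]{3.f, 3.f, -3.f};
    float out[3]{};

    // T is deduced from the output iterator (float*), so func::mod<float> is selected.
    ov::reference::mod(a, b, out, ov::Shape{3}, ov::Shape{3},
                       ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::NUMPY));

    assert(out[0] == 1.f);   // 7 mod 3
    assert(out[1] == -1.f);  // -7 mod 3: sign follows the dividend
    assert(out[2] == 1.f);   // 7 mod -3
    return 0;
}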


@@ -2,101 +2,79 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/mod.hpp"
#include "openvino/op/mod.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "openvino/reference/mod.hpp"
#include "shape_util.hpp"
using namespace std;
using namespace ngraph;
namespace ov {
namespace op {
namespace mod {
struct Evaluate : ov::element::NoAction<bool> {
using ov::element::NoAction<bool>::visit;
// ------------------------------ v1 -------------------------------------------
template <element::Type_t ET>
static result_type visit(const Tensor& in0,
const Tensor& in1,
Tensor& out,
const AutoBroadcastSpec& broadcast_spec) {
using T = typename element_type_traits<ET>::value_type;
reference::mod(in0.data<const T>(),
in1.data<const T>(),
out.data<T>(),
in0.get_shape(),
in1.get_shape(),
broadcast_spec);
return true;
}
};
} // namespace mod
op::v1::Mod::Mod(const Output<Node>& arg0, const Output<Node>& arg1, const AutoBroadcastSpec& auto_broadcast)
namespace v1 {
v1::Mod::Mod(const Output<Node>& arg0, const Output<Node>& arg1, const AutoBroadcastSpec& auto_broadcast)
: BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) {
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::v1::Mod::clone_with_new_inputs(const OutputVector& new_args) const {
std::shared_ptr<Node> Mod::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v1_Mod_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<Mod>(new_args.at(0), new_args.at(1), this->get_autob());
return std::make_shared<Mod>(new_args.at(0), new_args.at(1), this->get_autob());
}
namespace mod_op {
namespace {
template <typename T>
bool evaluate(const ov::Tensor& arg0,
const ov::Tensor& arg1,
const ov::Tensor& out,
const op::AutoBroadcastSpec& broadcast_spec) {
ov::reference::mod(arg0.data<T>(),
arg1.data<T>(),
out.data<T>(),
arg0.get_shape(),
arg1.get_shape(),
broadcast_spec);
return true;
}
bool evaluate_mod(const ov::Tensor& arg0,
const ov::Tensor& arg1,
const ov::Tensor& out,
const op::AutoBroadcastSpec& broadcast_spec) {
bool rc = true;
switch (arg0.get_element_type()) {
case ov::element::Type_t::i8: {
rc = evaluate<int8_t>(arg0, arg1, out, broadcast_spec);
} break;
case ov::element::Type_t::i16: {
rc = evaluate<int16_t>(arg0, arg1, out, broadcast_spec);
} break;
case ov::element::Type_t::i32: {
rc = evaluate<int32_t>(arg0, arg1, out, broadcast_spec);
} break;
case ov::element::Type_t::i64: {
rc = evaluate<int64_t>(arg0, arg1, out, broadcast_spec);
} break;
case ov::element::Type_t::u8: {
rc = evaluate<uint8_t>(arg0, arg1, out, broadcast_spec);
} break;
case ov::element::Type_t::u16: {
rc = evaluate<uint16_t>(arg0, arg1, out, broadcast_spec);
} break;
case ov::element::Type_t::u32: {
rc = evaluate<uint32_t>(arg0, arg1, out, broadcast_spec);
} break;
case ov::element::Type_t::u64: {
rc = evaluate<uint64_t>(arg0, arg1, out, broadcast_spec);
} break;
default:
rc = false;
break;
}
return rc;
}
} // namespace
} // namespace mod_op
bool op::v1::Mod::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const {
bool Mod::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const {
OV_OP_SCOPE(v1_Mod_evaluate);
return mod_op::evaluate_mod(inputs[0], inputs[1], outputs[0], get_autob());
OPENVINO_ASSERT(outputs.size() == 1);
OPENVINO_ASSERT(inputs.size() == 2);
outputs[0].set_shape(ov::util::get_broadcast_shape(inputs[0].get_shape(), inputs[1].get_shape(), get_autob()));
using namespace ov::element;
return IfTypeOf<i8, i16, i32, i64, u8, u16, u32, u64>::apply<mod::Evaluate>(inputs[0].get_element_type(),
inputs[0],
inputs[1],
outputs[0],
get_autob());
}
bool op::v1::Mod::has_evaluate() const {
bool Mod::has_evaluate() const {
OV_OP_SCOPE(v1_Mod_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::i8:
case ngraph::element::i16:
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u8:
case ngraph::element::u16:
case ngraph::element::u32:
case ngraph::element::u64:
case element::i8:
case element::i16:
case element::i32:
case element::i64:
case element::u8:
case element::u16:
case element::u32:
case element::u64:
return true;
default:
break;
}
return false;
}
}
} // namespace v1
} // namespace op
} // namespace ov
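
A hedged end-to-end sketch of the migrated evaluate() above: the output tensor no longer needs a pre-computed shape, since evaluate() now resizes it via ov::util::get_broadcast_shape before dispatching on the element type through IfTypeOf. The Parameter nodes, include paths, and sample values below are assumptions for illustration only.

#include <algorithm>
#include <cassert>
#include <memory>
#include <vector>

#include "openvino/op/mod.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/runtime/tensor.hpp"

int main() {
    auto a = std::make_shared<ov::op::v0::Parameter>(ov::element::i32, ov::Shape{4});
    auto b = std::make_shared<ov::op::v0::Parameter>(ov::element::i32, ov::Shape{4});
    auto mod = std::make_shared<ov::op::v1::Mod>(a, b);

    ov::Tensor lhs(ov::element::i32, ov::Shape{4});
    ov::Tensor rhs(ov::element::i32, ov::Shape{4});
    const std::vector<int32_t> x{7, -7, 8, 9}, y{3, 3, -3, 2};
    std::copy(x.begin(), x.end(), lhs.data<int32_t>());
    std::copy(y.begin(), y.end(), rhs.data<int32_t>());

    // The output starts with an empty shape; evaluate() sets it from the broadcast of the inputs.
    ov::TensorVector outputs{ov::Tensor(ov::element::i32, ov::Shape{0})};
    assert(mod->evaluate(outputs, {lhs, rhs}));
    assert(outputs[0].get_shape() == ov::Shape({4}));
    assert(outputs[0].data<int32_t>()[1] == -1);  // -7 mod 3 == -1 (truncated division)
    return 0;
}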


@@ -6,6 +6,7 @@
#include <algorithm>
#include "openvino/core/partial_shape.hpp"
#include "shape_util.hpp"
using namespace ngraph;
@@ -128,5 +129,12 @@ std::vector<size_t> reduce(const std::vector<size_t>& input, const AxisSet& axes
Shape reduce_keep_dims(const Shape& input, const AxisSet& axes) {
return ov::replace_container(input, axes);
}
Shape get_broadcast_shape(const Shape& first, const Shape& second, const op::AutoBroadcastSpec& broadcast_spec) {
auto out_shape = PartialShape(first);
OPENVINO_ASSERT(PartialShape::broadcast_merge_into(out_shape, second, broadcast_spec),
"Argument shapes are inconsistent");
return out_shape.to_shape();
}
} // namespace util
} // namespace ov
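
A minimal sketch of the helper implemented above: it merges two static shapes under the given broadcast rule and fails via OPENVINO_ASSERT on inconsistent shapes. The include paths and shapes are assumptions.

#include <cassert>

#include "openvino/op/util/attr_types.hpp"
#include "shape_util.hpp"

int main() {
    // NUMPY rules align trailing dimensions: {2,3,1} with {1,4} broadcasts to {2,3,4}.
    const auto out = ov::util::get_broadcast_shape(
        ov::Shape{2, 3, 1},
        ov::Shape{1, 4},
        ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::NUMPY));
    assert(out == ov::Shape({2, 3, 4}));
    return 0;
}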


@@ -11,8 +11,8 @@ bool evaluate(const std::shared_ptr<ngraph::op::v1::Mod>& op,
const ngraph::HostTensorVector& outputs,
const ngraph::HostTensorVector& inputs) {
using T = typename ngraph::element_type_traits<ET>::value_type;
ov::reference::mod<T>(inputs[0]->get_data_ptr<T>(),
inputs[1]->get_data_ptr<T>(),
ov::reference::mod(inputs[0]->get_data_ptr<const T>(),
inputs[1]->get_data_ptr<const T>(),
outputs[0]->get_data_ptr<T>(),
inputs[0]->get_shape(),
inputs[1]->get_shape(),