[core]Api 2.0/migrate logical reduce ops to new API (#20043)

* Migrate ReduceLogicalOr to new API

* Migrate ReduceLogicalAnd to new API
This commit is contained in:
Pawel Raasz 2023-09-26 14:45:19 +02:00 committed by GitHub
parent 27decbbd48
commit 519f13a177
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 122 additions and 127 deletions

View File

@ -28,9 +28,7 @@ public:
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v1

View File

@ -28,9 +28,7 @@ public:
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v1

View File

@ -8,54 +8,66 @@
#include <numeric>
#include "ngraph/shape_util.hpp"
#include "openvino/core/shape_util.hpp"
#include "openvino/reference/utils/coordinate_index.hpp"
#include "openvino/reference/utils/coordinate_transform.hpp"
namespace ov {
namespace reference {
OPENVINO_SUPPRESS_DEPRECATED_START
/**
* @brief Reference implementation of ReduceLogicalAnd operator.
*
* @param in Input pointer to data.
* @param out Output pointer to results.
* @param in_shape Input shape.
* @param reduction_axes Axes on which reduction is applied.
*/
static inline void reduce_logical_and(const char* arg,
char* out,
const Shape& in_shape,
const AxisSet& reduction_axes) {
constexpr bool dont_keep_dims_in_output = false;
const auto out_shape = ngraph::reduce(in_shape, reduction_axes, dont_keep_dims_in_output);
const auto out_shape = ov::util::reduce(in_shape, reduction_axes);
std::fill(out, out + shape_size(out_shape), 1);
const auto in_strides = row_major_strides(in_shape);
const auto out_strides = row_major_strides(out_shape);
CoordinateTransformBasic input_transform(in_shape);
for (const Coordinate& input_coord : input_transform) {
const Coordinate output_coord = ngraph::reduce(input_coord, reduction_axes, dont_keep_dims_in_output);
for (const auto& in_coord : input_transform) {
const auto out_coord = ov::util::reduce(in_coord, reduction_axes);
const size_t in_idx =
std::inner_product(input_coord.begin(), input_coord.end(), in_strides.begin(), uint64_t(0));
const size_t out_idx =
std::inner_product(output_coord.begin(), output_coord.end(), out_strides.begin(), uint64_t(0));
const auto in_idx = ov::coordinate_offset(in_coord, in_strides);
const auto out_idx = ov::coordinate_offset(out_coord, out_strides);
out[out_idx] = out[out_idx] && arg[in_idx];
}
}
/**
* @brief Reference implementation of ReduceLogicalOr operator.
*
* @param in Input pointer to data.
* @param out Output pointer to results.
* @param in_shape Input shape.
* @param reduction_axes Axes on which reduction is applied.
*/
static inline void reduce_logical_or(const char* arg, char* out, const Shape& in_shape, const AxisSet& reduction_axes) {
const auto out_shape = ngraph::reduce(in_shape, reduction_axes, false);
const auto out_shape = ov::util::reduce(in_shape, reduction_axes);
std::fill(out, out + shape_size(out_shape), 0);
const auto in_strides = row_major_strides(in_shape);
const auto out_strides = row_major_strides(out_shape);
CoordinateTransformBasic input_transform(in_shape);
for (const Coordinate& input_coord : input_transform) {
const Coordinate output_coord = ngraph::reduce(input_coord, reduction_axes, false);
for (const auto& in_coord : input_transform) {
const auto out_coord = ov::util::reduce(in_coord, reduction_axes);
const size_t in_idx =
std::inner_product(input_coord.begin(), input_coord.end(), in_strides.begin(), uint64_t(0));
const size_t out_idx =
std::inner_product(output_coord.begin(), output_coord.end(), out_strides.begin(), uint64_t(0));
const auto in_idx = ov::coordinate_offset(in_coord, in_strides);
const auto out_idx = ov::coordinate_offset(out_coord, out_strides);
out[out_idx] = out[out_idx] || arg[in_idx];
}
}
OPENVINO_SUPPRESS_DEPRECATED_END
} // namespace reference
} // namespace ov

View File

@ -11,4 +11,15 @@ namespace ov {
std::size_t coordinate_index(const Coordinate& c, const Shape& s);
/**
* @brief Calculate offset from begin of buffer based on coordinate and strides.
*
* If coordinate and strides have different sizes, the result is undefined behavior.
*
* @param coordinate Vector with multi-dimension coordinates.
* @param strides Vector with multi-dimension strides.
* @return Offset of element from start of buffer.
*/
size_t coordinate_offset(const std::vector<size_t>& coordinate, const std::vector<size_t>& strides);
} // namespace ov

View File

@ -25,4 +25,8 @@ std::size_t coordinate_index(const Coordinate& c, const Shape& s) {
return index;
}
// Computes the flat buffer offset of a multi-dimensional coordinate as the
// dot product of the coordinate with the per-dimension strides.
size_t coordinate_offset(const std::vector<size_t>& coordinate, const std::vector<size_t>& strides) {
    size_t offset = 0;
    for (size_t dim = 0; dim < coordinate.size(); ++dim) {
        offset += coordinate[dim] * strides[dim];
    }
    return offset;
}
} // namespace ov

View File

@ -2,76 +2,61 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/reduce_logical_and.hpp"
#include <ngraph/validation_util.hpp>
#include "openvino/op/reduce_logical_and.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/log.hpp"
#include "ngraph/op/util/evaluate_helpers.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "openvino/core/shape_util.hpp"
#include "openvino/op/util/axes_util.hpp"
#include "openvino/reference/logical_reduction.hpp"
#include "openvino/util/log.hpp"
using namespace ngraph;
using namespace std;
namespace ov {
namespace op {
namespace reduce_and {
struct Evaluate : element::NoAction<bool> {
using element::NoAction<bool>::visit;
op::v1::ReduceLogicalAnd::ReduceLogicalAnd(const Output<Node>& data,
const Output<Node>& reduction_axes,
const bool keep_dims)
template <element::Type_t ET>
static result_type visit(const Tensor& in, Tensor& out, const AxisSet& reduction_axes) {
using T = fundamental_type_for<ET>;
reference::reduce_logical_and(in.data<const T>(), out.data<T>(), in.get_shape(), reduction_axes);
return true;
}
};
} // namespace reduce_and
namespace v1 {
ReduceLogicalAnd::ReduceLogicalAnd(const Output<Node>& data, const Output<Node>& reduction_axes, const bool keep_dims)
: LogicalReductionKeepDims(data, reduction_axes, keep_dims) {
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::v1::ReduceLogicalAnd::clone_with_new_inputs(const OutputVector& new_args) const {
std::shared_ptr<Node> ReduceLogicalAnd::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v1_ReduceLogicalAnd_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v1::ReduceLogicalAnd>(new_args.at(0), new_args.at(1), get_keep_dims());
return std::make_shared<ReduceLogicalAnd>(new_args.at(0), new_args.at(1), get_keep_dims());
}
OPENVINO_SUPPRESS_DEPRECATED_START
namespace reduce_and {
namespace {
bool evaluate_reduce_logical_and(const HostTensorPtr& data,
const HostTensorPtr& out,
const AxisSet& reduction_axes,
bool keep_dims) {
OPENVINO_SUPPRESS_DEPRECATED_START
out->set_shape(reduce(data->get_shape(), reduction_axes, keep_dims));
try {
ov::reference::reduce_logical_and(data->get_data_ptr<char>(),
out->get_data_ptr<char>(),
data->get_shape(),
reduction_axes);
return true;
} catch (const ngraph_error& e) {
OPENVINO_WARN << e.what();
return false;
}
OPENVINO_SUPPRESS_DEPRECATED_END
}
} // namespace
} // namespace reduce_and
bool op::v1::ReduceLogicalAnd::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool ReduceLogicalAnd::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v1_ReduceLogicalAnd_evaluate);
OPENVINO_SUPPRESS_DEPRECATED_START
OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 2));
OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1));
OPENVINO_ASSERT(inputs.size() == 2);
OPENVINO_ASSERT(outputs.size() == 1);
const auto& data = inputs[0];
const auto& axes = inputs[1];
const auto& out = outputs[0];
if (data->get_element_type() != element::boolean || !axes->get_element_type().is_integral_number()) {
return false;
}
const auto reduction_axes =
get_normalized_axes_from_tensor(axes, data->get_partial_shape().rank(), get_friendly_name());
OPENVINO_SUPPRESS_DEPRECATED_END
return reduce_and::evaluate_reduce_logical_and(data, out, reduction_axes, get_keep_dims());
const auto reduction_axes = get_normalized_axes_from_tensor(this, inputs[1], inputs[0].get_shape().size());
outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));
using namespace ov::element;
return IfTypeOf<boolean>::apply<reduce_and::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
}
bool op::v1::ReduceLogicalAnd::has_evaluate() const {
bool ReduceLogicalAnd::has_evaluate() const {
OV_OP_SCOPE(v1_ReduceLogicalAnd_has_evaluate);
return get_input_element_type(0) == element::boolean && get_input_element_type(1).is_integral_number();
}
} // namespace v1
} // namespace op
} // namespace ov

View File

@ -2,76 +2,63 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/reduce_logical_or.hpp"
#include <ngraph/validation_util.hpp>
#include "openvino/op/reduce_logical_or.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/log.hpp"
#include "ngraph/op/util/evaluate_helpers.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "openvino/core/shape_util.hpp"
#include "openvino/op/util/axes_util.hpp"
#include "openvino/reference/logical_reduction.hpp"
#include "openvino/util/log.hpp"
using namespace ngraph;
using namespace std;
namespace ov {
namespace op {
namespace reduce_or {
struct Evaluate : element::NoAction<bool> {
using element::NoAction<bool>::visit;
op::v1::ReduceLogicalOr::ReduceLogicalOr(const Output<Node>& data,
const Output<Node>& reduction_axes,
const bool keep_dims)
template <element::Type_t ET>
static result_type visit(const Tensor& in, Tensor& out, const AxisSet& reduction_axes) {
using T = fundamental_type_for<ET>;
reference::reduce_logical_or(in.data<const T>(), out.data<T>(), in.get_shape(), reduction_axes);
return true;
}
};
} // namespace reduce_or
namespace v1 {
ReduceLogicalOr::ReduceLogicalOr(const Output<Node>& data, const Output<Node>& reduction_axes, const bool keep_dims)
: LogicalReductionKeepDims(data, reduction_axes, keep_dims) {
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::v1::ReduceLogicalOr::clone_with_new_inputs(const OutputVector& new_args) const {
std::shared_ptr<Node> ReduceLogicalOr::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v1_ReduceLogicalOr_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v1::ReduceLogicalOr>(new_args.at(0), new_args.at(1), get_keep_dims());
return std::make_shared<ReduceLogicalOr>(new_args.at(0), new_args.at(1), get_keep_dims());
}
OPENVINO_SUPPRESS_DEPRECATED_START
namespace reduce_or {
namespace {
bool evaluate_reduce_logical_or(const HostTensorPtr& data,
const HostTensorPtr& out,
const AxisSet& reduction_axes,
bool keep_dims) {
OPENVINO_SUPPRESS_DEPRECATED_START
out->set_shape(reduce(data->get_shape(), reduction_axes, keep_dims));
try {
ov::reference::reduce_logical_or(data->get_data_ptr<char>(),
out->get_data_ptr<char>(),
data->get_shape(),
reduction_axes);
return true;
} catch (const ngraph_error& e) {
OPENVINO_WARN << e.what();
return false;
}
OPENVINO_SUPPRESS_DEPRECATED_END
}
} // namespace
} // namespace reduce_or
bool op::v1::ReduceLogicalOr::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool ReduceLogicalOr::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v1_ReduceLogicalOr_evaluate);
OPENVINO_SUPPRESS_DEPRECATED_START
OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 2));
OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1));
const auto& data = inputs[0];
const auto& axes = inputs[1];
const auto& out = outputs[0];
if (data->get_element_type() != element::boolean || !axes->get_element_type().is_integral_number()) {
return false;
}
const auto reduction_axes =
get_normalized_axes_from_tensor(axes, data->get_partial_shape().rank(), get_friendly_name());
OPENVINO_SUPPRESS_DEPRECATED_END
return reduce_or::evaluate_reduce_logical_or(data, out, reduction_axes, get_keep_dims());
OPENVINO_ASSERT(inputs.size() == 2);
OPENVINO_ASSERT(outputs.size() == 1);
const auto reduction_axes = get_normalized_axes_from_tensor(this, inputs[1], inputs[0].get_shape().size());
outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));
using namespace ov::element;
return IfTypeOf<boolean>::apply<reduce_or::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
}
bool op::v1::ReduceLogicalOr::has_evaluate() const {
bool ReduceLogicalOr::has_evaluate() const {
OV_OP_SCOPE(v1_ReduceLogicalOr_has_evaluate);
return get_input_element_type(0) == element::boolean && get_input_element_type(1).is_integral_number();
}
} // namespace v1
} // namespace op
} // namespace ov