diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/eval_helpers.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/eval_helpers.hpp
deleted file mode 100644
index 935237778d1..00000000000
--- a/ngraph/core/reference/include/ngraph/runtime/reference/eval_helpers.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "ngraph/runtime/host_tensor.hpp"
-
-namespace ngraph
-{
-    namespace eval
-    {
-        AxisSet extract_reduction_axes(const HostTensorPtr& axes, const char* op_name);
-    }
-} // namespace ngraph
diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/logical_reduction.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/logical_reduction.hpp
index f9e24d89292..f2789da27de 100644
--- a/ngraph/core/reference/include/ngraph/runtime/reference/logical_reduction.hpp
+++ b/ngraph/core/reference/include/ngraph/runtime/reference/logical_reduction.hpp
@@ -5,6 +5,7 @@
 #pragma once
 
 #include <cstddef>
+#include <numeric>
 
 #include "ngraph/coordinate_transform.hpp"
 #include "ngraph/shape_util.hpp"
@@ -17,53 +18,75 @@ namespace ngraph
         {
             static inline void reduce_logical_and(const char* arg,
                                                   char* out,
-                                                  const Shape& input_shape,
-                                                  const AxisSet& reduction_axes,
-                                                  bool keep_dims)
+                                                  const Shape& in_shape,
+                                                  const AxisSet& reduction_axes)
             {
-                CoordinateTransform output_transform(
-                    reduce(input_shape, reduction_axes, keep_dims));
+                constexpr bool dont_keep_dims_in_output = false;
+                const auto out_shape = reduce(in_shape, reduction_axes, dont_keep_dims_in_output);
+                std::fill(out, out + shape_size(out_shape), 1);
 
-                for (const Coordinate& output_coord : output_transform)
-                {
-                    out[output_transform.index(output_coord)] = 1;
-                }
-
-                CoordinateTransform input_transform(input_shape);
+                const auto in_strides = row_major_strides(in_shape);
+                const auto out_strides = row_major_strides(out_shape);
+                CoordinateTransformBasic input_transform(in_shape);
 
                 for (const Coordinate& input_coord : input_transform)
                 {
-                    Coordinate output_coord = reduce(input_coord, reduction_axes, keep_dims);
-                    out[output_transform.index(output_coord)] =
-                        out[output_transform.index(output_coord)] &&
-                        arg[input_transform.index(input_coord)];
+                    const Coordinate output_coord =
+                        reduce(input_coord, reduction_axes, dont_keep_dims_in_output);
+
+                    const size_t in_idx = std::inner_product(
+                        input_coord.begin(), input_coord.end(), in_strides.begin(), 0);
+                    const size_t out_idx = std::inner_product(
+                        output_coord.begin(), output_coord.end(), out_strides.begin(), 0);
+
+                    out[out_idx] = out[out_idx] && arg[in_idx];
                 }
             }
 
+            NGRAPH_DEPRECATED("Remove when arm plugin supports the new signature")
+            static inline void reduce_logical_and(const char* arg,
+                                                  char* out,
+                                                  const Shape& input_shape,
+                                                  const AxisSet& reduction_axes,
+                                                  bool)
+            {
+                reduce_logical_and(arg, out, input_shape, reduction_axes);
+            }
+
             static inline void reduce_logical_or(const char* arg,
                                                  char* out,
-                                                 const Shape& input_shape,
-                                                 const AxisSet& reduction_axes,
-                                                 bool keep_dims)
+                                                 const Shape& in_shape,
+                                                 const AxisSet& reduction_axes)
             {
-                CoordinateTransform output_transform(
-                    reduce(input_shape, reduction_axes, keep_dims));
+                const auto out_shape = reduce(in_shape, reduction_axes, false);
+                std::fill(out, out + shape_size(out_shape), 0);
 
-                for (const Coordinate& output_coord : output_transform)
-                {
-                    out[output_transform.index(output_coord)] = 0;
-                }
-
-                CoordinateTransform input_transform(input_shape);
+                const auto in_strides = row_major_strides(in_shape);
+                const auto out_strides = row_major_strides(out_shape);
+                CoordinateTransformBasic input_transform(in_shape);
 
                 for (const Coordinate& input_coord : input_transform)
                 {
-                    Coordinate output_coord = reduce(input_coord, reduction_axes, keep_dims);
-                    out[output_transform.index(output_coord)] =
-                        out[output_transform.index(output_coord)] ||
-                        arg[input_transform.index(input_coord)];
+                    const Coordinate output_coord = reduce(input_coord, reduction_axes, false);
+
+                    const size_t in_idx = std::inner_product(
+                        input_coord.begin(), input_coord.end(), in_strides.begin(), 0);
+                    const size_t out_idx = std::inner_product(
+                        output_coord.begin(), output_coord.end(), out_strides.begin(), 0);
+
+                    out[out_idx] = out[out_idx] || arg[in_idx];
                 }
             }
+
+            NGRAPH_DEPRECATED("Remove when arm plugin supports the new signature")
+            static inline void reduce_logical_or(const char* arg,
+                                                 char* out,
+                                                 const Shape& input_shape,
+                                                 const AxisSet& reduction_axes,
+                                                 bool)
+            {
+                reduce_logical_or(arg, out, input_shape, reduction_axes);
+            }
         } // namespace reference
     } // namespace runtime
 } // namespace ngraph
diff --git a/ngraph/core/reference/src/runtime/reference/eval_helpers.cpp b/ngraph/core/reference/src/runtime/reference/eval_helpers.cpp
deleted file mode 100644
index 9aae80310a3..00000000000
--- a/ngraph/core/reference/src/runtime/reference/eval_helpers.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <algorithm>
-
-#include "ngraph/check.hpp"
-#include "ngraph/runtime/reference/eval_helpers.hpp"
-#include "ngraph/util.hpp"
-
-namespace ngraph
-{
-    namespace eval
-    {
-        AxisSet extract_reduction_axes(const HostTensorPtr& axes, const char* op_name)
-        {
-            const auto axes_in_tensor = host_tensor_2_vector<int64_t>(axes);
-
-            const bool negative_axis_received =
-                std::any_of(axes_in_tensor.begin(), axes_in_tensor.end(), [](const int64_t axis) {
-                    return axis < 0;
-                });
-
-            NGRAPH_CHECK(!negative_axis_received,
-                         "Negative axis value received in the ",
-                         op_name,
-                         " evaluation. This case is not supported.");
-
-            return AxisSet(
-                std::vector<size_t>(axes_in_tensor.begin(), axes_in_tensor.end()));
-        }
-    } // namespace eval
-} // namespace ngraph
diff --git a/ngraph/core/src/op/reduce_logical_and.cpp b/ngraph/core/src/op/reduce_logical_and.cpp
index c75c244d590..6b578c894ac 100644
--- a/ngraph/core/src/op/reduce_logical_and.cpp
+++ b/ngraph/core/src/op/reduce_logical_and.cpp
@@ -3,10 +3,11 @@
 //
 
 #include "ngraph/op/reduce_logical_and.hpp"
+#include <ngraph/validation_util.hpp>
 #include "itt.hpp"
 #include "ngraph/log.hpp"
+#include "ngraph/op/util/evaluate_helpers.hpp"
 #include "ngraph/runtime/host_tensor.hpp"
-#include "ngraph/runtime/reference/eval_helpers.hpp"
 #include "ngraph/runtime/reference/logical_reduction.hpp"
 
 using namespace ngraph;
@@ -32,28 +33,20 @@ shared_ptr op::v1::ReduceLogicalAnd::clone_with_new_inputs(const OutputVec
     return make_shared<v1::ReduceLogicalAnd>(new_args.at(0), new_args.at(1), get_keep_dims());
 }
 
-namespace
+namespace reduce_and
 {
     bool evaluate_reduce_logical_and(const HostTensorPtr& data,
-                                     const HostTensorPtr& axes,
                                      const HostTensorPtr& out,
+                                     const AxisSet& reduction_axes,
                                      bool keep_dims)
     {
-        if (data->get_element_type() != element::boolean ||
-            !axes->get_element_type().is_integral_number())
-        {
-            return false;
-        }
+        out->set_shape(reduce(data->get_shape(), reduction_axes, keep_dims));
         try
         {
-            const AxisSet reduction_axes = eval::extract_reduction_axes(axes, "ReduceLogicalAnd");
-
             runtime::reference::reduce_logical_and(data->get_data_ptr<char>(),
                                                    out->get_data_ptr<char>(),
                                                    data->get_shape(),
-                                                   reduction_axes,
-                                                   keep_dims);
-
+                                                   reduction_axes);
             return true;
         }
         catch (const ngraph_error& e)
@@ -62,16 +55,25 @@ namespace
             return false;
         }
     }
-} // namespace
+} // namespace reduce_and
 
 bool op::v1::ReduceLogicalAnd::evaluate(const HostTensorVector& outputs,
                                         const HostTensorVector& inputs) const
 {
     NGRAPH_OP_SCOPE(v1_ReduceLogicalAnd_evaluate);
+    NGRAPH_CHECK(validate_host_tensor_vector(inputs, 2));
+    NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1));
     const auto& data = inputs[0];
     const auto& axes = inputs[1];
    const auto& out = outputs[0];
-    return evaluate_reduce_logical_and(data, axes, out, get_keep_dims());
+    if (data->get_element_type() != element::boolean ||
+        !axes->get_element_type().is_integral_number())
+    {
+        return false;
+    }
+    const auto reduction_axes = get_normalized_axes_from_tensor(
+        axes, data->get_partial_shape().rank(), get_friendly_name());
+    return reduce_and::evaluate_reduce_logical_and(data, out, reduction_axes, get_keep_dims());
 }
 
 bool op::v1::ReduceLogicalAnd::has_evaluate() const
diff --git a/ngraph/core/src/op/reduce_logical_or.cpp b/ngraph/core/src/op/reduce_logical_or.cpp
index a6afa5f77c8..427b576bf91 100644
--- a/ngraph/core/src/op/reduce_logical_or.cpp
+++ b/ngraph/core/src/op/reduce_logical_or.cpp
@@ -3,10 +3,11 @@
 //
 
 #include "ngraph/op/reduce_logical_or.hpp"
+#include <ngraph/validation_util.hpp>
 #include "itt.hpp"
 #include "ngraph/log.hpp"
+#include "ngraph/op/util/evaluate_helpers.hpp"
 #include "ngraph/runtime/host_tensor.hpp"
-#include "ngraph/runtime/reference/eval_helpers.hpp"
 #include "ngraph/runtime/reference/logical_reduction.hpp"
 
 using namespace ngraph;
@@ -32,28 +33,20 @@ shared_ptr op::v1::ReduceLogicalOr::clone_with_new_inputs(const OutputVect
     return make_shared<v1::ReduceLogicalOr>(new_args.at(0), new_args.at(1), get_keep_dims());
 }
 
-namespace
+namespace reduce_or
 {
     bool evaluate_reduce_logical_or(const HostTensorPtr& data,
-                                    const HostTensorPtr& axes,
                                     const HostTensorPtr& out,
+                                    const AxisSet& reduction_axes,
                                     bool keep_dims)
     {
-        if (data->get_element_type() != element::boolean ||
-            !axes->get_element_type().is_integral_number())
-        {
-            return false;
-        }
+        out->set_shape(reduce(data->get_shape(), reduction_axes, keep_dims));
         try
         {
-            const AxisSet reduction_axes = eval::extract_reduction_axes(axes, "ReduceLogicalOr");
-
             runtime::reference::reduce_logical_or(data->get_data_ptr<char>(),
                                                   out->get_data_ptr<char>(),
                                                   data->get_shape(),
-                                                  reduction_axes,
-                                                  keep_dims);
-
+                                                  reduction_axes);
             return true;
         }
         catch (const ngraph_error& e)
@@ -62,16 +55,25 @@ namespace
            return false;
         }
     }
-} // namespace
+} // namespace reduce_or
 
 bool op::v1::ReduceLogicalOr::evaluate(const HostTensorVector& outputs,
                                        const HostTensorVector& inputs) const
 {
     NGRAPH_OP_SCOPE(v1_ReduceLogicalOr_evaluate);
+    NGRAPH_CHECK(validate_host_tensor_vector(inputs, 2));
+    NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1));
     const auto& data = inputs[0];
     const auto& axes = inputs[1];
     const auto& out = outputs[0];
-    return evaluate_reduce_logical_or(data, axes, out, get_keep_dims());
+    if (data->get_element_type() != element::boolean ||
+        !axes->get_element_type().is_integral_number())
+    {
+        return false;
+    }
+    const auto reduction_axes = get_normalized_axes_from_tensor(
+        axes, data->get_partial_shape().rank(), get_friendly_name());
+    return reduce_or::evaluate_reduce_logical_or(data, out, reduction_axes, get_keep_dims());
 }
 
 bool op::v1::ReduceLogicalOr::has_evaluate() const
diff --git a/ngraph/test/eval.cpp b/ngraph/test/eval.cpp
index f5602023c5c..1ba827190ae 100644
--- a/ngraph/test/eval.cpp
+++ b/ngraph/test/eval.cpp
@@ -1810,28 +1810,6 @@ TEST(eval, topk_v1_param_dyn_k0)
     ASSERT_EQ(result1_val, expec1);
 }
 
-TEST(eval, reduce_logical_and__neg_axis)
-{
-    const auto data = make_shared<op::Parameter>(element::boolean, Shape{2, 2, 2});
-    const auto axes = make_shared<op::Parameter>(element::i64, Shape{});
-
-    const auto op = make_shared<op::v1::ReduceLogicalAnd>(data, axes);
-
-    auto fun = make_shared<Function>(op, ParameterVector{data, axes});
-
-    auto result = make_shared<HostTensor>();
-
-    // when ReduceLogicalAnd node evaluator returns false -> the Function object throws
-    EXPECT_THROW(
-        fun->evaluate({result},
-                      {
-                          make_host_tensor<element::Type_t::boolean>(
-                              Shape{2, 2, 2}, {true, false, true, false, true, false, true, false}),
-                          make_host_tensor<element::Type_t::i64>(Shape{}, {-1}),
-                      }),
-        ngraph::ngraph_error);
-}
-
 TEST(eval, evaluate_static_scatter_update_basic_axes_indices_i32)
 {
     const Shape data_shape{3, 3};
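
For readers skimming the logical_reduction.hpp hunk: the reworked reference kernels stop using CoordinateTransform::index() and instead address elements through row-major strides, folding every input coordinate into its keep_dims = false output coordinate. The snippet below is a minimal, self-contained sketch of that indexing scheme, not the ngraph code itself: Shape and AxisSet are modelled with std::vector/std::set stand-ins, and helpers such as row_major_strides are re-implemented locally for illustration.

```cpp
// Standalone sketch of the stride-based reduction used by the new reference
// implementation (logical AND over a set of axes, keep_dims == false).
#include <cstddef>
#include <functional>
#include <iostream>
#include <numeric>
#include <set>
#include <vector>

using Shape = std::vector<size_t>;

// Row-major strides: the last axis has stride 1, each earlier axis multiplies
// by the extents of the axes that follow it.
static Shape row_major_strides(const Shape& shape)
{
    Shape strides(shape.size(), 1);
    for (size_t i = shape.size(); i-- > 1;)
        strides[i - 1] = strides[i] * shape[i];
    return strides;
}

static std::vector<char> reduce_logical_and(const std::vector<char>& arg,
                                            const Shape& in_shape,
                                            const std::set<size_t>& reduction_axes)
{
    // Output shape: drop the reduced axes.
    Shape out_shape;
    for (size_t axis = 0; axis < in_shape.size(); ++axis)
        if (reduction_axes.count(axis) == 0)
            out_shape.push_back(in_shape[axis]);

    const auto in_strides = row_major_strides(in_shape);
    const auto out_strides = row_major_strides(out_shape);

    const size_t out_size = std::accumulate(
        out_shape.begin(), out_shape.end(), size_t{1}, std::multiplies<size_t>());
    std::vector<char> out(out_size, 1); // 1 is the neutral element of AND

    // Walk every input coordinate, project it onto the output coordinate by
    // skipping the reduced axes, and fold the value in with &&.
    Shape coord(in_shape.size(), 0);
    for (size_t flat = 0; flat < arg.size(); ++flat)
    {
        Shape out_coord;
        for (size_t axis = 0; axis < coord.size(); ++axis)
            if (reduction_axes.count(axis) == 0)
                out_coord.push_back(coord[axis]);

        const size_t in_idx = std::inner_product(
            coord.begin(), coord.end(), in_strides.begin(), size_t{0});
        const size_t out_idx = std::inner_product(
            out_coord.begin(), out_coord.end(), out_strides.begin(), size_t{0});
        out[out_idx] = out[out_idx] && arg[in_idx];

        // Advance the coordinate in row-major order.
        for (size_t axis = coord.size(); axis-- > 0;)
        {
            if (++coord[axis] < in_shape[axis])
                break;
            coord[axis] = 0;
        }
    }
    return out;
}

int main()
{
    // Reduce a 2x3 boolean tensor over axis 1 -> expected output {0, 1}.
    const std::vector<char> data{1, 0, 1, 1, 1, 1};
    for (char v : reduce_logical_and(data, {2, 3}, {1}))
        std::cout << int(v) << ' ';
    std::cout << '\n';
}
```

Compiled standalone, the example prints `0 1`: the first row contains a false value, the second does not.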
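The deleted eval::extract_reduction_axes helper rejected negative axes outright, which is exactly what the removed reduce_logical_and__neg_axis test exercised; the evaluate bodies now obtain already-normalized axes via get_normalized_axes_from_tensor instead. A rough sketch of what such normalization amounts to is shown below; the normalize_axes helper is a hypothetical stand-in, not the ngraph function.

```cpp
// Hypothetical stand-in for axis normalization: negative axes are wrapped by
// the tensor rank, out-of-range axes are rejected.
#include <cstdint>
#include <set>
#include <stdexcept>
#include <vector>

std::set<size_t> normalize_axes(const std::vector<int64_t>& axes, int64_t rank)
{
    std::set<size_t> normalized;
    for (int64_t axis : axes)
    {
        const int64_t wrapped = axis < 0 ? axis + rank : axis; // e.g. -1 -> rank - 1
        if (wrapped < 0 || wrapped >= rank)
            throw std::out_of_range("Reduction axis out of range");
        normalized.insert(static_cast<size_t>(wrapped));
    }
    return normalized;
}
```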