Reference Implementation of Logical Reduce Operations (#6004)

* Remove CoordinateTransform::index() calls for calculating tensor element indexes

* Allow negative axis values in axes host tensor

* Add constant expression to set keep_dims as false

* Use rank from host tensor to normalize axes

* Address minor comments

* Add const qualifier to local variables

* Add deprecated macro for arm plugin dependent function signatures

* Remove duplicate helper functions
Gabriele Galiero Casay 2021-06-15 11:06:30 +02:00, committed by GitHub
parent 766d011b06
commit 134c66a933
6 changed files with 87 additions and 130 deletions
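Before the per-file diffs, here is a minimal standalone sketch (not part of the commit) of the indexing scheme the first bullet describes: instead of asking CoordinateTransform::index() for every element, the new kernels precompute row-major strides once and turn each coordinate into a flat buffer index with std::inner_product. Plain std::vector stands in for ngraph::Shape and ngraph::Coordinate here.

#include <cstddef>
#include <iostream>
#include <numeric>
#include <vector>

using Shape = std::vector<std::size_t>;
using Coordinate = std::vector<std::size_t>;

// Row-major strides: stride[i] is the product of the dimensions right of i.
Shape row_major_strides(const Shape& shape)
{
    Shape strides(shape.size());
    std::size_t running = 1;
    for (std::size_t i = shape.size(); i-- > 0;)
    {
        strides[i] = running;
        running *= shape[i];
    }
    return strides;
}

int main()
{
    const Shape shape{2, 3, 4};
    const auto strides = row_major_strides(shape); // {12, 4, 1}
    const Coordinate coord{1, 2, 3};
    // flat index = sum(coord[i] * strides[i]) = 12 + 8 + 3 = 23
    const std::size_t idx = std::inner_product(
        coord.begin(), coord.end(), strides.begin(), std::size_t{0});
    std::cout << idx << '\n'; // prints 23
}

The likely motivation is avoiding the per-element construction and validation overhead of CoordinateTransform::index(); the arithmetic itself is the same O(rank) dot product.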

ngraph/runtime/reference/eval_helpers.hpp (deleted):

@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "ngraph/runtime/host_tensor.hpp"
-
-namespace ngraph
-{
-    namespace eval
-    {
-        AxisSet extract_reduction_axes(const HostTensorPtr& axes, const char* op_name);
-    }
-} // namespace ngraph

ngraph/runtime/reference/logical_reduction.hpp:

@@ -5,6 +5,7 @@
 #pragma once
 
 #include <cmath>
+#include <numeric>
 #include "ngraph/coordinate_transform.hpp"
 #include "ngraph/shape_util.hpp"
@@ -17,53 +18,75 @@ namespace ngraph
         {
             static inline void reduce_logical_and(const char* arg,
                                                   char* out,
-                                                  const Shape& input_shape,
-                                                  const AxisSet& reduction_axes,
-                                                  bool keep_dims)
+                                                  const Shape& in_shape,
+                                                  const AxisSet& reduction_axes)
             {
-                CoordinateTransform output_transform(
-                    reduce(input_shape, reduction_axes, keep_dims));
-
-                for (const Coordinate& output_coord : output_transform)
-                {
-                    out[output_transform.index(output_coord)] = 1;
-                }
-
-                CoordinateTransform input_transform(input_shape);
+                constexpr bool dont_keep_dims_in_output = false;
+                const auto out_shape = reduce(in_shape, reduction_axes, dont_keep_dims_in_output);
+                std::fill(out, out + shape_size(out_shape), 1);
+
+                const auto in_strides = row_major_strides(in_shape);
+                const auto out_strides = row_major_strides(out_shape);
 
+                CoordinateTransformBasic input_transform(in_shape);
                 for (const Coordinate& input_coord : input_transform)
                 {
-                    Coordinate output_coord = reduce(input_coord, reduction_axes, keep_dims);
-
-                    out[output_transform.index(output_coord)] =
-                        out[output_transform.index(output_coord)] &&
-                        arg[input_transform.index(input_coord)];
+                    const Coordinate output_coord =
+                        reduce(input_coord, reduction_axes, dont_keep_dims_in_output);
+
+                    const size_t in_idx = std::inner_product(
+                        input_coord.begin(), input_coord.end(), in_strides.begin(), 0);
+                    const size_t out_idx = std::inner_product(
+                        output_coord.begin(), output_coord.end(), out_strides.begin(), 0);
+
+                    out[out_idx] = out[out_idx] && arg[in_idx];
                 }
             }
+
+            NGRAPH_DEPRECATED("Remove when arm plugin supports the new signature")
+            static inline void reduce_logical_and(const char* arg,
+                                                  char* out,
+                                                  const Shape& input_shape,
+                                                  const AxisSet& reduction_axes,
+                                                  bool)
+            {
+                reduce_logical_and(arg, out, input_shape, reduction_axes);
+            }
 
             static inline void reduce_logical_or(const char* arg,
                                                  char* out,
-                                                 const Shape& input_shape,
-                                                 const AxisSet& reduction_axes,
-                                                 bool keep_dims)
+                                                 const Shape& in_shape,
+                                                 const AxisSet& reduction_axes)
             {
-                CoordinateTransform output_transform(
-                    reduce(input_shape, reduction_axes, keep_dims));
-
-                for (const Coordinate& output_coord : output_transform)
-                {
-                    out[output_transform.index(output_coord)] = 0;
-                }
-
-                CoordinateTransform input_transform(input_shape);
+                const auto out_shape = reduce(in_shape, reduction_axes, false);
+                std::fill(out, out + shape_size(out_shape), 0);
+
+                const auto in_strides = row_major_strides(in_shape);
+                const auto out_strides = row_major_strides(out_shape);
 
+                CoordinateTransformBasic input_transform(in_shape);
                 for (const Coordinate& input_coord : input_transform)
                 {
-                    Coordinate output_coord = reduce(input_coord, reduction_axes, keep_dims);
-
-                    out[output_transform.index(output_coord)] =
-                        out[output_transform.index(output_coord)] ||
-                        arg[input_transform.index(input_coord)];
+                    const Coordinate output_coord = reduce(input_coord, reduction_axes, false);
+
+                    const size_t in_idx = std::inner_product(
+                        input_coord.begin(), input_coord.end(), in_strides.begin(), 0);
+                    const size_t out_idx = std::inner_product(
+                        output_coord.begin(), output_coord.end(), out_strides.begin(), 0);
+
+                    out[out_idx] = out[out_idx] || arg[in_idx];
                 }
             }
+
+            NGRAPH_DEPRECATED("Remove when arm plugin supports the new signature")
+            static inline void reduce_logical_or(const char* arg,
+                                                 char* out,
+                                                 const Shape& input_shape,
+                                                 const AxisSet& reduction_axes,
+                                                 bool)
+            {
+                reduce_logical_or(arg, out, input_shape, reduction_axes);
+            }
         } // namespace reference
     } // namespace runtime
 } // namespace ngraph
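To make the kernel's loop structure concrete, here is a hedged, self-contained sketch (not nGraph API) of the same algorithm for one case: logical AND of a 2x3 boolean tensor over axis 1. drop_axes stands in for reduce(coord, axes, false), which projects an input coordinate onto the output by dropping the reduced axes.

#include <cstddef>
#include <iostream>
#include <numeric>
#include <set>
#include <vector>

using Shape = std::vector<std::size_t>;
using Coordinate = std::vector<std::size_t>;

static Shape row_major_strides(const Shape& s)
{
    Shape st(s.size());
    std::size_t r = 1;
    for (std::size_t i = s.size(); i-- > 0; r *= s[i])
        st[i] = r;
    return st;
}

// Drop the reduced axes from a coordinate or shape (keep_dims == false).
static Coordinate drop_axes(const Coordinate& c, const std::set<std::size_t>& axes)
{
    Coordinate out;
    for (std::size_t i = 0; i < c.size(); ++i)
        if (!axes.count(i))
            out.push_back(c[i]);
    return out;
}

int main()
{
    const Shape in_shape{2, 3};
    const std::set<std::size_t> axes{1};           // reduce over columns
    const std::vector<char> arg{1, 0, 1, 1, 1, 1}; // row 0 contains a false
    std::vector<char> out(2, 1);                   // std::fill(..., 1): AND identity

    const auto in_strides = row_major_strides(in_shape);
    const auto out_strides = row_major_strides(drop_axes(in_shape, axes)); // {2}

    // Same loop as reduce_logical_and above: visit every input coordinate,
    // project it onto the output, and AND the element into the accumulator.
    for (std::size_t i = 0; i < in_shape[0]; ++i)
        for (std::size_t j = 0; j < in_shape[1]; ++j)
        {
            const Coordinate in_coord{i, j};
            const Coordinate out_coord = drop_axes(in_coord, axes);
            const std::size_t in_idx = std::inner_product(
                in_coord.begin(), in_coord.end(), in_strides.begin(), std::size_t{0});
            const std::size_t out_idx = std::inner_product(
                out_coord.begin(), out_coord.end(), out_strides.begin(), std::size_t{0});
            out[out_idx] = out[out_idx] && arg[in_idx];
        }

    std::cout << int(out[0]) << ' ' << int(out[1]) << '\n'; // prints "0 1"
}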

eval_helpers.cpp (deleted):

@@ -1,33 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <algorithm>
-
-#include "ngraph/check.hpp"
-#include "ngraph/runtime/reference/eval_helpers.hpp"
-#include "ngraph/util.hpp"
-
-namespace ngraph
-{
-    namespace eval
-    {
-        AxisSet extract_reduction_axes(const HostTensorPtr& axes, const char* op_name)
-        {
-            const auto axes_in_tensor = host_tensor_2_vector<int64_t>(axes);
-
-            const bool negative_axis_received =
-                std::any_of(axes_in_tensor.begin(), axes_in_tensor.end(), [](const int64_t axis) {
-                    return axis < 0;
-                });
-
-            NGRAPH_CHECK(!negative_axis_received,
-                         "Negative axis value received in the ",
-                         op_name,
-                         " evaluation. This case is not supported.");
-
-            return AxisSet(
-                std::vector<AxisSet::value_type>(axes_in_tensor.begin(), axes_in_tensor.end()));
-        }
-    } // namespace eval
-} // namespace ngraph
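The deleted helper above rejected negative axes outright; the commit replaces it with get_normalized_axes_from_tensor (used in the op files below), which wraps negative values by the tensor rank instead. A hedged sketch of that normalization rule follows; the helper name, signature, and error handling here are illustrative, not the exact nGraph API.

#include <cstddef>
#include <cstdint>
#include <set>
#include <stdexcept>
#include <vector>

std::set<std::size_t> normalize_axes(const std::vector<int64_t>& axes, int64_t rank)
{
    std::set<std::size_t> result;
    for (const int64_t axis : axes)
    {
        // A negative axis counts from the back: -1 is the innermost dimension.
        const int64_t normalized = axis < 0 ? axis + rank : axis;
        if (normalized < 0 || normalized >= rank)
            throw std::out_of_range("axis out of range");
        result.insert(static_cast<std::size_t>(normalized));
    }
    return result;
}

// normalize_axes({-1}, 3) == {2};  normalize_axes({0, -2}, 3) == {0, 1}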

reduce_logical_and.cpp:

@@ -3,10 +3,11 @@
 //
 #include "ngraph/op/reduce_logical_and.hpp"
 
+#include <ngraph/validation_util.hpp>
+
 #include "itt.hpp"
 #include "ngraph/log.hpp"
+#include "ngraph/op/util/evaluate_helpers.hpp"
 #include "ngraph/runtime/host_tensor.hpp"
-#include "ngraph/runtime/reference/eval_helpers.hpp"
 #include "ngraph/runtime/reference/logical_reduction.hpp"
 
 using namespace ngraph;
@@ -32,28 +33,20 @@ shared_ptr<Node> op::v1::ReduceLogicalAnd::clone_with_new_inputs(const OutputVec
     return make_shared<op::v1::ReduceLogicalAnd>(new_args.at(0), new_args.at(1), get_keep_dims());
 }
 
-namespace
+namespace reduce_and
 {
     bool evaluate_reduce_logical_and(const HostTensorPtr& data,
-                                     const HostTensorPtr& axes,
                                      const HostTensorPtr& out,
+                                     const AxisSet& reduction_axes,
                                      bool keep_dims)
     {
-        if (data->get_element_type() != element::boolean ||
-            !axes->get_element_type().is_integral_number())
-        {
-            return false;
-        }
-
+        out->set_shape(reduce(data->get_shape(), reduction_axes, keep_dims));
         try
         {
-            const AxisSet reduction_axes = eval::extract_reduction_axes(axes, "ReduceLogicalAnd");
-            out->set_shape(reduce(data->get_shape(), reduction_axes, keep_dims));
             runtime::reference::reduce_logical_and(data->get_data_ptr<char>(),
                                                    out->get_data_ptr<char>(),
                                                    data->get_shape(),
-                                                   reduction_axes,
-                                                   keep_dims);
+                                                   reduction_axes);
             return true;
         }
         catch (const ngraph_error& e)
@@ -62,16 +55,25 @@ namespace
             return false;
         }
     }
-} // namespace
+} // namespace reduce_and
 
 bool op::v1::ReduceLogicalAnd::evaluate(const HostTensorVector& outputs,
                                         const HostTensorVector& inputs) const
 {
     NGRAPH_OP_SCOPE(v1_ReduceLogicalAnd_evaluate);
+    NGRAPH_CHECK(validate_host_tensor_vector(inputs, 2));
+    NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1));
     const auto& data = inputs[0];
     const auto& axes = inputs[1];
     const auto& out = outputs[0];
-    return evaluate_reduce_logical_and(data, axes, out, get_keep_dims());
+    if (data->get_element_type() != element::boolean ||
+        !axes->get_element_type().is_integral_number())
+    {
+        return false;
+    }
+    const auto reduction_axes = get_normalized_axes_from_tensor(
+        axes, data->get_partial_shape().rank(), get_friendly_name());
+    return reduce_and::evaluate_reduce_logical_and(data, out, reduction_axes, get_keep_dims());
 }
 
 bool op::v1::ReduceLogicalAnd::has_evaluate() const
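Worth noting: keep_dims now only influences out->set_shape(...) in evaluate_reduce_logical_and, while the reference kernel always writes the squeezed layout. That works because padding a reduced shape with size-1 dimensions never changes the row-major element order, as this small check (ours, not the commit's) illustrates.

#include <cassert>
#include <cstddef>
#include <functional>
#include <numeric>
#include <vector>

int main()
{
    // Reducing {2, 3, 4} over axis 1: the squeezed and keep_dims shapes
    // describe the same 8-element row-major buffer.
    const std::vector<std::size_t> squeezed{2, 4};  // keep_dims == false
    const std::vector<std::size_t> kept{2, 1, 4};   // keep_dims == true
    const auto count = [](const std::vector<std::size_t>& s) {
        return std::accumulate(s.begin(), s.end(), std::size_t{1},
                               std::multiplies<std::size_t>());
    };
    assert(count(squeezed) == count(kept)); // both 8, identical element order
}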

reduce_logical_or.cpp:

@@ -3,10 +3,11 @@
 //
 #include "ngraph/op/reduce_logical_or.hpp"
 
+#include <ngraph/validation_util.hpp>
+
 #include "itt.hpp"
 #include "ngraph/log.hpp"
+#include "ngraph/op/util/evaluate_helpers.hpp"
 #include "ngraph/runtime/host_tensor.hpp"
-#include "ngraph/runtime/reference/eval_helpers.hpp"
 #include "ngraph/runtime/reference/logical_reduction.hpp"
 
 using namespace ngraph;
@@ -32,28 +33,20 @@ shared_ptr<Node> op::v1::ReduceLogicalOr::clone_with_new_inputs(const OutputVect
     return make_shared<op::v1::ReduceLogicalOr>(new_args.at(0), new_args.at(1), get_keep_dims());
 }
 
-namespace
+namespace reduce_or
 {
     bool evaluate_reduce_logical_or(const HostTensorPtr& data,
-                                    const HostTensorPtr& axes,
                                     const HostTensorPtr& out,
+                                    const AxisSet& reduction_axes,
                                     bool keep_dims)
     {
-        if (data->get_element_type() != element::boolean ||
-            !axes->get_element_type().is_integral_number())
-        {
-            return false;
-        }
-
+        out->set_shape(reduce(data->get_shape(), reduction_axes, keep_dims));
         try
         {
-            const AxisSet reduction_axes = eval::extract_reduction_axes(axes, "ReduceLogicalOr");
-            out->set_shape(reduce(data->get_shape(), reduction_axes, keep_dims));
             runtime::reference::reduce_logical_or(data->get_data_ptr<char>(),
                                                   out->get_data_ptr<char>(),
                                                   data->get_shape(),
-                                                  reduction_axes,
-                                                  keep_dims);
+                                                  reduction_axes);
             return true;
         }
         catch (const ngraph_error& e)
@@ -62,16 +55,25 @@ namespace
             return false;
         }
     }
-} // namespace
+} // namespace reduce_or
 
 bool op::v1::ReduceLogicalOr::evaluate(const HostTensorVector& outputs,
                                        const HostTensorVector& inputs) const
 {
     NGRAPH_OP_SCOPE(v1_ReduceLogicalOr_evaluate);
+    NGRAPH_CHECK(validate_host_tensor_vector(inputs, 2));
+    NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1));
     const auto& data = inputs[0];
     const auto& axes = inputs[1];
     const auto& out = outputs[0];
-    return evaluate_reduce_logical_or(data, axes, out, get_keep_dims());
+    if (data->get_element_type() != element::boolean ||
+        !axes->get_element_type().is_integral_number())
+    {
+        return false;
+    }
+    const auto reduction_axes = get_normalized_axes_from_tensor(
+        axes, data->get_partial_shape().rank(), get_friendly_name());
+    return reduce_or::evaluate_reduce_logical_or(data, out, reduction_axes, get_keep_dims());
 }
 
 bool op::v1::ReduceLogicalOr::has_evaluate() const
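The ReduceLogicalOr changes mirror the ReduceLogicalAnd ones exactly. For reference, a minimal usage sketch against the new kernel signature shown above; this assumes an nGraph build with the usual include paths and is not part of the commit.

#include <vector>

#include "ngraph/runtime/reference/logical_reduction.hpp"

int main()
{
    const ngraph::Shape in_shape{2, 2};
    const ngraph::AxisSet axes{0};
    const std::vector<char> arg{1, 0, 0, 0}; // rows {1,0} and {0,0}
    std::vector<char> out(2);

    // New signature: no keep_dims flag; the output is always the squeezed layout.
    ngraph::runtime::reference::reduce_logical_or(arg.data(), out.data(), in_shape, axes);
    // out == {1, 0}: column-wise OR over axis 0
}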

eval.cpp (nGraph eval tests):

@@ -1810,28 +1810,6 @@ TEST(eval, topk_v1_param_dyn_k0)
     ASSERT_EQ(result1_val, expec1);
 }
 
-TEST(eval, reduce_logical_and__neg_axis)
-{
-    const auto data = make_shared<op::Parameter>(element::boolean, Shape{2, 2, 2});
-    const auto axes = make_shared<op::Parameter>(element::i64, Shape{});
-    const auto op = make_shared<op::v1::ReduceLogicalAnd>(data, axes);
-
-    auto fun = make_shared<Function>(op, ParameterVector{data, axes});
-
-    auto result = make_shared<HostTensor>();
-
-    // when ReduceLogicalAnd node evaluator returns false -> the Function object throws
-    EXPECT_THROW(
-        fun->evaluate({result},
-                      {
-                          make_host_tensor<element::Type_t::boolean>(
-                              Shape{2, 2, 2}, {true, false, true, false, true, false, true, false}),
-                          make_host_tensor<element::Type_t::i64>(Shape{}, {-1}),
-                      }),
-        ngraph::ngraph_error);
-}
-
 TEST(eval, evaluate_static_scatter_update_basic_axes_indices_i32)
 {
     const Shape data_shape{3, 3};
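(The removed reduce_logical_and__neg_axis test above asserted that a negative axis made Function::evaluate throw. After this commit, an axis of -1 on that rank-3 input normalizes to axis 2, so the same call now reduces over the innermost dimension instead of raising ngraph_error; working it by hand, the alternating true/false data in that test would yield {false, false, false, false}, since every innermost pair contains a false.)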