From 8d6238a3d77b1a915d384eab0f3e92b437956dbb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tomasz=20Do=C5=82bniak?= <tomasz.dolbniak@intel.com>
Date: Mon, 20 Jul 2020 10:20:05 +0200
Subject: [PATCH] Reference implementations for ReduceLogicalAnd &
 ReduceLogicalOr (#1333)

---
 ngraph/src/ngraph/CMakeLists.txt              |  4 +-
 ngraph/src/ngraph/op/reduce_logical_and.cpp   | 46 ++++++++++
 ngraph/src/ngraph/op/reduce_logical_and.hpp   |  3 +
 ngraph/src/ngraph/op/reduce_logical_or.cpp    | 46 ++++++++++
 ngraph/src/ngraph/op/reduce_logical_or.hpp    |  3 +
 .../constant_folding_logical_reduction.cpp    | 39 +--------
 .../ngraph/runtime/reference/eval_helpers.cpp | 42 +++++++++
 .../ngraph/runtime/reference/eval_helpers.hpp | 27 ++++++
 .../runtime/reference/logical_reduction.hpp   | 86 +++++++++++++++++++
 ngraph/test/eval.cpp                          | 23 +++++
 10 files changed, 283 insertions(+), 36 deletions(-)
 create mode 100644 ngraph/src/ngraph/runtime/reference/eval_helpers.cpp
 create mode 100644 ngraph/src/ngraph/runtime/reference/eval_helpers.hpp
 create mode 100644 ngraph/src/ngraph/runtime/reference/logical_reduction.hpp

diff --git a/ngraph/src/ngraph/CMakeLists.txt b/ngraph/src/ngraph/CMakeLists.txt
index 9aabe0d2a9f..558baeb21cf 100644
--- a/ngraph/src/ngraph/CMakeLists.txt
+++ b/ngraph/src/ngraph/CMakeLists.txt
@@ -86,7 +86,7 @@ set (SRC
     except.hpp
     factory.cpp
     factory.hpp
-    factory_adapter.hpp 
+    factory_adapter.hpp
     file_util.cpp
     file_util.hpp
     function.cpp
@@ -570,6 +570,8 @@ set (SRC
     runtime/host_tensor.hpp
     runtime/tensor.cpp
     runtime/tensor.hpp
+    runtime/reference/eval_helpers.cpp
+    runtime/reference/eval_helpers.hpp
     shape.cpp
     shape.hpp
    shape_util.cpp
diff --git a/ngraph/src/ngraph/op/reduce_logical_and.cpp b/ngraph/src/ngraph/op/reduce_logical_and.cpp
index 56457f5e638..7cc6765ba19 100644
--- a/ngraph/src/ngraph/op/reduce_logical_and.cpp
+++ b/ngraph/src/ngraph/op/reduce_logical_and.cpp
@@ -15,6 +15,10 @@
 //*****************************************************************************
 
 #include "ngraph/op/reduce_logical_and.hpp"
+#include "ngraph/log.hpp"
+#include "ngraph/runtime/host_tensor.hpp"
+#include "ngraph/runtime/reference/eval_helpers.hpp"
+#include "ngraph/runtime/reference/logical_reduction.hpp"
 
 using namespace ngraph;
 using namespace std;
@@ -34,3 +38,45 @@ shared_ptr<Node> op::v1::ReduceLogicalAnd::clone_with_new_inputs(const OutputVec
     check_new_args_count(this, new_args);
     return make_shared<op::v1::ReduceLogicalAnd>(new_args.at(0), new_args.at(1), get_keep_dims());
 }
+
+namespace
+{
+    bool evaluate_reduce_logical_and(const HostTensorPtr& data,
+                                     const HostTensorPtr& axes,
+                                     const HostTensorPtr& out)
+    {
+        try
+        {
+            const AxisSet reduction_axes = eval::extract_reduction_axes(axes, "ReduceLogicalAnd");
+
+            runtime::reference::reduce_logical_and(data->get_data_ptr<char>(),
+                                                   out->get_data_ptr<char>(),
+                                                   data->get_shape(),
+                                                   reduction_axes);
+
+            return true;
+        }
+        catch (const ngraph_error& e)
+        {
+            NGRAPH_WARN << e.what();
+            return false;
+        }
+    }
+}
+
+bool op::v1::ReduceLogicalAnd::evaluate(const HostTensorVector& outputs,
+                                        const HostTensorVector& inputs)
+{
+    const auto& data = inputs[0];
+    const auto& axes = inputs[1];
+    const auto& out = outputs[0];
+
+    if (data->get_element_type() != element::boolean || axes->get_element_type() != element::i64)
+    {
+        return false;
+    }
+    else
+    {
+        return evaluate_reduce_logical_and(data, axes, out);
+    }
+}
diff --git a/ngraph/src/ngraph/op/reduce_logical_and.hpp b/ngraph/src/ngraph/op/reduce_logical_and.hpp
index dd7481e059a..e1605da1528 100644
--- a/ngraph/src/ngraph/op/reduce_logical_and.hpp
+++ b/ngraph/src/ngraph/op/reduce_logical_and.hpp
@@ -46,6 +46,9 @@ namespace ngraph
 
                 virtual std::shared_ptr<Node>
                     clone_with_new_inputs(const OutputVector& new_args) const override;
+
+                bool evaluate(const HostTensorVector& outputs,
+                              const HostTensorVector& inputs) override;
             };
         }
     }
diff --git a/ngraph/src/ngraph/op/reduce_logical_or.cpp b/ngraph/src/ngraph/op/reduce_logical_or.cpp
index 597a35cba22..de4d0c4ecf9 100644
--- a/ngraph/src/ngraph/op/reduce_logical_or.cpp
+++ b/ngraph/src/ngraph/op/reduce_logical_or.cpp
@@ -15,6 +15,10 @@
 //*****************************************************************************
 
 #include "ngraph/op/reduce_logical_or.hpp"
+#include "ngraph/log.hpp"
+#include "ngraph/runtime/host_tensor.hpp"
+#include "ngraph/runtime/reference/eval_helpers.hpp"
+#include "ngraph/runtime/reference/logical_reduction.hpp"
 
 using namespace ngraph;
 using namespace std;
@@ -34,3 +38,45 @@ shared_ptr<Node> op::v1::ReduceLogicalOr::clone_with_new_inputs(const OutputVect
     check_new_args_count(this, new_args);
     return make_shared<op::v1::ReduceLogicalOr>(new_args.at(0), new_args.at(1), get_keep_dims());
 }
+
+namespace
+{
+    bool evaluate_reduce_logical_or(const HostTensorPtr& data,
+                                    const HostTensorPtr& axes,
+                                    const HostTensorPtr& out)
+    {
+        try
+        {
+            const AxisSet reduction_axes = eval::extract_reduction_axes(axes, "ReduceLogicalOr");
+
+            runtime::reference::reduce_logical_or(data->get_data_ptr<char>(),
+                                                  out->get_data_ptr<char>(),
+                                                  data->get_shape(),
+                                                  reduction_axes);
+
+            return true;
+        }
+        catch (const ngraph_error& e)
+        {
+            NGRAPH_WARN << e.what();
+            return false;
+        }
+    }
+}
+
+bool op::v1::ReduceLogicalOr::evaluate(const HostTensorVector& outputs,
+                                       const HostTensorVector& inputs)
+{
+    const auto& data = inputs[0];
+    const auto& axes = inputs[1];
+    const auto& out = outputs[0];
+
+    if (data->get_element_type() != element::boolean || axes->get_element_type() != element::i64)
+    {
+        return false;
+    }
+    else
+    {
+        return evaluate_reduce_logical_or(data, axes, out);
+    }
+}
diff --git a/ngraph/src/ngraph/op/reduce_logical_or.hpp b/ngraph/src/ngraph/op/reduce_logical_or.hpp
index 31488a611ef..46b92bfb16c 100644
--- a/ngraph/src/ngraph/op/reduce_logical_or.hpp
+++ b/ngraph/src/ngraph/op/reduce_logical_or.hpp
@@ -46,6 +46,9 @@ namespace ngraph
 
                 virtual std::shared_ptr<Node>
                     clone_with_new_inputs(const OutputVector& new_args) const override;
+
+                bool evaluate(const HostTensorVector& outputs,
+                              const HostTensorVector& inputs) override;
             };
         }
     }
diff --git a/ngraph/src/ngraph/pass/constant_folding_logical_reduction.cpp b/ngraph/src/ngraph/pass/constant_folding_logical_reduction.cpp
index 68ae9f006e5..9e52806d343 100644
--- a/ngraph/src/ngraph/pass/constant_folding_logical_reduction.cpp
+++ b/ngraph/src/ngraph/pass/constant_folding_logical_reduction.cpp
@@ -19,25 +19,11 @@
 #include "ngraph/op/reduce_logical_and.hpp"
 #include "ngraph/op/reduce_logical_or.hpp"
 #include "ngraph/runtime/reference/any.hpp"
+#include "ngraph/runtime/reference/logical_reduction.hpp"
 
 using namespace std;
 using namespace ngraph;
 
-static Shape get_shape_no_keep_dims(const AxisSet& reduction_axes, const Shape& input_shape)
-{
-    Shape shape_no_keep_dims;
-
-    for (size_t i = 0; i < input_shape.size(); i++)
-    {
-        if (reduction_axes.count(i) == 0)
-        {
-            shape_no_keep_dims.push_back(input_shape[i]);
-        }
-    }
-
-    return shape_no_keep_dims;
-}
-
 static shared_ptr<op::Constant> fold_constant_logical_reduction(shared_ptr<op::Constant> constant,
                                                                 shared_ptr<Node> reduction_node)
 {
@@ -57,33 +43,16 @@ static shared_ptr<op::Constant> fold_constant_logical_reduction(shared_ptr<op::C
         const auto reduction_axes = reduce_and->get_reduction_axes();
         const auto input_shape = reduce_and->get_input_shape(0);
         const char* arg = constant->get_data_ptr<char>();
 
-        CoordinateTransform output_transform(get_shape_no_keep_dims(reduction_axes, input_shape));
-
-        for (const Coordinate& output_coord : output_transform)
-        {
-            data_ptr[output_transform.index(output_coord)] = 1;
-        }
-
-        CoordinateTransform input_transform(constant->get_output_shape(0));
-
-        for (const Coordinate& input_coord : input_transform)
-        {
-            Coordinate output_coord = reduce(input_coord, reduction_axes);
-            data_ptr[output_transform.index(output_coord)] =
-                data_ptr[output_transform.index(output_coord)] &&
-                arg[input_transform.index(input_coord)];
-        }
+        runtime::reference::reduce_logical_and(arg, data_ptr, input_shape, reduction_axes);
     }
     else if (auto reduce_or = as_type_ptr<::ngraph::op::v1::ReduceLogicalOr>(reduction_node))
     {
         const auto reduction_axes = reduce_or->get_reduction_axes();
         const auto input_shape = reduce_or->get_input_shape(0);
+        const char* arg = constant->get_data_ptr<char>();
 
-        runtime::reference::any(constant->get_data_ptr<char>(),
-                                data_ptr,
-                                constant->get_output_shape(0),
-                                get_shape_no_keep_dims(reduction_axes, input_shape),
-                                reduction_axes);
+        runtime::reference::reduce_logical_or(arg, data_ptr, input_shape, reduction_axes);
     }
     else
     {
diff --git a/ngraph/src/ngraph/runtime/reference/eval_helpers.cpp b/ngraph/src/ngraph/runtime/reference/eval_helpers.cpp
new file mode 100644
index 00000000000..de3b322fa45
--- /dev/null
+++ b/ngraph/src/ngraph/runtime/reference/eval_helpers.cpp
@@ -0,0 +1,42 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#include <algorithm>
+
+#include "ngraph/check.hpp"
+#include "ngraph/runtime/reference/eval_helpers.hpp"
+
+namespace ngraph
+{
+    namespace eval
+    {
+        AxisSet extract_reduction_axes(const HostTensorPtr& axes, const char* op_name)
+        {
+            const auto axes_count = axes->get_element_count();
+            const auto axes_buffer = axes->get_data_ptr<int64_t>();
+
+            const bool negative_axis_received = std::any_of(
+                axes_buffer, axes_buffer + axes_count, [](const int64_t axis) { return axis < 0; });
+
+            NGRAPH_CHECK(!negative_axis_received,
+                         "Negative axis value received in the ",
+                         op_name,
+                         " evaluation. This case is not supported.");
+
+            return AxisSet(
+                std::vector<AxisSet::value_type>(axes_buffer, axes_buffer + axes_count));
+        }
+    }
+}
diff --git a/ngraph/src/ngraph/runtime/reference/eval_helpers.hpp b/ngraph/src/ngraph/runtime/reference/eval_helpers.hpp
new file mode 100644
index 00000000000..6b9081098c5
--- /dev/null
+++ b/ngraph/src/ngraph/runtime/reference/eval_helpers.hpp
@@ -0,0 +1,27 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include "ngraph/runtime/host_tensor.hpp"
+
+namespace ngraph
+{
+    namespace eval
+    {
+        AxisSet extract_reduction_axes(const HostTensorPtr& axes, const char* op_name);
+    }
+}
diff --git a/ngraph/src/ngraph/runtime/reference/logical_reduction.hpp b/ngraph/src/ngraph/runtime/reference/logical_reduction.hpp
new file mode 100644
index 00000000000..9a13a792956
--- /dev/null
+++ b/ngraph/src/ngraph/runtime/reference/logical_reduction.hpp
@@ -0,0 +1,86 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include <cstddef>
+
+#include "ngraph/coordinate_transform.hpp"
+#include "ngraph/runtime/reference/any.hpp"
+#include "ngraph/shape_util.hpp"
+
+namespace ngraph
+{
+    namespace runtime
+    {
+        namespace
+        {
+            Shape get_shape_no_keep_dims(const AxisSet& reduction_axes, const Shape& input_shape)
+            {
+                Shape shape_no_keep_dims;
+
+                for (size_t i = 0; i < input_shape.size(); i++)
+                {
+                    if (reduction_axes.count(i) == 0)
+                    {
+                        shape_no_keep_dims.push_back(input_shape[i]);
+                    }
+                }
+
+                return shape_no_keep_dims;
+            }
+        }
+
+        namespace reference
+        {
+            static inline void reduce_logical_and(const char* arg,
+                                                  char* out,
+                                                  const Shape& input_shape,
+                                                  const AxisSet& reduction_axes)
+            {
+                CoordinateTransform output_transform(
+                    get_shape_no_keep_dims(reduction_axes, input_shape));
+
+                for (const Coordinate& output_coord : output_transform)
+                {
+                    out[output_transform.index(output_coord)] = 1;
+                }
+
+                CoordinateTransform input_transform(input_shape);
+
+                for (const Coordinate& input_coord : input_transform)
+                {
+                    Coordinate output_coord = reduce(input_coord, reduction_axes);
+                    out[output_transform.index(output_coord)] =
+                        out[output_transform.index(output_coord)] &&
+                        arg[input_transform.index(input_coord)];
+                }
+            }
+
+            static inline void reduce_logical_or(const char* arg,
+                                                 char* out,
+                                                 const Shape& input_shape,
+                                                 const AxisSet& reduction_axes)
+            {
+                runtime::reference::any(arg,
+                                        out,
+                                        input_shape,
+                                        get_shape_no_keep_dims(reduction_axes, input_shape),
+                                        reduction_axes);
+            }
+        }
+    }
+}
diff --git a/ngraph/test/eval.cpp b/ngraph/test/eval.cpp
index 32a2999a66b..a65829e784a 100644
--- a/ngraph/test/eval.cpp
+++ b/ngraph/test/eval.cpp
@@ -51,6 +51,7 @@
 #include "ngraph/op/not.hpp"
 #include "ngraph/op/parameter.hpp"
 #include "ngraph/op/range.hpp"
+#include "ngraph/op/reduce_logical_and.hpp"
 #include "ngraph/op/relu.hpp"
 #include "ngraph/op/reshape.hpp"
 #include "ngraph/op/round.hpp"
@@ -1892,3 +1893,25 @@ TEST(eval, topk_v0_param_dyn_k0)
     vector<int32_t> expec0{0, 1, 1, 2, 2, 0, 2, 2, 0, 1, 1, 0};
     ASSERT_EQ(result0_val, expec0);
 }
+
+TEST(eval, reduce_logical_and__neg_axis)
+{
+    const auto data = make_shared<op::Parameter>(element::boolean, Shape{2, 2, 2});
+    const auto axes = make_shared<op::Parameter>(element::i64, Shape{});
+
+    const auto op = make_shared<op::v1::ReduceLogicalAnd>(data, axes);
+
+    auto fun = make_shared<Function>(op, ParameterVector{data, axes});
+
+    auto result = make_shared<HostTensor>();
+
+    // when ReduceLogicalAnd node evaluator returns false -> the Function object throws
+    EXPECT_THROW(
+        fun->evaluate({result},
+                      {
+                          make_host_tensor<element::Type_t::boolean>(
+                              Shape{2, 2, 2}, {true, false, true, false, true, false, true, false}),
+                          make_host_tensor<element::Type_t::i64>(Shape{}, {-1}),
+                      }),
+        ngraph::ngraph_error);
+}