Reference implementations for ReduceLogicalAnd & ReduceLogicalOr (#1333)

Tomasz Dołbniak 2020-07-20 10:20:05 +02:00 committed by GitHub
parent eca80086ac
commit 8d6238a3d7
10 changed files with 283 additions and 36 deletions

View File

@@ -86,7 +86,7 @@ set (SRC
    except.hpp
    factory.cpp
    factory.hpp
    factory_adapter.hpp
    factory_adapter.hpp
    file_util.cpp
    file_util.hpp
    function.cpp
@@ -570,6 +570,8 @@ set (SRC
    runtime/host_tensor.hpp
    runtime/tensor.cpp
    runtime/tensor.hpp
    runtime/reference/eval_helpers.cpp
    runtime/reference/eval_helpers.hpp
    shape.cpp
    shape.hpp
    shape_util.cpp

View File

@@ -15,6 +15,10 @@
//*****************************************************************************
#include "ngraph/op/reduce_logical_and.hpp"
#include "ngraph/log.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/reference/eval_helpers.hpp"
#include "ngraph/runtime/reference/logical_reduction.hpp"
using namespace ngraph;
using namespace std;
@@ -34,3 +38,45 @@ shared_ptr<Node> op::v1::ReduceLogicalAnd::clone_with_new_inputs(const OutputVec
    check_new_args_count(this, new_args);
    return make_shared<op::v1::ReduceLogicalAnd>(new_args.at(0), new_args.at(1), get_keep_dims());
}

namespace
{
    bool evaluate_reduce_logical_and(const HostTensorPtr& data,
                                     const HostTensorPtr& axes,
                                     const HostTensorPtr& out)
    {
        try
        {
            const AxisSet reduction_axes = eval::extract_reduction_axes(axes, "ReduceLogicalAnd");

            runtime::reference::reduce_logical_and(data->get_data_ptr<char>(),
                                                   out->get_data_ptr<char>(),
                                                   data->get_shape(),
                                                   reduction_axes);

            return true;
        }
        catch (const ngraph_error& e)
        {
            NGRAPH_WARN << e.what();
            return false;
        }
    }
}

bool op::v1::ReduceLogicalAnd::evaluate(const HostTensorVector& outputs,
                                        const HostTensorVector& inputs)
{
    const auto& data = inputs[0];
    const auto& axes = inputs[1];
    const auto& out = outputs[0];

    if (data->get_element_type() != element::boolean || axes->get_element_type() != element::i64)
    {
        return false;
    }
    else
    {
        return evaluate_reduce_logical_and(data, axes, out);
    }
}
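
A quick usage sketch of the new evaluate path (illustrative only, not part of the diff; it relies on the Function::evaluate API exercised by the test at the bottom of this page, and the shapes and values are made up):

// Build a small graph around ReduceLogicalAnd and evaluate it on host tensors.
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/host_tensor.hpp"

using namespace ngraph;

void reduce_logical_and_example()
{
    const auto data = std::make_shared<op::Parameter>(element::boolean, Shape{2, 2});
    const auto axes = op::Constant::create(element::i64, Shape{1}, {1});
    const auto reduce = std::make_shared<op::v1::ReduceLogicalAnd>(data, axes, false);
    auto f = std::make_shared<Function>(reduce, ParameterVector{data});

    auto input = std::make_shared<runtime::HostTensor>(element::boolean, Shape{2, 2});
    char* in = input->get_data_ptr<char>();
    in[0] = 1; in[1] = 1; // row 0: {true, true}
    in[2] = 1; in[3] = 0; // row 1: {true, false}

    auto result = std::make_shared<runtime::HostTensor>();
    f->evaluate({result}, {input}); // dispatches to evaluate_reduce_logical_and above
    // result now holds {true, false} with Shape{2}
}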

View File

@@ -46,6 +46,9 @@ namespace ngraph
                virtual std::shared_ptr<Node>
                    clone_with_new_inputs(const OutputVector& new_args) const override;

                bool evaluate(const HostTensorVector& outputs,
                              const HostTensorVector& inputs) override;
            };
        }
    }

View File

@@ -15,6 +15,10 @@
//*****************************************************************************
#include "ngraph/op/reduce_logical_or.hpp"
#include "ngraph/log.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/reference/eval_helpers.hpp"
#include "ngraph/runtime/reference/logical_reduction.hpp"
using namespace ngraph;
using namespace std;
@@ -34,3 +38,45 @@ shared_ptr<Node> op::v1::ReduceLogicalOr::clone_with_new_inputs(const OutputVect
    check_new_args_count(this, new_args);
    return make_shared<op::v1::ReduceLogicalOr>(new_args.at(0), new_args.at(1), get_keep_dims());
}

namespace
{
    bool evaluate_reduce_logical_or(const HostTensorPtr& data,
                                    const HostTensorPtr& axes,
                                    const HostTensorPtr& out)
    {
        try
        {
            const AxisSet reduction_axes = eval::extract_reduction_axes(axes, "ReduceLogicalOr");

            runtime::reference::reduce_logical_or(data->get_data_ptr<char>(),
                                                  out->get_data_ptr<char>(),
                                                  data->get_shape(),
                                                  reduction_axes);

            return true;
        }
        catch (const ngraph_error& e)
        {
            NGRAPH_WARN << e.what();
            return false;
        }
    }
}

bool op::v1::ReduceLogicalOr::evaluate(const HostTensorVector& outputs,
                                       const HostTensorVector& inputs)
{
    const auto& data = inputs[0];
    const auto& axes = inputs[1];
    const auto& out = outputs[0];

    if (data->get_element_type() != element::boolean || axes->get_element_type() != element::i64)
    {
        return false;
    }
    else
    {
        return evaluate_reduce_logical_or(data, axes, out);
    }
}

View File

@@ -46,6 +46,9 @@ namespace ngraph
                virtual std::shared_ptr<Node>
                    clone_with_new_inputs(const OutputVector& new_args) const override;

                bool evaluate(const HostTensorVector& outputs,
                              const HostTensorVector& inputs) override;
            };
        }
    }

View File

@@ -19,25 +19,11 @@
#include "ngraph/op/reduce_logical_and.hpp"
#include "ngraph/op/reduce_logical_or.hpp"
#include "ngraph/runtime/reference/any.hpp"
#include "ngraph/runtime/reference/logical_reduction.hpp"
using namespace std;
using namespace ngraph;
static Shape get_shape_no_keep_dims(const AxisSet& reduction_axes, const Shape& input_shape)
{
    Shape shape_no_keep_dims;
    for (size_t i = 0; i < input_shape.size(); i++)
    {
        if (reduction_axes.count(i) == 0)
        {
            shape_no_keep_dims.push_back(input_shape[i]);
        }
    }
    return shape_no_keep_dims;
}

static shared_ptr<op::Constant> fold_constant_logical_reduction(shared_ptr<op::Constant> constant,
                                                                shared_ptr<Node> reduction_node)
{
@@ -57,33 +43,16 @@ static shared_ptr<op::Constant> fold_constant_logical_reduction(shared_ptr<op::C
        const auto reduction_axes = reduce_and->get_reduction_axes();
        const auto input_shape = reduce_and->get_input_shape(0);
        const char* arg = constant->get_data_ptr<char>();

        CoordinateTransform output_transform(get_shape_no_keep_dims(reduction_axes, input_shape));
        for (const Coordinate& output_coord : output_transform)
        {
            data_ptr[output_transform.index(output_coord)] = 1;
        }
        CoordinateTransform input_transform(constant->get_output_shape(0));
        for (const Coordinate& input_coord : input_transform)
        {
            Coordinate output_coord = reduce(input_coord, reduction_axes);
            data_ptr[output_transform.index(output_coord)] =
                data_ptr[output_transform.index(output_coord)] &&
                arg[input_transform.index(input_coord)];
        }
        runtime::reference::reduce_logical_and(arg, data_ptr, input_shape, reduction_axes);
    }
    else if (auto reduce_or = as_type_ptr<::ngraph::op::v1::ReduceLogicalOr>(reduction_node))
    {
        const auto reduction_axes = reduce_or->get_reduction_axes();
        const auto input_shape = reduce_or->get_input_shape(0);
        const char* arg = constant->get_data_ptr<char>();
        runtime::reference::any(constant->get_data_ptr<char>(),
                                data_ptr,
                                constant->get_output_shape(0),
                                get_shape_no_keep_dims(reduction_axes, input_shape),
                                reduction_axes);
        runtime::reference::reduce_logical_or(arg, data_ptr, input_shape, reduction_axes);
    }
    else
    {

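A companion sketch (not in the commit): after this change, both logical reductions fold through the shared reference kernels when the existing ConstantFolding pass runs over a graph with constant inputs. Values below are illustrative:

#include "ngraph/ngraph.hpp"
#include "ngraph/pass/constant_folding.hpp"
#include "ngraph/pass/manager.hpp"

using namespace ngraph;

void fold_reduce_logical_or_example()
{
    // 2x2 boolean constant: rows {true, false} and {true, true}
    const auto data = op::Constant::create(element::boolean, Shape{2, 2}, {1, 0, 1, 1});
    const auto axes = op::Constant::create(element::i64, Shape{1}, {0});
    const auto reduce = std::make_shared<op::v1::ReduceLogicalOr>(data, axes, false);
    auto f = std::make_shared<Function>(reduce, ParameterVector{});

    pass::Manager pass_manager;
    pass_manager.register_pass<pass::ConstantFolding>();
    pass_manager.run_passes(f);
    // the ReduceLogicalOr node is now replaced by a Constant holding {true, true}
}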
View File

@@ -0,0 +1,42 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include "ngraph/check.hpp"
#include "ngraph/runtime/reference/eval_helpers.hpp"
namespace ngraph
{
    namespace eval
    {
        AxisSet extract_reduction_axes(const HostTensorPtr& axes, const char* op_name)
        {
            const auto axes_count = axes->get_element_count();
            const auto axes_buffer = axes->get_data_ptr<int64_t>();

            const bool negative_axis_received = std::any_of(
                axes_buffer, axes_buffer + axes_count, [](const int64_t axis) { return axis < 0; });

            NGRAPH_CHECK(!negative_axis_received,
                         "Negative axis value received in the ",
                         op_name,
                         " evaluation. This case is not supported.");

            return AxisSet(std::vector<AxisSet::value_type>(axes_buffer, axes_buffer + axes_count));
        }
    }
}
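
The helper's contract in a nutshell (an ad-hoc sketch, not part of the commit):

#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/reference/eval_helpers.hpp"

using namespace ngraph;

void extract_reduction_axes_example()
{
    auto axes = std::make_shared<runtime::HostTensor>(element::i64, Shape{2});
    int64_t* buf = axes->get_data_ptr<int64_t>();
    buf[0] = 0;
    buf[1] = 2;

    // returns AxisSet{0, 2}
    const AxisSet reduction_axes = eval::extract_reduction_axes(axes, "ReduceLogicalAnd");

    buf[1] = -1;
    // would now throw ngraph_error (via NGRAPH_CHECK): negative axes are rejected,
    // which the evaluate() overloads above catch and turn into "return false"
    // eval::extract_reduction_axes(axes, "ReduceLogicalAnd");
}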

View File

@@ -0,0 +1,27 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/runtime/host_tensor.hpp"
namespace ngraph
{
    namespace eval
    {
        AxisSet extract_reduction_axes(const HostTensorPtr& axes, const char* op_name);
    }
}

View File

@@ -0,0 +1,86 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <cmath>
#include "ngraph/coordinate_transform.hpp"
#include "ngraph/runtime/reference/any.hpp"
#include "ngraph/shape_util.hpp"
namespace ngraph
{
    namespace runtime
    {
        namespace
        {
            Shape get_shape_no_keep_dims(const AxisSet& reduction_axes, const Shape& input_shape)
            {
                Shape shape_no_keep_dims;
                for (size_t i = 0; i < input_shape.size(); i++)
                {
                    if (reduction_axes.count(i) == 0)
                    {
                        shape_no_keep_dims.push_back(input_shape[i]);
                    }
                }
                return shape_no_keep_dims;
            }
        }

        namespace reference
        {
            static inline void reduce_logical_and(const char* arg,
                                                  char* out,
                                                  const Shape& input_shape,
                                                  const AxisSet& reduction_axes)
            {
                CoordinateTransform output_transform(
                    get_shape_no_keep_dims(reduction_axes, input_shape));

                for (const Coordinate& output_coord : output_transform)
                {
                    out[output_transform.index(output_coord)] = 1;
                }

                CoordinateTransform input_transform(input_shape);
                for (const Coordinate& input_coord : input_transform)
                {
                    Coordinate output_coord = reduce(input_coord, reduction_axes);
                    out[output_transform.index(output_coord)] =
                        out[output_transform.index(output_coord)] &&
                        arg[input_transform.index(input_coord)];
                }
            }

            static inline void reduce_logical_or(const char* arg,
                                                 char* out,
                                                 const Shape& input_shape,
                                                 const AxisSet& reduction_axes)
            {
                runtime::reference::any(arg,
                                        out,
                                        input_shape,
                                        get_shape_no_keep_dims(reduction_axes, input_shape),
                                        reduction_axes);
            }
        }
    }
}
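
A direct-call sketch for the two kernels (illustrative; element::boolean data is stored as one char per element, which is why the buffers are plain char arrays):

#include "ngraph/runtime/reference/logical_reduction.hpp"

using namespace ngraph;

void reference_kernels_example()
{
    const Shape input_shape{2, 2};
    const AxisSet reduction_axes{1}; // reduce along the innermost axis

    const char in[4] = {1, 1, 1, 0}; // rows: {true, true} and {true, false}
    char and_out[2] = {0, 0};
    char or_out[2] = {0, 0};

    runtime::reference::reduce_logical_and(in, and_out, input_shape, reduction_axes);
    // and_out == {1, 0}: only the first row is all-true

    runtime::reference::reduce_logical_or(in, or_out, input_shape, reduction_axes);
    // or_out == {1, 1}: each row contains at least one true
}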

View File

@@ -51,6 +51,7 @@
#include "ngraph/op/not.hpp"
#include "ngraph/op/parameter.hpp"
#include "ngraph/op/range.hpp"
#include "ngraph/op/reduce_logical_and.hpp"
#include "ngraph/op/relu.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/round.hpp"
@@ -1892,3 +1893,25 @@ TEST(eval, topk_v0_param_dyn_k0)
    vector<int32_t> expec0{0, 1, 1, 2, 2, 0, 2, 2, 0, 1, 1, 0};
    ASSERT_EQ(result0_val, expec0);
}
TEST(eval, reduce_logical_and__neg_axis)
{
    const auto data = make_shared<op::Parameter>(element::boolean, Shape{2, 2, 2});
    const auto axes = make_shared<op::Parameter>(element::i64, Shape{});

    const auto op = make_shared<op::v1::ReduceLogicalAnd>(data, axes);

    auto fun = make_shared<Function>(op, ParameterVector{data, axes});

    auto result = make_shared<HostTensor>();

    // when the ReduceLogicalAnd evaluator returns false, Function::evaluate throws
    EXPECT_THROW(
        fun->evaluate({result},
                      {
                          make_host_tensor<element::Type_t::boolean>(
                              Shape{2, 2, 2}, {true, false, true, false, true, false, true, false}),
                          make_host_tensor<element::Type_t::i64>(Shape{}, {-1}),
                      }),
        ngraph::ngraph_error);
}
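
Since ReduceLogicalOr::evaluate takes the same guard path, a mirrored regression test would look like this (a sketch; this test is not part of the commit):

TEST(eval, reduce_logical_or__neg_axis)
{
    const auto data = make_shared<op::Parameter>(element::boolean, Shape{2, 2, 2});
    const auto axes = make_shared<op::Parameter>(element::i64, Shape{});
    const auto op = make_shared<op::v1::ReduceLogicalOr>(data, axes);
    auto fun = make_shared<Function>(op, ParameterVector{data, axes});
    auto result = make_shared<HostTensor>();

    // extract_reduction_axes rejects the negative axis, the evaluator returns false
    // and Function::evaluate surfaces that as an ngraph_error
    EXPECT_THROW(
        fun->evaluate({result},
                      {
                          make_host_tensor<element::Type_t::boolean>(
                              Shape{2, 2, 2}, {true, false, true, false, true, false, true, false}),
                          make_host_tensor<element::Type_t::i64>(Shape{}, {-1}),
                      }),
        ngraph::ngraph_error);
}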