Roll reference implementation (#4947)
* Added reference implementation for Roll operation.
* Small corrections.
* Removed duplicate test disabling.
* Changed implementation using manual data manipulation.
* Removed unnecessary function.
* Corrected tests, added converting axes and shift to int64.
This commit is contained in:
parent 89b876b592
commit 440d2abd1f
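
For context (an aside, not part of the commit): Roll moves tensor elements along the selected axes with wrap-around, so along a rolled axis of size n an element at index i ends up at index (i + shift) mod n; negative shifts and negative axis indices are normalized first, as the tests below show. A minimal standalone sketch of that 1-D semantics in plain C++:

#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
    std::vector<int32_t> in{1, 2, 3, 4, 5};
    const int64_t shift = 2; // for size 5, a shift of -3 gives the same result
    const int64_t n = static_cast<int64_t>(in.size());

    std::vector<int32_t> out(in.size());
    for (int64_t i = 0; i < n; ++i)
    {
        // modulo written so that negative shifts also land in [0, n)
        out[((i + shift) % n + n) % n] = in[i];
    }

    for (auto v : out)
        std::cout << v << ' '; // prints: 4 5 1 2 3
    std::cout << '\n';
}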
@@ -0,0 +1,94 @@
//*****************************************************************************
// Copyright 2017-2021 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

#include "ngraph/coordinate_transform.hpp"
#include "ngraph/shape.hpp"

namespace ngraph
{
    namespace runtime
    {
        namespace reference
        {
            size_t shift_pos(size_t pos_in_spanned_data,
                             size_t dim_shift,
                             size_t spanned_shape_size,
                             size_t dim_size)
            {
                size_t pos = pos_in_spanned_data / spanned_shape_size % dim_size;
                size_t shift = (pos + dim_shift) % dim_size - pos;
                return pos_in_spanned_data + shift * spanned_shape_size;
            }

            void roll(const char* arg,
                      const int64_t* shift,
                      const int64_t* axes,
                      char* out,
                      const Shape& arg_shape,
                      const Shape& shift_shape,
                      const Shape& axes_shape,
                      size_t elem_size)
            {
                std::vector<int64_t> axes_vector = std::vector<int64_t>(axes, axes + axes_shape[0]);
                for (auto& axis : axes_vector)
                {
                    if (axis < 0)
                        axis += arg_shape.size();
                }

                std::vector<int64_t> shift_vector = std::vector<int64_t>(arg_shape.size(), 0);
                for (size_t i = 0; i < axes_vector.size(); i++)
                {
                    int64_t shift_sum = shift_vector[axes_vector[i]] + shift[i];
                    int64_t dim_size = arg_shape[axes_vector[i]];
                    // the modulo which supports negative values
                    shift_vector[axes_vector[i]] = (shift_sum % dim_size + dim_size) % dim_size;
                }

                size_t last_dim = arg_shape[arg_shape.size() - 1];
                size_t start = 0;
                while (start < shape_size(arg_shape))
                {
                    size_t left_block_size = last_dim - shift_vector[shift_vector.size() - 1];
                    size_t p1 = start;
                    size_t p2 = start + left_block_size;
                    size_t spanned_shape_size = 1;
                    for (int dim = arg_shape.size() - 1; dim >= 0; dim--)
                    {
                        p1 = shift_pos(p1, shift_vector[dim], spanned_shape_size, arg_shape[dim]);
                        p2 = shift_pos(p2, shift_vector[dim], spanned_shape_size, arg_shape[dim]);
                        spanned_shape_size *= arg_shape[dim];
                    }

                    if (left_block_size > 0)
                        memcpy(out + p1 * elem_size,
                               arg + start * elem_size,
                               left_block_size * elem_size);

                    size_t right_block_size = last_dim - left_block_size;
                    if (right_block_size > 0)
                        memcpy(out + p2 * elem_size,
                               arg + (start + left_block_size) * elem_size,
                               right_block_size * elem_size);

                    start += last_dim;
                }
            }
        }
    }
}
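
For orientation (again an aside, not part of the diff): a minimal sketch of how this reference kernel could be called directly on a small 1-D i32 buffer, assuming the header above is available on the include path. The interpreter backend further down drives it the same way, just with pointers taken from host tensors.

#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

#include "ngraph/runtime/reference/roll.hpp"
#include "ngraph/shape.hpp"

int main()
{
    using namespace ngraph;

    std::vector<int32_t> input{1, 2, 3, 4, 5};
    std::vector<int32_t> output(input.size());
    std::vector<int64_t> shift{2};
    std::vector<int64_t> axes{0};

    // The kernel is type-agnostic: it works on raw bytes plus an element size.
    runtime::reference::roll(reinterpret_cast<const char*>(input.data()),
                             shift.data(),
                             axes.data(),
                             reinterpret_cast<char*>(output.data()),
                             Shape{input.size()}, // arg_shape
                             Shape{shift.size()}, // shift_shape
                             Shape{axes.size()},  // axes_shape
                             sizeof(int32_t));

    for (auto v : output)
        std::cout << v << ' '; // expected: 4 5 1 2 3
    std::cout << '\n';
}

Per run of the innermost dimension the kernel copies two contiguous blocks, the "left" block that stays in range and the "right" block that wraps around, which is why it can operate on char pointers with memcpy instead of element-typed loops.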
@@ -379,6 +379,7 @@ set(MULTI_TEST_SRC
    backend/reverse_sequence.in.cpp
    backend/reverse.in.cpp
    backend/roi_pooling.in.cpp
    backend/roll.in.cpp
    backend/round.in.cpp
    backend/scatter_nd_update.in.cpp
    backend/select.in.cpp
ngraph/test/backend/roll.in.cpp (new file, 206 lines)
@@ -0,0 +1,206 @@
//*****************************************************************************
// Copyright 2017-2021 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/opsets/opset7.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/shape.hpp"
#include "runtime/backend.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/test_control.hpp"

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";

NGRAPH_TEST(${BACKEND_NAME}, roll_2d_input)
{
    Shape shape{4, 3};
    auto x = make_shared<opset7::Parameter>(element::f32, shape);
    auto shift = make_shared<opset7::Constant>(element::i64, Shape{1}, vector<int64_t>{1});
    auto axes = make_shared<opset7::Constant>(element::i64, Shape{1}, vector<int64_t>{0});
    auto f = make_shared<Function>(make_shared<opset7::Roll>(x, shift, axes), ParameterVector{x});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto x_tensor = backend->create_tensor(element::f32, shape);
    copy_data(x_tensor,
              vector<float>{50.2907, 70.8054, -68.3403, 62.6444, 4.9748, -18.5551,
                            40.5383, -15.3859, -4.5881, -43.3479, 94.1676, -95.7097});
    auto result = backend->create_tensor(element::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {x_tensor});
    EXPECT_TRUE(test::all_close_f(
        (vector<float>{-43.3479, 94.1676, -95.7097, 50.2907, 70.8054, -68.3403,
                       62.6444, 4.9748, -18.5551, 40.5383, -15.3859, -4.5881}),
        read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, roll_2d_input_negative_shift)
{
    Shape shape{4, 3};
    auto x = make_shared<opset7::Parameter>(element::f32, shape);
    auto shift = make_shared<opset7::Constant>(element::i32, Shape{2}, vector<int32_t>{-1, 2});
    auto axes = make_shared<opset7::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
    auto f = make_shared<Function>(make_shared<opset7::Roll>(x, shift, axes), ParameterVector{x});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto x_tensor = backend->create_tensor(element::f32, shape);
    copy_data(x_tensor,
              vector<float>{50.2907, 70.8054, -68.3403, 62.6444, 4.9748, -18.5551,
                            40.5383, -15.3859, -4.5881, -43.3479, 94.1676, -95.7097});
    auto result = backend->create_tensor(element::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {x_tensor});
    EXPECT_TRUE(test::all_close_f(
        (vector<float>{4.9748, -18.5551, 62.6444, -15.3859, -4.5881, 40.5383,
                       94.1676, -95.7097, -43.3479, 70.8054, -68.3403, 50.2907}),
        read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, roll_repeated_axes)
{
    Shape shape{4, 3};
    auto x = make_shared<opset7::Parameter>(element::i64, shape);
    auto shift = make_shared<opset7::Constant>(element::i64, Shape{3}, vector<int64_t>{1, 2, 1});
    auto axes = make_shared<opset7::Constant>(element::i64, Shape{3}, vector<int64_t>{0, 1, 0});
    auto f = make_shared<Function>(make_shared<opset7::Roll>(x, shift, axes), ParameterVector{x});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto x_tensor = backend->create_tensor(element::i64, shape);
    copy_data(x_tensor, vector<int64_t>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
    auto result = backend->create_tensor(element::i64, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {x_tensor});
    EXPECT_TRUE(test::all_close((vector<int64_t>{8, 9, 7, 11, 12, 10, 2, 3, 1, 5, 6, 4}),
                                read_vector<int64_t>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, roll_3d_input)
{
    Shape shape{4, 2, 3};
    auto x = make_shared<opset7::Parameter>(element::f32, shape);
    auto shift = make_shared<opset7::Constant>(element::i64, Shape{3}, vector<int64_t>{2, 1, 3});
    auto axes = make_shared<opset7::Constant>(element::i64, Shape{3}, vector<int64_t>{0, 1, 2});
    auto f = make_shared<Function>(make_shared<opset7::Roll>(x, shift, axes), ParameterVector{x});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    auto x_tensor = backend->create_tensor(element::f32, shape);
    copy_data(x_tensor, vector<float>{94.0773, 33.0599, 58.1724, -20.3640, 54.5372, -54.3023,
                                      10.4662, 11.7532, -11.7692, 56.4223, -95.3774, 8.8978,
                                      1.9305, 13.8025, 12.0827, 81.4669, 19.5321, -8.9553,
                                      -75.3226, 20.8033, 20.7660, 62.7361, 14.9372, -33.0825});
    auto result = backend->create_tensor(element::f32, shape);
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {x_tensor});
    EXPECT_TRUE(test::all_close_f(
        (vector<float>{81.4669, 19.5321, -8.9553, 1.9305, 13.8025, 12.0827,
                       62.7361, 14.9372, -33.0825, -75.3226, 20.8033, 20.7660,
                       -20.3640, 54.5372, -54.3023, 94.0773, 33.0599, 58.1724,
                       56.4223, -95.3774, 8.8978, 10.4662, 11.7532, -11.7692}),
        read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, roll_3d_input_negative_shift)
{
    Shape shape{4, 2, 3};
    auto x = make_shared<opset7::Parameter>(element::f32, shape);
    auto shift = make_shared<opset7::Constant>(element::i32, Shape{3}, vector<int32_t>{-5, 1, 3});
    auto axes = make_shared<opset7::Constant>(element::i64, Shape{3}, vector<int64_t>{0, 1, 1});
    auto f = make_shared<Function>(make_shared<opset7::Roll>(x, shift, axes), ParameterVector{x});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    auto x_tensor = backend->create_tensor(element::f32, shape);
    copy_data(x_tensor, vector<float>{94.0773, 33.0599, 58.1724, -20.3640, 54.5372, -54.3023,
                                      10.4662, 11.7532, -11.7692, 56.4223, -95.3774, 8.8978,
                                      1.9305, 13.8025, 12.0827, 81.4669, 19.5321, -8.9553,
                                      -75.3226, 20.8033, 20.7660, 62.7361, 14.9372, -33.0825});
    auto result = backend->create_tensor(element::f32, shape);
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {x_tensor});
    EXPECT_TRUE(test::all_close_f(
        (vector<float>{10.4662, 11.7532, -11.7692, 56.4223, -95.3774, 8.8978,
                       1.9305, 13.8025, 12.0827, 81.4669, 19.5321, -8.9553,
                       -75.3226, 20.8033, 20.7660, 62.7361, 14.9372, -33.0825,
                       94.0773, 33.0599, 58.1724, -20.3640, 54.5372, -54.3023}),
        read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, roll_negative_axes)
{
    Shape shape{4, 2, 3};
    auto x = make_shared<opset7::Parameter>(element::i32, shape);
    auto shift = make_shared<opset7::Constant>(element::i64, Shape{3}, vector<int64_t>{2, -1, -7});
    auto axes = make_shared<opset7::Constant>(element::i32, Shape{3}, vector<int32_t>{-1, -1, -2});
    auto f = make_shared<Function>(make_shared<opset7::Roll>(x, shift, axes), ParameterVector{x});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    auto x_tensor = backend->create_tensor(element::i32, shape);
    copy_data(x_tensor, vector<int32_t>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                                        13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
    auto result = backend->create_tensor(element::i32, shape);
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {x_tensor});
    EXPECT_TRUE(test::all_close((vector<int32_t>{6, 4, 5, 3, 1, 2, 12, 10, 11, 9, 7, 8,
                                                 18, 16, 17, 15, 13, 14, 24, 22, 23, 21, 19, 20}),
                                read_vector<int32_t>(result)));
}
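
One detail the roll_repeated_axes test above exercises: when an axis is listed more than once, its shifts accumulate, so shift {1, 2, 1} over axes {0, 1, 0} behaves like shift {2, 2} over axes {0, 1} (this follows from the shift_sum accumulation in roll.hpp). A quick standalone check of that expected output, using a naive 2-D roll rather than the library code:

#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
    const int64_t rows = 4, cols = 3;
    const int64_t row_shift = 2; // 1 + 1 accumulated on the repeated axis 0
    const int64_t col_shift = 2; // axis 1
    std::vector<int64_t> in{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
    std::vector<int64_t> out(in.size());

    // Each element moves to ((r + row_shift) mod rows, (c + col_shift) mod cols).
    for (int64_t r = 0; r < rows; ++r)
        for (int64_t c = 0; c < cols; ++c)
            out[((r + row_shift) % rows) * cols + (c + col_shift) % cols] = in[r * cols + c];

    for (auto v : out)
        std::cout << v << ' '; // prints: 8 9 7 11 12 10 2 3 1 5 6 4
    std::cout << '\n';
}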
@@ -1060,6 +1060,14 @@ rnn_cell_zero_bias_default_attrs
# Activation function hardsigmoid is not supported
gru_cell_hardsigmoid_activation_function

# Roll is not implemented yet for CPU, GPU
roll_2d_input
roll_2d_input_negative_shift
roll_repeated_axes
roll_3d_input
roll_3d_input_negative_shift
roll_negative_axes

#-------------------------------------------------------------------------------
#
# Inference Engine CPU plugin excludes
@@ -52,6 +52,7 @@
#include <ngraph/runtime/reference/reverse_sequence.hpp>
#include <ngraph/runtime/reference/rnn_cell.hpp>
#include <ngraph/runtime/reference/roi_pooling.hpp>
#include <ngraph/runtime/reference/roll.hpp>
#include <ngraph/runtime/reference/scatter_nd_update.hpp>
#include <ngraph/runtime/reference/select.hpp>
#include <ngraph/runtime/reference/selu.hpp>
@@ -2126,6 +2127,48 @@ namespace
        return true;
    }

    template <element::Type_t ET>
    bool evaluate(const shared_ptr<op::v7::Roll>& op,
                  const HostTensorVector& outputs,
                  const HostTensorVector& inputs)
    {
        const auto& shiftType = inputs[1]->get_element_type();
        std::vector<int64_t> shift_int64;
        if (shiftType == element::Type_t::i32)
        {
            auto shift = inputs[1]->get_data_ptr<const int32_t>();
            shift_int64.resize(shape_size(inputs[1]->get_shape()));
            std::transform(shift,
                           shift + shape_size(inputs[1]->get_shape()),
                           shift_int64.begin(),
                           [](const int32_t& elem) { return static_cast<int64_t>(elem); });
        }
        const auto& axesType = inputs[2]->get_element_type();
        std::vector<int64_t> axes_int64;
        if (axesType == element::Type_t::i32)
        {
            auto axes = inputs[2]->get_data_ptr<const int32_t>();
            axes_int64.resize(shape_size(inputs[2]->get_shape()));
            std::transform(axes,
                           axes + shape_size(inputs[2]->get_shape()),
                           axes_int64.begin(),
                           [](const int32_t& elem) { return static_cast<int64_t>(elem); });
        }
        runtime::reference::roll(inputs[0]->get_data_ptr<const char>(),
                                 inputs[1]->get_element_type() != element::Type_t::i64
                                     ? shift_int64.data()
                                     : inputs[1]->get_data_ptr<const int64_t>(),
                                 inputs[2]->get_element_type() != element::Type_t::i64
                                     ? axes_int64.data()
                                     : inputs[2]->get_data_ptr<const int64_t>(),
                                 outputs[0]->get_data_ptr<char>(),
                                 inputs[0]->get_shape(),
                                 inputs[1]->get_shape(),
                                 inputs[2]->get_shape(),
                                 inputs[0]->get_element_type().size());
        return true;
    }

    template <typename T>
    bool evaluate_node(std::shared_ptr<Node> node,
                       const HostTensorVector& outputs,
@@ -85,3 +85,5 @@ NGRAPH_OP(Round, op::v5)
NGRAPH_OP(CTCGreedyDecoderSeqLen, op::v6)
NGRAPH_OP(GatherElements, op::v6)
NGRAPH_OP(MVN, ngraph::op::v6)

NGRAPH_OP(Roll, ngraph::op::v7)