[Core] Move op_eval tests into Template plugin (#13001)

* Remove redundant tests

* Remove MatMul op_eval tests

* Remove RoiAlign, RoiPooling op_eval tests

* Move VariadicSplit op_eval tests to TemplatePlugin

* Move Transpose op_eval tests to TemplatePlugin

* Move Interpolate op_eval tests to Template Plugin

* Redo Interpolate_v4 tests

* Adapt to new exception messages
Tomasz Jankowski 2022-10-14 06:45:28 +02:00 committed by GitHub
parent 4c329fbe5a
commit b424ee2568
30 changed files with 697 additions and 3415 deletions

View File

@@ -97,13 +97,13 @@ std::string get_file_name(const std::string& path);
* @brief Interface function to get absolute path of file
* @param path - path to file, can be relative to current working directory
* @return Absolute path of file
* @throw runtime_exception if any error occurred
* @throw runtime_error if any error occurred
*/
std::string get_absolute_file_path(const std::string& path);
/**
* @brief Interface function to create directory recursively by given path
* @param path - path to file, can be relative to current working directory
* @throw runtime_exception if any error occurred
* @throw runtime_error if any error occurred
*/
void create_directory_recursive(const std::string& path);
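
Both helpers now document std::runtime_error on failure. A minimal usage sketch follows (hypothetical caller; the header and namespace enclosing these declarations are not shown in this hunk and are assumed to be in scope, and the paths are illustrative):

#include <iostream>
#include <stdexcept>
#include <string>

int main() {
    try {
        // Resolve a path relative to the current working directory.
        const std::string abs_path = get_absolute_file_path("model.xml");
        // Create the directory together with any missing parents.
        create_directory_recursive("dumps/run_01/layers");
        std::cout << abs_path << std::endl;
    } catch (const std::runtime_error& e) {
        std::cerr << "File utility failed: " << e.what() << std::endl;
    }
    return 0;
}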

View File

@@ -446,29 +446,7 @@ if (ENABLE_TEMPLATE)
list(APPEND UNIT_TESTS_DEPENDENCIES openvino_template_plugin)
set(OP_EVAL_TEST_SRC
# It should be a part of template plugin
op_eval/binary_convolution.cpp
op_eval/bucketize.cpp
op_eval/clamp.cpp
op_eval/einsum.cpp
op_eval/floor_mod.cpp
op_eval/gelu.cpp
op_eval/hsigmoid.cpp
op_eval/hswish.cpp
op_eval/interpolate.cpp
op_eval/loop.cpp
op_eval/matmul.cpp
op_eval/memory.cpp
op_eval/mish.cpp
op_eval/non_zero.cpp
op_eval/roi_align.cpp
op_eval/roi_pooling.cpp
op_eval/round.cpp
op_eval/softplus.cpp
op_eval/split.cpp
op_eval/swish.cpp
op_eval/strided_slice.cpp
op_eval/transpose.cpp
op_eval/variadic_split.cpp)
op_eval/memory.cpp)
endif()
# SOURCE FOR FRONTEND TESTING
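
The removed op_eval cases are expected to be re-hosted as Template plugin tests, i.e. executed through the regular plugin API rather than direct Function::evaluate calls. A minimal sketch of that style, assuming the Template plugin is built and registered under the "TEMPLATE" device name (the op choice, test name, and values are illustrative, not taken from this PR):

#include <gtest/gtest.h>
#include <algorithm>
#include <vector>
#include <openvino/openvino.hpp>
#include <openvino/opsets/opset8.hpp>

TEST(template_plugin_sketch, relu_reference) {
    // Build a tiny model: Relu over a 1x4 input.
    auto param = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::Shape{1, 4});
    auto relu = std::make_shared<ov::opset8::Relu>(param);
    auto model = std::make_shared<ov::Model>(ov::OutputVector{relu}, ov::ParameterVector{param});

    // Compile and run on the reference (Template) device.
    ov::Core core;
    auto compiled = core.compile_model(model, "TEMPLATE");
    auto request = compiled.create_infer_request();

    ov::Tensor input(ov::element::f32, ov::Shape{1, 4});
    const std::vector<float> in{-1.0f, -0.5f, 0.0f, 2.0f};
    std::copy(in.begin(), in.end(), input.data<float>());
    request.set_input_tensor(input);
    request.infer();

    const ov::Tensor output = request.get_output_tensor();
    const std::vector<float> expected{0.0f, 0.0f, 0.0f, 2.0f};
    for (size_t i = 0; i < expected.size(); ++i)
        EXPECT_NEAR(output.data<float>()[i], expected[i], 1e-6f);
}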

View File

@@ -1,219 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "engines_util/execute_tools.hpp"
#include "engines_util/test_case.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/tensor.hpp"
using namespace std;
using namespace ngraph;
template <typename T_IN, typename T_KERN>
static void BinaryConvolutionTest(const std::vector<T_IN>& inputs,
const Shape inputs_shape,
const std::vector<T_KERN>& filters,
const Shape filters_shape,
const std::vector<T_IN>& outputs,
const Shape outputs_shape,
const Strides& strides,
const CoordinateDiff& padding,
const Strides& dilations) {
const CoordinateDiff pads_begin{padding};
const CoordinateDiff pads_end{padding};
const op::PadType auto_pad{op::PadType::EXPLICIT};
float pad_value = 0;
auto inputs_param = make_shared<op::Parameter>(element::from<T_IN>(), inputs_shape);
auto filters_const = make_shared<op::Constant>(element::u1, filters_shape, &filters[0]);
auto bin_conv =
make_shared<op::v1::BinaryConvolution>(inputs_param,
filters_const,
strides,
pads_begin,
pads_end,
dilations,
op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT,
pad_value,
auto_pad);
auto f = make_shared<Function>(bin_conv, ParameterVector{inputs_param});
auto test_case = test::TestCase(f);
test_case.add_input(inputs_shape, inputs);
test_case.add_expected_output(outputs_shape, outputs);
test_case.run();
}
template <typename T_IN>
static void ConvolutionTest(const std::vector<T_IN>& inputs,
const Shape inputs_shape,
const std::vector<T_IN>& filters,
const Shape filters_shape,
const std::vector<T_IN>& outputs,
const Shape outputs_shape,
const Strides& strides,
const CoordinateDiff& padding,
const Strides& dilations) {
const CoordinateDiff pads_begin{padding};
const CoordinateDiff pads_end{padding};
const op::PadType auto_pad{op::PadType::EXPLICIT};
auto inputs_param = make_shared<op::Parameter>(element::from<T_IN>(), inputs_shape);
auto filters_param = make_shared<op::Parameter>(element::from<T_IN>(), filters_shape);
auto conv = make_shared<op::v1::Convolution>(inputs_param,
filters_param,
strides,
pads_begin,
pads_end,
dilations,
auto_pad);
auto f = make_shared<Function>(conv, ParameterVector{inputs_param, filters_param});
auto test_case = test::TestCase(f);
test_case.add_input(inputs_shape, inputs);
test_case.add_input(filters_shape, filters);
test_case.add_expected_output(outputs_shape, outputs);
test_case.run();
}
// --------------------- 1D convolution ------------------------------------------
TEST(op_eval, bin_convolution_1D_1batch_1channel_no_padding) {
const Strides strides{1};
const CoordinateDiff padding{0};
const Strides dilations{1};
const Shape inputs_shape{1, 1, 5};
const std::vector<float> inputs_conv{1.0f, -1.0f, -1.0f, 1.0f, -1.0f};
const std::vector<float> inputs_bin_conv{1.0f, 0.0f, 0.0f, 1.0f, 0.0f};
const Shape filters_shape{1, 1, 3};
const std::vector<float> filters_conv{1.0f, -1.0f, 1.0f};
const std::vector<uint8_t> filters_bin_conv{0xA0}; // 1010 0000
const Shape outputs_shape{1, 1, 3};
const std::vector<float> outputs{1.0f, 1.0f, -3.0f};
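// In XNOR_POPCOUNT mode each output element is
// 2 * popcount(XNOR(input_bits, filter_bits)) - kernel_size, which reproduces the
// +/-1 convolution above: first window XNOR(100, 101) = 110 -> 2 * 2 - 3 = 1,
// last window XNOR(010, 101) = 000 -> 2 * 0 - 3 = -3.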
BinaryConvolutionTest(inputs_bin_conv,
inputs_shape,
filters_bin_conv,
filters_shape,
outputs,
outputs_shape,
strides,
padding,
dilations);
ConvolutionTest(inputs_conv,
inputs_shape,
filters_conv,
filters_shape,
outputs,
outputs_shape,
strides,
padding,
dilations);
}
// --------------------- 3D convolution ------------------------------------------
// clang-format off
TEST(op_eval, bin_convolution_3D_1batch_1channel_no_padding)
{
const Strides strides{1, 1, 1};
const CoordinateDiff padding{0, 0, 0};
const Strides dilations{1, 1, 1};
const Shape inputs_shape{1, 1, 4, 4, 4};
const std::vector<float> inputs_conv{
// depth: 1
1.0f, -1.0f, -1.0f, 1.0f,
-1.0f, 1.0f, -1.0f, 1.0f,
1.0f, 1.0f, -1.0f, 1.0f,
1.0f, -1.0f, 1.0f, -1.0f,
// depth: 2
-1.0f, 1.0f, 1.0f, 1.0f,
1.0f, -1.0f, -1.0f, 1.0f,
-1.0f, 1.0f, -1.0f, 1.0f,
1.0f, -1.0f, 1.0f, -1.0f,
// depth: 3
1.0f, 1.0f, 1.0f, -1.0f,
-1.0f, 1.0f, -1.0f, 1.0f,
1.0f, 1.0f, -1.0f, 1.0f,
1.0f, -1.0f, 1.0f, -1.0f,
// depth: 4
1.0f, -1.0f, 1.0f, -1.0f,
1.0f, 1.0f, -1.0f, 1.0f,
-1.0f, 1.0f, 1.0f, 1.0f,
1.0f, 1.0f, -1.0f, 1.0f
};
const std::vector<float> inputs_bin_conv{
// depth: 1
1.0f, 0.0f, 0.0f, 1.0f,
0.0f, 1.0f, 0.0f, 1.0f,
1.0f, 1.0f, 0.0f, 1.0f,
1.0f, 0.0f, 1.0f, 0.0f,
// depth: 2
0.0f, 1.0f, 1.0f, 1.0f,
1.0f, 0.0f, 0.0f, 1.0f,
0.0f, 1.0f, 0.0f, 1.0f,
1.0f, 0.0f, 1.0f, 0.0f,
// depth: 3
1.0f, 1.0f, 1.0f, 0.0f,
0.0f, 1.0f, 0.0f, 1.0f,
1.0f, 1.0f, 0.0f, 1.0f,
1.0f, 0.0f, 1.0f, 0.0f,
// depth: 4
1.0f, 0.0f, 1.0f, 0.0f,
1.0f, 1.0f, 0.0f, 1.0f,
0.0f, 1.0f, 1.0f, 1.0f,
1.0f, 1.0f, 0.0f, 1.0f
};
const Shape filters_shape{1, 1, 3, 3, 3};
const std::vector<float> filters_conv{
// depth: 1
1.0f, -1.0f, 1.0f,
-1.0f, 1.0f, -1.0f,
1.0f, -1.0f, 1.0f,
// depth: 2
-1.0f, 1.0f, 1.0f,
1.0f, -1.0f, 1.0f,
1.0f, 1.0f, -1.0f,
// depth: 3
1.0f, 1.0f, -1.0f,
-1.0f, 1.0f, -1.0f,
1.0f, 1.0f, 1.0f};
const std::vector<uint8_t> filters_bin_conv{0xAA, 0xBB, 0xB2, 0xE0};
const Shape outputs_shape{1, 1, 2, 2, 2};
const std::vector<float> outputs{
// depth: 1
13.0f, 3.0f,
-3.0f, -3.0f,
// depth: 2
-3.0f, 5.0f,
11.0f, -3.0f};
BinaryConvolutionTest(
inputs_bin_conv,
inputs_shape,
filters_bin_conv,
filters_shape,
outputs,
outputs_shape,
strides,
padding,
dilations);
ConvolutionTest(
inputs_conv,
inputs_shape,
filters_conv,
filters_shape,
outputs,
outputs_shape,
strides,
padding,
dilations);
}
// clang-format on

View File

@@ -1,30 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "engines_util/test_case.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
using namespace std;
using namespace ngraph;
TEST(op_eval, DISABLED_bucketize_empty_buckets) {
Shape data_shape{1, 1, 3};
Shape bucket_shape{0};
const auto data = make_shared<op::Parameter>(element::f32, data_shape);
const auto buckets = make_shared<op::Parameter>(element::f32, bucket_shape);
const auto bucketize = make_shared<op::v3::Bucketize>(data, buckets);
const auto f = make_shared<Function>(bucketize, ParameterVector{data, buckets});
vector<float> data_vect = {8.f, 1.f, 2.f};
vector<float> buckets_vect;
vector<int> expected_vect = {0, 0, 0};
auto test_case = test::TestCase(f);
test_case.add_input<float>(data_shape, data_vect);
test_case.add_input<float>(bucket_shape, buckets_vect);
test_case.add_expected_output<int>(data_shape, expected_vect);
test_case.run();
}

View File

@@ -1,348 +0,0 @@
//*****************************************************************************
// Copyright (C) 2018-2022 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "engines_util/test_case.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
using namespace std;
using namespace ngraph;
namespace {
template <typename T>
void clamp_test(const element::Type& type,
const PartialShape& dynamic_shape,
const Shape& static_shape,
const std::vector<T>& input,
double min,
double max,
const std::vector<T>& output) {
auto data = make_shared<op::Parameter>(type, dynamic_shape);
auto clamp = make_shared<op::Clamp>(data, min, max);
auto function = make_shared<Function>(clamp, ParameterVector{data});
auto test_case = test::TestCase(function);
test_case.template add_input<T>(static_shape, input);
test_case.template add_expected_output<T>(static_shape, output);
test_case.run();
}
} // namespace
TEST(op_eval, clamp_float_dynamic) {
auto type = element::f32;
typedef float ctype;
auto sshape = Shape{5, 2};
auto dshape = PartialShape::dynamic();
auto min = numeric_limits<ctype>::min();
auto max = numeric_limits<ctype>::max();
auto pinf = numeric_limits<float>::infinity();
auto ninf = -numeric_limits<float>::infinity();
vector<ctype> input{min, max, ninf, pinf, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.000001};
clamp_test<ctype>(type,
dshape,
sshape,
{-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8},
0.2,
0.6,
{0.2, 0.2, 0.2, 0.2, 0.3, 0.4, 0.5, 0.6, 0.6, 0.6});
clamp_test<ctype>(type,
dshape,
sshape,
input,
10.0,
20.0,
{10.0, 20.0, 10.0, 20.0, 10.0, 10.0, 10.000001, 19.999999, 20.0, 20.0});
clamp_test<ctype>(type,
dshape,
sshape,
input,
10.0,
pinf,
{10.0, max, 10.0, pinf, 10.0, 10.0, 10.000001, 19.999999, 20.0, 20.000001});
clamp_test<ctype>(type,
dshape,
sshape,
input,
ninf,
20.0,
{min, 20.0, ninf, 20.0, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.0});
}
TEST(op_eval, clamp_int8_dynamic) {
auto type = element::i8;
typedef int8_t ctype;
auto sshape = Shape{4, 2};
auto dshape = PartialShape::dynamic();
auto min = numeric_limits<ctype>::min();
auto max = numeric_limits<ctype>::max();
auto pinf = numeric_limits<double>::infinity();
auto ninf = -numeric_limits<double>::infinity();
vector<ctype> input{min, max, 9, 10, 11, 19, 20, 21};
clamp_test<ctype>(type, dshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20});
clamp_test<ctype>(type, dshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21});
clamp_test<ctype>(type, dshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20});
}
TEST(op_eval, clamp_int16_dynamic) {
auto type = element::i16;
typedef int16_t ctype;
auto sshape = Shape{4, 2};
auto dshape = PartialShape::dynamic();
auto min = numeric_limits<ctype>::min();
auto max = numeric_limits<ctype>::max();
auto pinf = numeric_limits<double>::infinity();
auto ninf = -numeric_limits<double>::infinity();
vector<ctype> input{min, max, 9, 10, 11, 19, 20, 21};
clamp_test<ctype>(type, dshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20});
clamp_test<ctype>(type, dshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21});
clamp_test<ctype>(type, dshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20});
}
TEST(op_eval, clamp_int32_dynamic) {
auto type = element::i32;
typedef int32_t ctype;
auto sshape = Shape{4, 2};
auto dshape = PartialShape::dynamic();
auto min = numeric_limits<ctype>::min();
auto max = numeric_limits<ctype>::max();
auto pinf = numeric_limits<double>::infinity();
auto ninf = -numeric_limits<double>::infinity();
vector<ctype> input{min, max, 9, 10, 11, 19, 20, 21};
clamp_test<ctype>(type, dshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20});
clamp_test<ctype>(type, dshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21});
clamp_test<ctype>(type, dshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20});
}
TEST(op_eval, clamp_int64_dynamic) {
auto type = element::i64;
typedef int64_t ctype;
auto sshape = Shape{4, 2};
auto dshape = PartialShape::dynamic();
auto min = numeric_limits<ctype>::min();
auto max = numeric_limits<ctype>::max();
auto pinf = numeric_limits<double>::infinity();
auto ninf = -numeric_limits<double>::infinity();
vector<ctype> input{min, max, 9, 10, 11, 19, 20, 21};
clamp_test<ctype>(type, dshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20});
clamp_test<ctype>(type, dshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21});
clamp_test<ctype>(type, dshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20});
}
TEST(op_eval, clamp_uint8_dynamic) {
auto type = element::u8;
typedef uint8_t ctype;
auto sshape = Shape{4, 2};
auto dshape = PartialShape::dynamic();
auto min = numeric_limits<ctype>::min();
// TODO: Fix CPU DEX / MLIR correctness bug: using signed comparison for unsigned ints
// auto max = numeric_limits<ctype>::max();
// auto pinf = numeric_limits<double>::infinity();
ctype max = (static_cast<ctype>(1) << (numeric_limits<ctype>::digits - 1)) - 1;
auto pinf = static_cast<double>(max);
auto ninf = -numeric_limits<double>::infinity();
vector<ctype> input{min, max, 9, 10, 11, 19, 20, 21};
clamp_test<ctype>(type, dshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20});
clamp_test<ctype>(type, dshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21});
clamp_test<ctype>(type, dshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20});
}
TEST(op_eval, clamp_uint16_dynamic) {
auto type = element::u16;
typedef uint16_t ctype;
auto sshape = Shape{4, 2};
auto dshape = PartialShape::dynamic();
auto min = numeric_limits<ctype>::min();
// TODO: Fix CPU DEX / MLIR correctness bug: using signed comparison for unsigned ints
// auto max = numeric_limits<ctype>::max();
// auto pinf = numeric_limits<double>::infinity();
ctype max = (static_cast<ctype>(1) << (numeric_limits<ctype>::digits - 1)) - 1;
auto pinf = static_cast<double>(max);
auto ninf = -numeric_limits<double>::infinity();
vector<ctype> input{min, max, 9, 10, 11, 19, 20, 21};
// dynamic shape
clamp_test<ctype>(type, dshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20});
clamp_test<ctype>(type, dshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21});
clamp_test<ctype>(type, dshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20});
}
TEST(op_eval, clamp_uint32_dynamic) {
auto type = element::u32;
typedef uint32_t ctype;
auto sshape = Shape{4, 2};
auto dshape = PartialShape::dynamic();
auto min = numeric_limits<ctype>::min();
// TODO: Fix CPU DEX / MLIR correctness bug: using signed comparison for unsigned ints
// auto max = numeric_limits<ctype>::max();
// auto pinf = numeric_limits<double>::infinity();
ctype max = (static_cast<ctype>(1) << (numeric_limits<ctype>::digits - 1)) - 1;
auto pinf = static_cast<double>(max);
auto ninf = -numeric_limits<double>::infinity();
vector<ctype> input{min, max, 9, 10, 11, 19, 20, 21};
clamp_test<ctype>(type, dshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20});
clamp_test<ctype>(type, dshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21});
clamp_test<ctype>(type, dshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20});
}
TEST(op_eval, clamp_uint64_dynamic) {
auto type = element::u64;
typedef uint64_t ctype;
auto sshape = Shape{4, 2};
auto dshape = PartialShape::dynamic();
auto min = numeric_limits<ctype>::min();
// TODO: Fix CPU DEX / MLIR correctness bug: using signed comparison for unsigned ints
// auto max = numeric_limits<ctype>::max();
// auto pinf = numeric_limits<double>::infinity();
ctype max = (static_cast<ctype>(1) << (32 - 1)) - 1;
auto pinf = static_cast<double>(max);
auto ninf = -numeric_limits<double>::infinity();
vector<ctype> input{min, max, 9, 10, 11, 19, 20, 21};
clamp_test<ctype>(type, dshape, sshape, input, 10.0, 20.0, {10, 20, 10, 10, 11, 19, 20, 20});
clamp_test<ctype>(type, dshape, sshape, input, 10.0, pinf, {10, max, 10, 10, 11, 19, 20, 21});
clamp_test<ctype>(type, dshape, sshape, input, ninf, 20.0, {min, 20, 9, 10, 11, 19, 20, 20});
}
TEST(op_eval, clamp_float16_dynamic) {
auto type = element::f16;
typedef float16 ctype;
auto sshape = Shape{5, 2};
auto dshape = PartialShape::dynamic();
auto min = numeric_limits<ctype>::min();
auto max = numeric_limits<ctype>::max();
auto pinf = numeric_limits<float>::infinity();
auto ninf = -numeric_limits<float>::infinity();
vector<ctype> input{min, max, ninf, pinf, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.000001};
clamp_test<ctype>(type,
dshape,
sshape,
{-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8},
0.2,
0.6,
{0.2, 0.2, 0.2, 0.2, 0.3, 0.4, 0.5, 0.6, 0.6, 0.6});
clamp_test<ctype>(type,
dshape,
sshape,
input,
10.0,
20.0,
{10.0, 20.0, 10.0, 20.0, 10.0, 10.0, 10.000001, 19.999999, 20.0, 20.0});
clamp_test<ctype>(type,
dshape,
sshape,
input,
10.0,
pinf,
{10.0, max, 10.0, pinf, 10.0, 10.0, 10.000001, 19.999999, 20.0, 20.000001});
clamp_test<ctype>(type,
dshape,
sshape,
input,
ninf,
20.0,
{min, 20.0, ninf, 20.0, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.0});
}
TEST(op_eval, clamp_bfloat16_dynamic) {
auto type = element::bf16;
typedef bfloat16 ctype;
auto sshape = Shape{5, 2};
auto dshape = PartialShape::dynamic();
auto min = numeric_limits<ctype>::min();
auto max = numeric_limits<ctype>::max();
auto pinf = numeric_limits<float>::infinity();
auto ninf = -numeric_limits<float>::infinity();
vector<ctype> input{min, max, ninf, pinf, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.000001};
clamp_test<ctype>(type,
dshape,
sshape,
{-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8},
0.2,
0.6,
{0.2, 0.2, 0.2, 0.2, 0.3, 0.4, 0.5, 0.6, 0.6, 0.6});
clamp_test<ctype>(type,
dshape,
sshape,
input,
10.0,
20.0,
{10.0, 20.0, 10.0, 20.0, 10.0, 10.0, 10.000001, 19.999999, 20.0, 20.0});
clamp_test<ctype>(type,
dshape,
sshape,
input,
10.0,
pinf,
{10.0, max, 10.0, pinf, 10.0, 10.0, 10.000001, 19.999999, 20.0, 20.000001});
clamp_test<ctype>(type,
dshape,
sshape,
input,
ninf,
20.0,
{min, 20.0, ninf, 20.0, 9.99999, 10.0, 10.000001, 19.999999, 20.0, 20.0});
}

View File

@@ -1,198 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/einsum.hpp"
#include <string>
#include <vector>
#include "engines_util/test_case.hpp"
#include "gtest/gtest.h"
using namespace std;
using namespace ngraph;
template <typename T>
static void aux_einsum_test(const std::vector<std::vector<T>>& inputs,
const std::vector<Shape>& input_shapes,
const std::string& equation,
const std::vector<T>& expected_result,
const Shape& expected_shape) {
NGRAPH_CHECK(inputs.size() == input_shapes.size());
OutputVector output_vector;
ParameterVector param_vector;
for (const auto& input_shape : input_shapes) {
auto param = make_shared<op::Parameter>(element::from<T>(), input_shape);
output_vector.push_back(param);
param_vector.push_back(param);
}
auto einsum = make_shared<op::v7::Einsum>(output_vector, equation);
auto fun = make_shared<Function>(OutputVector{einsum}, param_vector);
auto test_case = test::TestCase(fun);
for (size_t ind = 0; ind < inputs.size(); ++ind) {
test_case.add_input<T>(input_shapes[ind], inputs[ind]);
}
test_case.add_expected_output<T>(expected_shape, expected_result);
test_case.run();
}
TEST(op_eval, einsum_no_reduction) {
std::string equation = "ab,cd->abcd";
std::vector<float> input1{1.0f, 2.0f};
Shape input1_shape{1, 2};
std::vector<float> input2{3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f};
Shape input2_shape{3, 4};
std::vector<float> expected_result{3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
11.0f, 12.0f, 13.0f, 14.0f, 6.0f, 8.0f, 10.0f, 12.0f,
14.0f, 16.0f, 18.0f, 20.0f, 22.0f, 24.0f, 26.0f, 28.0f};
Shape expected_shape{1, 2, 3, 4};
aux_einsum_test({input1, input2}, {input1_shape, input2_shape}, equation, expected_result, expected_shape);
}
TEST(op_eval, einsum_transpose) {
std::string equation = "ijk->kij";
std::vector<float> input1{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
Shape input1_shape{1, 2, 3};
std::vector<float> expected_result{1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f};
Shape expected_shape{3, 1, 2};
aux_einsum_test({input1}, {input1_shape}, equation, expected_result, expected_shape);
}
TEST(op_eval, einsum_reduce) {
std::string equation = "ab->a";
std::vector<float> input1{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
Shape input1_shape{2, 3};
std::vector<float> expected_result{6.0f, 15.0f};
Shape expected_shape{2};
aux_einsum_test({input1}, {input1_shape}, equation, expected_result, expected_shape);
}
TEST(op_eval, einsum_matrix_multiplication) {
std::string equation = "ab,bc->ac";
std::vector<float> input1{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
Shape input1_shape{2, 3};
std::vector<float> input2{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
Shape input2_shape{3, 2};
std::vector<float> expected_result{22.0f, 28.0f, 49.0f, 64.0f};
Shape expected_shape{2, 2};
aux_einsum_test({input1, input2}, {input1_shape, input2_shape}, equation, expected_result, expected_shape);
}
TEST(op_eval, einsum_multiple_multiplication) {
std::string equation = "ab,bcd,bc->ca";
std::vector<float> input1{1.0, 3.0, 2.0, 7.0, 5.0, 6.0, 0.0, 1.0};
Shape input1_shape{2, 4};
std::vector<float> input2{1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 5.0, 7.0, 3.0, 7.0, 9.0, 1.0};
Shape input2_shape{4, 3, 1};
std::vector<float> input3{4.0, 3.0, 1.0, 6.0, 4.0, 2.0, 2.0, 5.0, 3.0, 1.0, 9.0, 4.0};
Shape input3_shape{4, 3};
std::vector<float> expected_result{145.0, 171.0, 703.0, 231.0, 85.0, 91.0};
Shape expected_shape{3, 2};
aux_einsum_test({input1, input2, input3},
{input1_shape, input2_shape, input3_shape},
equation,
expected_result,
expected_shape);
}
TEST(op_eval, einsum_ellipsis_one_input_reduction) {
std::string equation = "a...->...";
std::vector<float> input1{1.0, 3.0, 2.0, 7.0, 5.0, 6.0, 3.0, 5.0, 2.0, 1.0, 0.0, 7.0};
Shape input1_shape{2, 2, 3};
std::vector<float> expected_result{4.0, 8.0, 4.0, 8.0, 5.0, 13.0};
Shape expected_shape{2, 3};
aux_einsum_test({input1}, {input1_shape}, equation, expected_result, expected_shape);
}
TEST(op_eval, einsum_ellipsis_one_input_transpose) {
std::string equation = "a...->...a";
std::vector<float> input1{1.0, 3.0, 2.0, 7.0, 5.0, 6.0, 3.0, 5.0, 2.0, 1.0, 0.0, 7.0};
Shape input1_shape{2, 2, 3};
std::vector<float> expected_result{
1.0,
3.0,
3.0,
5.0,
2.0,
2.0,
7.0,
1.0,
5.0,
0.0,
6.0,
7.0,
};
Shape expected_shape{2, 3, 2};
aux_einsum_test({input1}, {input1_shape}, equation, expected_result, expected_shape);
}
TEST(op_eval, einsum_ellipsis_mul_by_1dscalar) {
std::string equation = "ab...,...->ab...";
std::vector<float> input1{1.0, 3.0, 2.0, 7.0, 5.0, 6.0, 3.0, 5.0, 2.0, 1.0, 0.0, 7.0};
Shape input1_shape{2, 2, 3};
std::vector<float> input2{0.5};
Shape input2_shape{1};
std::vector<float> expected_result{0.5, 1.5, 1.0, 3.5, 2.5, 3.0, 1.5, 2.5, 1.0, 0.5, 0.0, 3.5};
Shape expected_shape{2, 2, 3};
aux_einsum_test({input1, input2}, {input1_shape, input2_shape}, equation, expected_result, expected_shape);
}
TEST(op_eval, einsum_ellipsis_complex_mul) {
std::string equation = "a...j,j...->a...";
std::vector<float> input1{1.0, 3.0, 2.0, 7.0, 5.0, 6.0, 3.0, 5.0, 2.0, 1.0, 0.0, 7.0};
Shape input1_shape{1, 1, 4, 3};
std::vector<float> input2{3.0, 1.0, 6.0, 2.0, 3.0, 10.0, 9.0, 8.0, 2.0, 9.0, 3.0, 2.0,
4.0, 2.0, 3.0, 1.0, 9.0, 1.0, 11.0, 4.0, 7.0, 2.0, 3.0, 1.0};
Shape input2_shape{3, 4, 2, 1};
std::vector<float> expected_result{27., 85., 37., 66., 30., 58., 50., 8., 37., 123., 55., 83., 16., 48., 24., 30.,
29., 83., 43., 52., 20., 92., 44., 24., 24., 96., 48., 30., 13., 67., 31., 15.};
Shape expected_shape{1, 4, 2, 4};
aux_einsum_test({input1, input2}, {input1_shape, input2_shape}, equation, expected_result, expected_shape);
}
TEST(op_eval, einsum_diagonal) {
std::string equation = "kii->ki";
std::vector<float> input1{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f};
Shape input1_shape{1, 3, 3};
std::vector<float> expected_result{1.0f, 5.0f, 9.0f};
Shape expected_shape{1, 3};
aux_einsum_test({input1}, {input1_shape}, equation, expected_result, expected_shape);
}
TEST(op_eval, einsum_diagonal_with_matmul) {
std::string equation = "abbac,bad->ad";
std::vector<float> input1{4.0, 2.0, 5.0, 4.0, 5.0, 5.0, 1.0, 1.0, 3.0, 3.0, 1.0, 1.0, 2.0, 2.0, 4.0, 1.0, 3.0, 4.0,
4.0, 5.0, 1.0, 3.0, 1.0, 3.0, 1.0, 4.0, 3.0, 5.0, 4.0, 4.0, 5.0, 4.0, 4.0, 5.0, 4.0, 2.0,
2.0, 2.0, 3.0, 3.0, 1.0, 1.0, 4.0, 3.0, 4.0, 2.0, 2.0, 1.0, 1.0, 2.0, 3.0, 1.0, 1.0, 4.0,
2.0, 3.0, 1.0, 3.0, 4.0, 2.0, 5.0, 5.0, 3.0, 4.0, 3.0, 4.0, 5.0, 4.0, 4.0, 5.0, 1.0, 3.0,
4.0, 4.0, 5.0, 3.0, 1.0, 3.0, 2.0, 5.0, 3.0, 2.0, 5.0, 4.0, 4.0, 2.0, 4.0, 4.0, 1.0, 4.0,
4.0, 5.0, 4.0, 4.0, 4.0, 2.0, 3.0, 3.0, 4.0, 2.0, 4.0, 2.0, 5.0, 1.0, 3.0, 2.0, 4.0, 3.0,
5.0, 1.0, 2.0, 3.0, 1.0, 1.0, 2.0, 5.0, 1.0, 1.0, 2.0, 1.0, 4.0, 5.0, 3.0, 4.0, 1.0, 3.0,
3.0, 1.0, 3.0, 2.0, 4.0, 5.0, 1.0, 1.0, 5.0, 4.0, 5.0, 2.0, 2.0, 3.0, 3.0, 1.0, 2.0, 4.0};
Shape input1_shape{2, 3, 3, 2, 4};
std::vector<float> input2{1.0, 4.0, 4.0, 5.0, 3.0, 3.0};
Shape input2_shape{3, 2, 1};
std::vector<float> expected_result{123, 129};
Shape expected_shape{2, 1};
aux_einsum_test({input1, input2}, {input1_shape, input2_shape}, equation, expected_result, expected_shape);
}

View File

@@ -1,133 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/floor_mod.hpp"
#include <string>
#include <vector>
#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/test_control.hpp"
using namespace std;
using namespace ngraph;
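// FloorMod(a, b) = a - b * floor(a / b); the result takes the sign of the divisor,
// e.g. FloorMod(5.1f, -3.0f) = 5.1 - (-3.0) * floor(-1.7) = 5.1 - 6.0 = -0.9.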
TEST(op_eval, floor_mod) {
Shape shape{4};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto B = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::FloorMod>(A, B), ParameterVector{A, B});
std::vector<float> a{5.1, -5.1, 5.1, -5.1};
std::vector<float> b{3.0, 3.0, -3.0, -3.0};
auto test_case = test::TestCase(f);
test_case.add_multiple_inputs<float>({a, b});
test_case.add_expected_output<float>(shape, {2.1, 0.9, -0.9, -2.1});
test_case.run();
}
TEST(op_eval, floor_mod_broadcasted) {
Shape shape_a{2, 1, 2};
Shape shape_b{2, 1};
Shape shape_r{2, 2, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
auto B = make_shared<op::Parameter>(element::f32, shape_b);
auto f = make_shared<Function>(make_shared<op::FloorMod>(A, B), ParameterVector{A, B});
std::vector<float> a{1, 2, 3, 4};
std::vector<float> b{2, 3};
auto test_case = test::TestCase(f);
test_case.add_multiple_inputs<float>({a, b});
test_case.add_expected_output<float>(shape_r, {1.0f, 0.0f, 1.0f, 2.0f, 1.0f, 0.0f, 0.0f, 1.0f});
test_case.run();
}
TEST(op_eval, floor_mod_scalars) {
Shape shape{};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto B = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::FloorMod>(A, B), ParameterVector{A, B});
std::vector<float> a{2};
std::vector<float> b{3};
auto test_case = test::TestCase(f);
test_case.add_multiple_inputs<float>({a, b});
test_case.add_expected_output<float>(shape, {2.0f});
test_case.run();
}
TEST(op_eval, floor_mod_vector_scalar) {
Shape shape_a{2, 2};
Shape shape_b{};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
auto B = make_shared<op::Parameter>(element::f32, shape_b);
auto f = make_shared<Function>(make_shared<op::FloorMod>(A, B), ParameterVector{A, B});
std::vector<float> a{2, 3, 4, 5};
std::vector<float> b{2};
auto test_case = test::TestCase(f);
test_case.add_multiple_inputs<float>({a, b});
test_case.add_expected_output<float>(shape_a, {0.0f, 1.0f, 0.0f, 1.0f});
test_case.run();
}
TEST(op_eval, floor_mod_int64) {
Shape shape{4};
auto A = make_shared<op::Parameter>(element::i64, shape);
auto B = make_shared<op::Parameter>(element::i64, shape);
auto f = make_shared<Function>(make_shared<op::FloorMod>(A, B), ParameterVector{A, B});
std::vector<int64_t> a{5, -5, 5, -5};
std::vector<int64_t> b{3, 3, -3, -3};
auto test_case = test::TestCase(f);
test_case.add_multiple_inputs<int64_t>({a, b});
test_case.add_expected_output<int64_t>(shape, {2, 1, -1, -2});
test_case.run();
}
TEST(op_eval, floor_mod_broadcasted_int64) {
Shape shape_a{2, 1, 2};
Shape shape_b{2, 1};
Shape shape_r{2, 2, 2};
auto A = make_shared<op::Parameter>(element::i64, shape_a);
auto B = make_shared<op::Parameter>(element::i64, shape_b);
auto f = make_shared<Function>(make_shared<op::FloorMod>(A, B), ParameterVector{A, B});
std::vector<int64_t> a{1, 2, 3, 4};
std::vector<int64_t> b{2, 3};
auto test_case = test::TestCase(f);
test_case.add_multiple_inputs<int64_t>({a, b});
test_case.add_expected_output<int64_t>(shape_r, {1, 0, 1, 2, 1, 0, 0, 1});
test_case.run();
}
TEST(op_eval, floor_mod_int32) {
Shape shape{4};
auto A = make_shared<op::Parameter>(element::i32, shape);
auto B = make_shared<op::Parameter>(element::i32, shape);
auto f = make_shared<Function>(make_shared<op::FloorMod>(A, B), ParameterVector{A, B});
std::vector<int32_t> a{5, -5, 5, -5};
std::vector<int32_t> b{3, 3, -3, -3};
auto test_case = test::TestCase(f);
test_case.add_multiple_inputs<int32_t>({a, b});
test_case.add_expected_output<int32_t>(shape, {2, 1, -1, -2});
test_case.run();
}

View File

@@ -1,58 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/gelu.hpp"
#include <string>
#include <vector>
#include "engines_util/execute_tools.hpp"
#include "gtest/gtest.h"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
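// The two Gelu approximation modes exercised below follow the usual definitions:
//   ERF:  Gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
//   TANH: Gelu(x) = 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))
// e.g. for x = -1.0 the ERF form gives about -0.158655 and the TANH form about -0.158808.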
TEST(op_eval, gelu_tanh) {
auto p = make_shared<op::Parameter>(element::f32, Shape{});
auto gelu = make_shared<op::v7::Gelu>(p, op::GeluApproximationMode::TANH);
auto fun = make_shared<Function>(OutputVector{gelu}, ParameterVector{p});
std::vector<std::vector<float>> inputs{{-1.0}, {-0.5}, {0}, {0.5}, {1.0}};
std::vector<std::vector<float>> expected_result{{-0.15880796}, {-0.154286}, {0}, {0.345714}, {0.841192}};
for (size_t i = 0; i < inputs.size(); i++) {
auto result = make_shared<HostTensor>();
OPENVINO_SUPPRESS_DEPRECATED_START
ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor<element::Type_t::f32>(Shape{}, inputs[i])}));
OPENVINO_SUPPRESS_DEPRECATED_END
EXPECT_EQ(result->get_element_type(), element::f32);
EXPECT_EQ(result->get_shape(), (Shape{}));
auto result_data = read_vector<float>(result);
EXPECT_NEAR(result_data[0], expected_result[i][0], 0.000001);
}
}
TEST(op_eval, gelu_erf) {
auto p = make_shared<op::Parameter>(element::f32, Shape{});
auto gelu = make_shared<op::v7::Gelu>(p, op::GeluApproximationMode::ERF);
auto fun = make_shared<Function>(OutputVector{gelu}, ParameterVector{p});
std::vector<std::vector<float>> inputs{{-1.0}, {-0.5}, {0}, {0.5}, {1.0}};
std::vector<std::vector<float>> expected_result{{-0.15865529}, {-0.15426877}, {0}, {0.34573123}, {0.8413447}};
for (size_t i = 0; i < inputs.size(); i++) {
auto result = make_shared<HostTensor>();
OPENVINO_SUPPRESS_DEPRECATED_START
ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor<element::Type_t::f32>(Shape{}, inputs[i])}));
OPENVINO_SUPPRESS_DEPRECATED_END
EXPECT_EQ(result->get_element_type(), element::f32);
EXPECT_EQ(result->get_shape(), (Shape{}));
auto result_data = read_vector<float>(result);
EXPECT_NEAR(result_data[0], expected_result[i][0], 0.000001);
}
}

View File

@@ -1,37 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/hsigmoid.hpp"
#include <string>
#include <vector>
#include "engines_util/execute_tools.hpp"
#include "gtest/gtest.h"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
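// HSigmoid(x) = min(max(x + 3, 0), 6) / 6,
// e.g. HSigmoid(-0.5) = 2.5 / 6 = 0.416667 (rounded), matching expected_result below.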
TEST(op_eval, hsigmoid) {
auto p = make_shared<op::Parameter>(element::f32, Shape{3});
auto swish = make_shared<op::v5::HSigmoid>(p);
auto fun = make_shared<Function>(OutputVector{swish}, ParameterVector{p});
std::vector<float> inputs{-0.5f, 0.0f, 0.5f};
std::vector<float> expected_result{0.416667f, 0.5f, 0.583333f};
auto result = make_shared<HostTensor>();
OPENVINO_SUPPRESS_DEPRECATED_START
ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor<element::Type_t::f32>(Shape{3}, inputs)}));
OPENVINO_SUPPRESS_DEPRECATED_END
EXPECT_EQ(result->get_element_type(), element::f32);
EXPECT_EQ(result->get_shape(), Shape{3});
auto result_data = read_vector<float>(result);
for (size_t i = 0; i < inputs.size(); i++)
EXPECT_NEAR(result_data[i], expected_result[i], 0.000001);
}

View File

@@ -1,36 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/hswish.hpp"
#include <string>
#include <vector>
#include "engines_util/execute_tools.hpp"
#include "gtest/gtest.h"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
OPENVINO_SUPPRESS_DEPRECATED_START
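// HSwish(x) = x * min(max(x + 3, 0), 6) / 6,
// e.g. HSwish(-0.5) = -0.5 * 2.5 / 6 = -0.208333 (rounded), matching expected_result below.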
TEST(op_eval, hswish) {
auto p = make_shared<op::Parameter>(element::f32, Shape{3});
auto swish = make_shared<op::v4::HSwish>(p);
auto fun = make_shared<Function>(OutputVector{swish}, ParameterVector{p});
std::vector<float> inputs{-0.5f, 0.0f, 0.5f};
std::vector<float> expected_result{-0.208333f, 0.0f, 0.29166667f};
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor<element::Type_t::f32>(Shape{3}, inputs)}));
EXPECT_EQ(result->get_element_type(), element::f32);
EXPECT_EQ(result->get_shape(), Shape{3});
auto result_data = read_vector<float>(result);
for (size_t i = 0; i < inputs.size(); i++)
EXPECT_NEAR(result_data[i], expected_result[i], 0.000001);
}

View File

@@ -1,536 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/interpolate.hpp"
#include <string>
#include <vector>
#include "engines_util/execute_tools.hpp"
#include "gtest/gtest.h"
#include "ngraph/op/constant.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
#include "util/all_close_f.hpp"
#include "util/test_tools.hpp"
#include "util/type_prop.hpp"
using namespace ngraph;
using InterpolateMode = op::v4::Interpolate::InterpolateMode;
using CoordinateTransformMode = op::v4::Interpolate::CoordinateTransformMode;
using Nearest_mode = op::v4::Interpolate::NearestMode;
using InterpolateAttrs = op::v4::Interpolate::InterpolateAttrs;
using ShapeCalcMode = op::v4::Interpolate::ShapeCalcMode;
OPENVINO_SUPPRESS_DEPRECATED_START
// All examples are from ONNX Resize-11 documentation
// (see https://github.com/onnx/onnx/blob/master/docs/Operators.md).
TEST(op_eval, interpolate_v4_cubic) {
auto data_shape = Shape{1, 1, 4, 4};
struct ShapesAndAttrs {
std::vector<int64_t> spatial_shape;
Shape out_shape;
std::vector<float> scales_data;
CoordinateTransformMode transform_mode;
ShapeCalcMode shape_calculation_mode;
};
std::vector<ShapesAndAttrs> shapes_and_attrs = {// resize_downsample_scales_cubic:
ShapesAndAttrs{{3, 3},
Shape{1, 1, 3, 3},
{0.8f, 0.8f},
CoordinateTransformMode::HALF_PIXEL,
ShapeCalcMode::SCALES},
// resize_downsample_sizes_cubic:
ShapesAndAttrs{{3, 3},
Shape{1, 1, 3, 3},
{0.75f, 0.75f},
CoordinateTransformMode::HALF_PIXEL,
ShapeCalcMode::SIZES},
// resize_upsample_scales_cubic:
ShapesAndAttrs{{8, 8},
Shape{1, 1, 8, 8},
{2.0f, 2.0f},
CoordinateTransformMode::HALF_PIXEL,
ShapeCalcMode::SCALES},
// resize_upsample_scales_cubic_asymmetric:
ShapesAndAttrs{{8, 8},
Shape{1, 1, 8, 8},
{2.0f, 2.0f},
CoordinateTransformMode::ASYMMETRIC,
ShapeCalcMode::SCALES},
// resize_upsample_sizes_cubic:
ShapesAndAttrs{{9, 10},
Shape{1, 1, 9, 10},
{2.25f, 2.5f},
CoordinateTransformMode::HALF_PIXEL,
ShapeCalcMode::SIZES},
// resize_downsample_scales_cubic_align_corners:
// (expected values from ONNX documentation are incorrect!)
ShapesAndAttrs{{3, 3},
Shape{1, 1, 3, 3},
{0.8f, 0.8f},
CoordinateTransformMode::ALIGN_CORNERS,
ShapeCalcMode::SCALES},
// resize_upsample_scales_cubic_align_corners:
ShapesAndAttrs{{8, 8},
Shape{1, 1, 8, 8},
{2.0f, 2.0f},
CoordinateTransformMode::ALIGN_CORNERS,
ShapeCalcMode::SCALES}};
std::vector<float> input_data =
{1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0};
std::vector<std::vector<float>> expected_results = {
{1.47119141, 2.78125, 4.08251953, 6.71142578, 8.02148438, 9.32275391, 11.91650391, 13.2265625, 14.52783203},
{1.63078704f,
3.00462963f,
4.37847222f,
7.12615741f,
8.5f,
9.87384259f,
12.62152778f,
13.99537037f,
15.36921296f},
{0.47265625f, 0.76953125f, 1.24609375f, 1.875f, 2.28125f, 2.91015625f, 3.38671875f, 3.68359375f,
1.66015625f, 1.95703125f, 2.43359375f, 3.0625f, 3.46875f, 4.09765625f, 4.57421875f, 4.87109375f,
3.56640625f, 3.86328125f, 4.33984375f, 4.96875f, 5.375f, 6.00390625f, 6.48046875f, 6.77734375f,
6.08203125f, 6.37890625f, 6.85546875f, 7.484375f, 7.890625f, 8.51953125f, 8.99609375f, 9.29296875f,
7.70703125f, 8.00390625f, 8.48046875f, 9.109375f, 9.515625f, 10.14453125f, 10.62109375f, 10.91796875f,
10.22265625f, 10.51953125f, 10.99609375f, 11.625f, 12.03125f, 12.66015625f, 13.13671875f, 13.43359375f,
12.12890625f, 12.42578125f, 12.90234375f, 13.53125f, 13.9375f, 14.56640625f, 15.04296875f, 15.33984375f,
13.31640625f, 13.61328125f, 14.08984375f, 14.71875f, 15.125f, 15.75390625f, 16.23046875f, 16.52734375f},
{1.0f, 1.40625f, 2.0f, 2.5f, 3.0f, 3.59375f, 4.0f, 4.09375f, 2.625f, 3.03125f,
3.625f, 4.125f, 4.625f, 5.21875f, 5.625f, 5.71875f, 5.0f, 5.40625f, 6.0f, 6.5f,
7.0f, 7.59375f, 8.0f, 8.09375f, 7.0f, 7.40625f, 8.0f, 8.5f, 9.0f, 9.59375f,
10.0f, 10.09375f, 9.0f, 9.40625f, 10.0f, 10.5f, 11.0f, 11.59375f, 12.0f, 12.09375f,
11.375f, 11.78125f, 12.375f, 12.875f, 13.375f, 13.96875f, 14.375f, 14.46875f, 13.0f, 13.40625f,
14.0f, 14.5f, 15.0f, 15.59375f, 16.0f, 16.09375f, 13.375f, 13.78125f, 14.375f, 14.875f,
15.375f, 15.96875f, 16.375f, 16.46875f},
{0.45507922, 0.64057922, 0.97157922, 1.42257922, 1.90732922, 2.22332922, 2.70807922, 3.15907922,
3.49007922, 3.67557922, 1.39437963, 1.57987963, 1.91087963, 2.36187963, 2.84662963, 3.16262963,
3.64737963, 4.09837963, 4.42937963, 4.61487963, 2.95130693, 3.13680693, 3.46780693, 3.91880693,
4.40355693, 4.71955693, 5.20430693, 5.65530693, 5.98630693, 6.17180693, 5.20525069, 5.39075069,
5.72175069, 6.17275069, 6.65750069, 6.97350069, 7.45825069, 7.90925069, 8.24025069, 8.42575069,
6.88975, 7.07525, 7.40625, 7.85725, 8.342, 8.658, 9.14275, 9.59375,
9.92475, 10.11025, 8.57424931, 8.75974931, 9.09074931, 9.54174931, 10.02649931, 10.34249931,
10.82724931, 11.27824931, 11.60924931, 11.79474931, 10.82819307, 11.01369307, 11.34469307, 11.79569307,
12.28044307, 12.59644307, 13.08119307, 13.53219307, 13.86319307, 14.04869307, 12.38512037, 12.57062037,
12.90162037, 13.35262037, 13.83737037, 14.15337037, 14.63812037, 15.08912037, 15.42012037, 15.60562037,
13.32442078, 13.50992078, 13.84092078, 14.29192078, 14.77667078, 15.09267078, 15.57742078, 16.02842078,
16.35942078, 16.54492078},
{1.0f, 2.5f, 4.0f, 7.0f, 8.5f, 10.0f, 13.0f, 14.5f, 16.0f},
{1.0, 1.34110787, 1.80029155, 2.32944606, 2.67055394, 3.19970845, 3.65889213, 4.0,
2.36443149, 2.70553936, 3.16472303, 3.69387755, 4.03498542, 4.56413994, 5.02332362, 5.36443149,
4.20116618, 4.54227405, 5.00145773, 5.53061224, 5.87172012, 6.40087464, 6.86005831, 7.20116618,
6.31778426, 6.65889213, 7.1180758, 7.64723032, 7.98833819, 8.51749271, 8.97667638, 9.31778426,
7.68221574, 8.02332362, 8.48250729, 9.01166181, 9.35276968, 9.8819242, 10.34110787, 10.68221574,
9.79883382, 10.13994169, 10.59912536, 11.12827988, 11.46938776, 11.99854227, 12.45772595, 12.79883382,
11.63556851, 11.97667638, 12.43586006, 12.96501458, 13.30612245, 13.83527697, 14.29446064, 14.63556851,
13.0, 13.34110787, 13.80029155, 14.32944606, 14.67055394, 15.19970845, 15.65889213, 16.0}};
std::size_t i = 0;
for (const auto& s : shapes_and_attrs) {
auto image = std::make_shared<op::Parameter>(element::f32, data_shape);
auto target_spatial_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, s.spatial_shape);
auto scales = op::Constant::create<float>(element::f32, Shape{2}, s.scales_data);
auto axes = op::Constant::create<int64_t>(element::i64, Shape{2}, {2, 3});
InterpolateAttrs attrs;
attrs.mode = InterpolateMode::CUBIC;
attrs.shape_calculation_mode = s.shape_calculation_mode;
attrs.coordinate_transformation_mode = s.transform_mode;
attrs.nearest_mode = Nearest_mode::ROUND_PREFER_FLOOR;
attrs.antialias = false;
attrs.pads_begin = {0, 0, 0, 0};
attrs.pads_end = {0, 0, 0, 0};
attrs.cube_coeff = -0.75;
auto interp = std::make_shared<op::v4::Interpolate>(image, target_spatial_shape, scales, axes, attrs);
auto fun = std::make_shared<Function>(OutputVector{interp}, ParameterVector{image});
auto result = std::make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor<element::Type_t::f32>(data_shape, input_data)}));
EXPECT_EQ(result->get_element_type(), element::f32);
EXPECT_EQ(result->get_shape(), s.out_shape);
auto result_vector = read_vector<float>(result);
std::size_t num_of_elems = shape_size(s.out_shape);
for (std::size_t j = 0; j < num_of_elems; ++j) {
EXPECT_NEAR(result_vector[j], expected_results[i][j], 1.2e-5);
}
++i;
}
}
TEST(op_eval, interpolate_v4_nearest) {
struct ShapesAndAttrs {
Shape input_data_shape;
std::vector<int64_t> spatial_shape;
Shape out_shape;
std::vector<float> scales_data;
CoordinateTransformMode transform_mode;
ShapeCalcMode shape_calculation_mode;
Nearest_mode nearest_mode;
};
std::vector<ShapesAndAttrs> shapes_and_attrs = {// resize_downsample_scales_nearest:
ShapesAndAttrs{Shape{1, 1, 2, 4},
{1, 2},
Shape{1, 1, 1, 2},
{0.6f, 0.6f},
CoordinateTransformMode::HALF_PIXEL,
ShapeCalcMode::SCALES,
Nearest_mode::ROUND_PREFER_FLOOR},
// resize_downsample_sizes_nearest:
ShapesAndAttrs{Shape{1, 1, 2, 4},
{1, 2},
Shape{1, 1, 1, 2},
{0.5f, 0.5f},
CoordinateTransformMode::HALF_PIXEL,
ShapeCalcMode::SIZES,
Nearest_mode::ROUND_PREFER_FLOOR},
// resize_downsample_sizes_nearest_tf_half_pixel_for_nn:
ShapesAndAttrs{Shape{1, 1, 4, 4},
{3, 2},
Shape{1, 1, 3, 2},
{0.75, 0.5},
CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN,
ShapeCalcMode::SIZES,
Nearest_mode::ROUND_PREFER_FLOOR},
// resize_upsample_scales_nearest:
ShapesAndAttrs{Shape{1, 1, 2, 2},
{4, 6},
Shape{1, 1, 4, 6},
{2.0f, 3.0f},
CoordinateTransformMode::HALF_PIXEL,
ShapeCalcMode::SCALES,
Nearest_mode::ROUND_PREFER_FLOOR},
// resize_upsample_sizes_nearest:
ShapesAndAttrs{Shape{1, 1, 2, 2},
{7, 8},
Shape{1, 1, 7, 8},
{3.5f, 4.0f},
CoordinateTransformMode::HALF_PIXEL,
ShapeCalcMode::SIZES,
Nearest_mode::ROUND_PREFER_FLOOR},
// resize_upsample_sizes_nearest_ceil_half_pixel:
ShapesAndAttrs{Shape{1, 1, 4, 4},
{8, 8},
Shape{1, 1, 8, 8},
{2.0f, 2.0f},
CoordinateTransformMode::HALF_PIXEL,
ShapeCalcMode::SIZES,
Nearest_mode::CEIL},
// resize_upsample_sizes_nearest_floor_align_corners:
ShapesAndAttrs{Shape{1, 1, 4, 4},
{8, 8},
Shape{1, 1, 8, 8},
{2.0f, 2.0f},
CoordinateTransformMode::ALIGN_CORNERS,
ShapeCalcMode::SIZES,
Nearest_mode::FLOOR},
// resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric:
ShapesAndAttrs{Shape{1, 1, 4, 4},
{8, 8},
Shape{1, 1, 8, 8},
{2.0f, 2.0f},
CoordinateTransformMode::ASYMMETRIC,
ShapeCalcMode::SIZES,
Nearest_mode::ROUND_PREFER_CEIL}};
std::vector<std::vector<float>> input_data_list = {
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f},
{1.0f, 2.0f, 3.0f, 4.0f},
{1.0f, 2.0f, 3.0f, 4.0f},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f}};
std::vector<std::vector<float>> expected_results = {
{1.0f, 3.0f},
{1.0f, 3.0f},
{6.0f, 8.0f, 10.0f, 12.0f, 14.0f, 16.0f},
{1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f, 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f, 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f},
{1.0f, 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f, 2.0f, 1.0f, 1.0f, 1.0f, 1.0f, 2.0f, 2.0f,
2.0f, 2.0f, 1.0f, 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f, 2.0f, 1.0f, 1.0f, 1.0f, 1.0f,
2.0f, 2.0f, 2.0f, 2.0f, 3.0f, 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f, 4.0f, 3.0f, 3.0f,
3.0f, 3.0f, 4.0f, 4.0f, 4.0f, 4.0f, 3.0f, 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f, 4.0f},
{1.0f, 2.0f, 2.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f, 5.0f, 6.0f, 6.0f, 7.0f, 7.0f,
8.0f, 8.0f, 8.0f, 5.0f, 6.0f, 6.0f, 7.0f, 7.0f, 8.0f, 8.0f, 8.0f, 9.0f, 10.0f,
10.0f, 11.0f, 11.0f, 12.0f, 12.0f, 12.0f, 9.0f, 10.0f, 10.0f, 11.0f, 11.0f, 12.0f, 12.0f,
12.0f, 13.0f, 14.0f, 14.0f, 15.0f, 15.0f, 16.0f, 16.0f, 16.0f, 13.0f, 14.0f, 14.0f, 15.0f,
15.0f, 16.0f, 16.0f, 16.0f, 13.0f, 14.0f, 14.0f, 15.0f, 15.0f, 16.0f, 16.0f, 16.0f},
{1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f, 4.0f, 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f, 4.0f,
1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f, 4.0f, 5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 7.0f, 7.0f, 8.0f,
5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 7.0f, 7.0f, 8.0f, 9.0f, 9.0f, 9.0f, 10.0f, 10.0f, 11.0f, 11.0f, 12.0f,
9.0f, 9.0f, 9.0f, 10.0f, 10.0f, 11.0f, 11.0f, 12.0f, 13.0f, 13.0f, 13.0f, 14.0f, 14.0f, 15.0f, 15.0f, 16.0f},
{1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 4.0, 5.0, 6.0, 6.0, 7.0, 7.0, 8.0, 8.0, 8.0,
5.0, 6.0, 6.0, 7.0, 7.0, 8.0, 8.0, 8.0, 9.0, 10.0, 10.0, 11.0, 11.0, 12.0, 12.0, 12.0,
9.0, 10.0, 10.0, 11.0, 11.0, 12.0, 12.0, 12.0, 13.0, 14.0, 14.0, 15.0, 15.0, 16.0, 16.0, 16.0,
13.0, 14.0, 14.0, 15.0, 15.0, 16.0, 16.0, 16.0, 13.0, 14.0, 14.0, 15.0, 15.0, 16.0, 16.0, 16.0}};
std::size_t i = 0;
for (const auto& s : shapes_and_attrs) {
auto image = std::make_shared<op::Parameter>(element::f32, s.input_data_shape);
auto target_spatial_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, s.spatial_shape);
auto scales = op::Constant::create<float>(element::f32, Shape{2}, s.scales_data);
auto axes = op::Constant::create<int64_t>(element::i64, Shape{2}, {2, 3});
InterpolateAttrs attrs;
attrs.mode = InterpolateMode::NEAREST;
attrs.shape_calculation_mode = s.shape_calculation_mode;
attrs.coordinate_transformation_mode = s.transform_mode;
attrs.nearest_mode = s.nearest_mode;
attrs.antialias = false;
attrs.pads_begin = {0, 0, 0, 0};
attrs.pads_end = {0, 0, 0, 0};
attrs.cube_coeff = -0.75;
auto interp = std::make_shared<op::v4::Interpolate>(image, target_spatial_shape, scales, axes, attrs);
auto fun = std::make_shared<Function>(OutputVector{interp}, ParameterVector{image});
auto result = std::make_shared<HostTensor>();
ASSERT_TRUE(
fun->evaluate({result}, {make_host_tensor<element::Type_t::f32>(s.input_data_shape, input_data_list[i])}));
EXPECT_EQ(result->get_element_type(), element::f32);
EXPECT_EQ(result->get_shape(), s.out_shape);
auto result_vector = read_vector<float>(result);
std::size_t num_of_elems = shape_size(s.out_shape);
for (std::size_t j = 0; j < num_of_elems; ++j) {
EXPECT_NEAR(result_vector[j], expected_results[i][j], 0.0000002);
}
++i;
}
}
TEST(op_eval, interpolate_v4_linear_onnx) {
struct ShapesAndAttrs {
Shape input_data_shape;
std::vector<int64_t> spatial_shape;
Shape out_shape;
std::vector<float> scales_data;
CoordinateTransformMode transform_mode;
ShapeCalcMode shape_calculation_mode;
};
std::vector<ShapesAndAttrs> shapes_and_attrs = {// resize_downsample_scales_linear
ShapesAndAttrs{Shape{1, 1, 2, 4},
{1, 2},
Shape{1, 1, 1, 2},
{0.6f, 0.6f},
CoordinateTransformMode::HALF_PIXEL,
ShapeCalcMode::SCALES},
// resize_downsample_sizes_linear_pytorch_half_pixel
ShapesAndAttrs{Shape{1, 1, 4, 4},
{3, 1},
Shape{1, 1, 3, 1},
{0.75f, 0.25f},
CoordinateTransformMode::PYTORCH_HALF_PIXEL,
ShapeCalcMode::SIZES},
// resize_upsample_scales_linear
ShapesAndAttrs{Shape{1, 1, 2, 2},
{4, 4},
Shape{1, 1, 4, 4},
{2.0f, 2.0f},
CoordinateTransformMode::HALF_PIXEL,
ShapeCalcMode::SCALES},
// resize_upsample_scales_linear_align_corners
ShapesAndAttrs{Shape{1, 1, 2, 2},
{4, 4},
Shape{1, 1, 4, 4},
{2.0f, 2.0f},
CoordinateTransformMode::ALIGN_CORNERS,
ShapeCalcMode::SCALES},
// resize_downsample_scales_linear_align_corners:
// (expected values from ONNX documentation are not correct!)
ShapesAndAttrs{Shape{1, 1, 2, 4},
{1, 2},
Shape{1, 1, 1, 2},
{0.6f, 0.6f},
CoordinateTransformMode::ALIGN_CORNERS,
ShapeCalcMode::SCALES}};
std::vector<std::vector<float>> input_data_list = {
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f},
{1.0f, 2.0f, 3.0f, 4.0f},
{1.0f, 2.0f, 3.0f, 4.0f},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}};
std::vector<std::vector<float>> expected_results = {
{2.6666665f, 4.3333331f},
{1.6666666f, 7.0f, 12.333333f},
{1.0f, 1.25f, 1.75f, 2.0f, 1.5f, 1.75f, 2.25f, 2.5f, 2.5f, 2.75f, 3.25f, 3.5f, 3.0f, 3.25f, 3.75f, 4.0f},
{1.0f,
1.33333333f,
1.66666667f,
2.0f,
1.66666667f,
2.0f,
2.33333333f,
2.66666667f,
2.33333333f,
2.66666667f,
3.0f,
3.33333333f,
3.0f,
3.33333333f,
3.66666667f,
4.0f},
{1.0f, 4.0f}};
std::size_t i = 0;
for (const auto& s : shapes_and_attrs) {
auto image = std::make_shared<op::Parameter>(element::f32, s.input_data_shape);
auto target_spatial_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, s.spatial_shape);
auto scales = op::Constant::create<float>(element::f32, Shape{2}, s.scales_data);
auto axes = op::Constant::create<int64_t>(element::i64, Shape{2}, {2, 3});
InterpolateAttrs attrs;
attrs.mode = InterpolateMode::LINEAR_ONNX;
attrs.shape_calculation_mode = s.shape_calculation_mode;
attrs.coordinate_transformation_mode = s.transform_mode;
attrs.nearest_mode = Nearest_mode::ROUND_PREFER_FLOOR;
attrs.antialias = false;
attrs.pads_begin = {0, 0, 0, 0};
attrs.pads_end = {0, 0, 0, 0};
attrs.cube_coeff = -0.75;
auto interp = std::make_shared<op::v4::Interpolate>(image, target_spatial_shape, scales, axes, attrs);
auto fun = std::make_shared<Function>(OutputVector{interp}, ParameterVector{image});
auto result = std::make_shared<HostTensor>();
ASSERT_TRUE(
fun->evaluate({result}, {make_host_tensor<element::Type_t::f32>(s.input_data_shape, input_data_list[i])}));
EXPECT_EQ(result->get_element_type(), element::f32);
EXPECT_EQ(result->get_shape(), s.out_shape);
auto result_vector = read_vector<float>(result);
std::size_t num_of_elems = shape_size(s.out_shape);
for (std::size_t j = 0; j < num_of_elems; ++j) {
EXPECT_NEAR(result_vector[j], expected_results[i][j], 0.00001);
}
++i;
}
}
TEST(op_eval, interpolate_v4_linear_onnx5d) {
struct ShapesAndAttrs {
Shape input_data_shape;
std::vector<int64_t> spatial_shape;
Shape out_shape;
std::vector<float> scales_data;
CoordinateTransformMode transform_mode;
ShapeCalcMode shape_calculation_mode;
};
std::vector<ShapesAndAttrs> shapes_and_attrs = {// resize_downsample_scales_linear
{Shape{1, 1, 3, 2, 4},
{2, 1, 2},
Shape{1, 1, 2, 1, 2},
{0.8f, 0.6f, 0.6f},
CoordinateTransformMode::HALF_PIXEL,
ShapeCalcMode::SCALES},
// resize_downsample_scales_linear_align_corners
{Shape{1, 1, 3, 2, 4},
{2, 1, 2},
Shape{1, 1, 2, 1, 2},
{0.8f, 0.6f, 0.6f},
CoordinateTransformMode::ALIGN_CORNERS,
ShapeCalcMode::SCALES},
// resize_upsample_scales_linear
{Shape{1, 1, 2, 2, 2},
{4, 4, 4},
Shape{1, 1, 4, 4, 4},
{2.0, 2.0, 2.0},
CoordinateTransformMode::HALF_PIXEL,
ShapeCalcMode::SCALES},
// resize_upsample_scales_linear_align_corners
{Shape{1, 1, 2, 2, 2},
{4, 4, 4},
Shape{1, 1, 4, 4, 4},
{2.0, 2.0, 2.0},
CoordinateTransformMode::ALIGN_CORNERS,
ShapeCalcMode::SCALES},
// resize_downsample_sizes_linear_pytorch_half_pixel
{Shape{1, 1, 2, 4, 4},
{1, 3, 1},
Shape{1, 1, 1, 3, 1},
{0.5, 0.75, 0.25},
CoordinateTransformMode::PYTORCH_HALF_PIXEL,
ShapeCalcMode::SIZES}};
std::vector<std::vector<float>> input_data_list = {
// resize_downsample_scales_linear
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f},
// resize_downsample_scales_linear_align_corners
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f},
// resize_upsample_scales_linear
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f},
// resize_upsample_scales_linear_align_corners
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f},
// resize_downsample_sizes_linear_pytorch_half_pixel
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f,
12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f,
23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f}};
std::vector<std::vector<float>> expected_results = {
// resize_downsample_scales_linear
{3.6666665, 5.333333, 13.666666, 15.333333},
// resize_downsample_scales_linear_align_corners
{1.0, 4.0, 17.0, 20.0},
// resize_upsample_scales_linear
{1.0, 1.25, 1.75, 2.0, 1.5, 1.75, 2.25, 2.5, 2.5, 2.75, 3.25, 3.5, 3.0, 3.25, 3.75, 4.0,
2.0, 2.25, 2.75, 3.0, 2.5, 2.75, 3.25, 3.5, 3.5, 3.75, 4.25, 4.5, 4.0, 4.25, 4.75, 5.0,
4.0, 4.25, 4.75, 5.0, 4.5, 4.75, 5.25, 5.5, 5.5, 5.75, 6.25, 6.5, 6.0, 6.25, 6.75, 7.0,
5.0, 5.25, 5.75, 6.0, 5.5, 5.75, 6.25, 6.5, 6.5, 6.75, 7.25, 7.5, 7.0, 7.25, 7.75, 8.0},
// resize_upsample_scales_linear_align_corners
{1.0, 1.3333333, 1.6666667, 2.0, 1.6666666, 2.0, 2.3333335, 2.6666667, 2.3333333, 2.6666665,
3.0, 3.3333335, 3.0, 3.3333333, 3.6666665, 4.0, 2.3333335, 2.6666665, 3.0, 3.3333333,
3.0, 3.333333, 3.6666665, 3.9999995, 3.6666665, 4.0, 4.3333335, 4.6666665, 4.333333, 4.6666665,
4.9999995, 5.333333, 3.6666667, 4.0, 4.3333335, 4.6666665, 4.3333335, 4.6666665, 5.0, 5.333333,
5.0, 5.3333335, 5.666667, 6.0, 5.666667, 5.9999995, 6.333333, 6.666667, 5.0, 5.333333,
5.6666665, 6.0, 5.666667, 5.9999995, 6.333333, 6.666666, 6.3333335, 6.666666, 7.0, 7.3333335,
7.0, 7.333333, 7.6666675, 8.0},
// resize_downsample_sizes_linear_pytorch_half_pixel
{1.6666667, 7.0, 12.333333}};
std::size_t i = 0;
for (const auto& s : shapes_and_attrs) {
auto image = std::make_shared<op::Parameter>(element::f32, s.input_data_shape);
auto target_spatial_shape = op::Constant::create<int64_t>(element::i64, Shape{3}, s.spatial_shape);
auto scales = op::Constant::create<float>(element::f32, Shape{3}, s.scales_data);
auto axes = op::Constant::create<int64_t>(element::i64, Shape{3}, {2, 3, 4});
InterpolateAttrs attrs;
attrs.mode = InterpolateMode::LINEAR_ONNX;
attrs.shape_calculation_mode = s.shape_calculation_mode;
attrs.coordinate_transformation_mode = s.transform_mode;
attrs.nearest_mode = Nearest_mode::ROUND_PREFER_FLOOR;
attrs.antialias = false;
attrs.pads_begin = {0, 0, 0, 0, 0};
attrs.pads_end = {0, 0, 0, 0, 0};
attrs.cube_coeff = -0.75;
auto interp = std::make_shared<op::v4::Interpolate>(image, target_spatial_shape, scales, axes, attrs);
auto fun = std::make_shared<Function>(OutputVector{interp}, ParameterVector{image});
auto result = std::make_shared<HostTensor>();
ASSERT_TRUE(
fun->evaluate({result}, {make_host_tensor<element::Type_t::f32>(s.input_data_shape, input_data_list[i])}));
EXPECT_EQ(result->get_element_type(), element::f32);
EXPECT_EQ(result->get_shape(), s.out_shape);
auto result_vector = read_vector<float>(result);
std::size_t num_of_elems = shape_size(s.out_shape);
for (std::size_t j = 0; j < num_of_elems; ++j) {
EXPECT_NEAR(result_vector[j], expected_results[i][j], 0.00001);
}
++i;
}
}

View File

@@ -1,59 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "engines_util/execute_tools.hpp"
#include "gtest/gtest.h"
#include "openvino/opsets/opset8.hpp"
#include "util/all_close_f.hpp"
OPENVINO_SUPPRESS_DEPRECATED_START
TEST(op_eval, loop_dynamic_shapes) {
// That which we iterate over
auto X = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape::dynamic());
auto Y = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape::dynamic());
auto M = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape::dynamic());
// Set up the cell body, a function from (Xi, Yi) -> (Zo)
// Body parameters
auto Xi = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape::dynamic());
auto Yi = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape::dynamic());
auto M_body = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape::dynamic());
auto body_condition = std::make_shared<ov::opset8::Constant>(ngraph::element::boolean, ngraph::Shape{1}, true);
auto trip_count = std::make_shared<ov::opset8::Constant>(ngraph::element::i64, ngraph::Shape{1}, 3);
auto exec_condition = std::make_shared<ov::opset8::Constant>(ngraph::element::boolean, ngraph::Shape{1}, true);
// Body
auto sum = std::make_shared<ov::opset8::Add>(Xi, Yi);
auto Zo = std::make_shared<ov::opset8::Multiply>(sum, M_body);
auto body = std::make_shared<ov::Model>(ov::OutputVector{body_condition, Zo}, ov::ParameterVector{Xi, Yi, M_body});
auto loop = std::make_shared<ov::opset8::Loop>(trip_count, exec_condition);
loop->set_function(body);
loop->set_invariant_input(Xi, X);
loop->set_invariant_input(Yi, Y);
loop->set_merged_input(M_body, M, Zo);
loop->set_special_body_ports(ov::opset8::Loop::SpecialBodyPorts{-1, 0});
// Output is last Zo
auto result = std::make_shared<ov::opset8::Result>(loop->get_iter_value(Zo, -1));
auto f = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{X, Y, M});
std::vector<float> inputX{0, 1, 2, 3}, inputY{1, 2, 3, 4}, inputM{5, 4, 3, 2};
std::vector<float> expected_result{5, 108, 375, 686};
std::vector<float> actual_result(ov::shape_size(ov::Shape{2, 2}), 2);
auto r0 = std::make_shared<ov::HostTensor>();
using namespace ngraph;
ASSERT_TRUE(f->evaluate({r0},
{make_host_tensor<ngraph::element::Type_t::f32>(ov::Shape{2, 2}, inputX),
make_host_tensor<ngraph::element::Type_t::f32>(ov::Shape{2, 2}, inputY),
make_host_tensor<ngraph::element::Type_t::f32>(ov::Shape{2, 2}, inputM)}));
EXPECT_EQ(r0->get_shape(), (ov::Shape{2, 2}));
memcpy(actual_result.data(), r0->get_data_ptr<float>(), ov::shape_size(ov::Shape{2, 2}) * sizeof(float));
EXPECT_TRUE(ngraph::test::all_close_f(expected_result, actual_result));
}
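For reference, the expected values follow directly from the body: each iteration computes Zo = (X + Y) * M and feeds Zo back in as the merged input M, so after trip_count = 3 iterations Zo = (X + Y)^3 * M element-wise. With X + Y = {1, 3, 5, 7} and M = {5, 4, 3, 2} this gives {1, 27, 125, 343} * {5, 4, 3, 2} = {5, 108, 375, 686}, which is exactly expected_result.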

View File

@ -1,271 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/matmul.hpp"
#include <numeric>
#include <vector>
#include "engines_util/execute_tools.hpp"
#include "gtest/gtest.h"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/util.hpp"
#include "util/all_close_f.hpp"
using namespace std;
using namespace ngraph;
OPENVINO_SUPPRESS_DEPRECATED_START
TEST(op_eval, matmul_dynamic_1D_arg) {
auto arg0 = make_shared<op::Parameter>(element::i32, PartialShape::dynamic());
auto arg1 = make_shared<op::Parameter>(element::i32, PartialShape::dynamic());
auto matmul = make_shared<op::MatMul>(arg0, arg1, false, false);
auto fun = make_shared<Function>(OutputVector{matmul}, ParameterVector{arg0, arg1});
// inner vector contains shapes for arg0, arg1, matmul expected result
std::vector<std::vector<Shape>> shapes{{Shape{2}, Shape{2}, Shape{}},
{Shape{3}, Shape{2, 3, 2}, Shape{2, 2}},
{Shape{2, 2, 3}, Shape{3}, Shape{2, 2}}};
std::vector<std::vector<int32_t>> arg0_inputs{{2, 3}, {10, 11, 12}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}};
std::vector<std::vector<int32_t>> arg1_inputs{{4, 5}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}, {10, 11, 12}};
std::vector<std::vector<int32_t>> expected_result{{23}, {103, 136, 301, 334}, {68, 167, 266, 365}};
for (size_t i = 0; i < arg0_inputs.size(); i++) {
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result},
{make_host_tensor<element::Type_t::i32>(shapes[i][0], arg0_inputs[i]),
make_host_tensor<element::Type_t::i32>(shapes[i][1], arg1_inputs[i])}));
EXPECT_EQ(result->get_shape(), (shapes[i][2]));
ASSERT_EQ(read_vector<int32_t>(result), expected_result[i]);
}
}
TEST(op_eval, matmul_dynamic_0_elem_arg) {
auto arg0 = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto arg1 = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto matmul = make_shared<op::MatMul>(arg0, arg1, false, false);
auto fun = make_shared<Function>(OutputVector{matmul}, ParameterVector{arg0, arg1});
// inner vector contains shapes for arg0, arg1, matmul expected result
std::vector<std::vector<Shape>> shapes{{Shape{2, 0}, Shape{0, 2}, Shape{2, 2}},
{Shape{0, 2}, Shape{2, 0}, Shape{0, 0}}};
std::vector<std::vector<float>> arg_inputs{{}, {}};
std::vector<std::vector<float>> expected_result{{0, 0, 0, 0}, {}};
for (size_t i = 0; i < arg_inputs.size(); i++) {
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result},
{make_host_tensor<element::Type_t::f32>(shapes[i][0], arg_inputs[i]),
make_host_tensor<element::Type_t::f32>(shapes[i][1], arg_inputs[i])}));
EXPECT_EQ(result->get_shape(), (shapes[i][2]));
ASSERT_EQ(read_vector<float>(result), expected_result[i]);
}
}
TEST(op_eval, matmul_dynamic_2D_args) {
auto arg0 = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto arg1 = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto matmul = make_shared<op::MatMul>(arg0, arg1, false, false);
auto fun = make_shared<Function>(OutputVector{matmul}, ParameterVector{arg0, arg1});
// tensor shape for arg0, arg1, matmul result
std::vector<Shape> shapes = {Shape{3, 2}, Shape{2, 4}, Shape{3, 4}};
std::vector<float> arg0_data{1.f, 2.f, 3.f, 4.f, 5.f, 6.f};
std::vector<float> arg1_data{0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f};
std::vector<float> expected_result{8.f, 11.f, 14.f, 17.f, 16.f, 23.f, 30.f, 37.f, 24.f, 35.f, 46.f, 57.f};
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result},
{make_host_tensor<element::Type_t::f32>(shapes[0], arg0_data),
make_host_tensor<element::Type_t::f32>(shapes[1], arg1_data)}));
EXPECT_EQ(result->get_shape(), shapes[2]);
ASSERT_TRUE(test::all_close_f(read_vector<float>(result), expected_result));
}
TEST(op_eval, matmul_dynamic_2D_transpose0) {
auto arg0 = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto arg1 = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto matmul = make_shared<op::MatMul>(arg0, arg1, true, false);
auto fun = make_shared<Function>(OutputVector{matmul}, ParameterVector{arg0, arg1});
// tensor shapes for arg0, arg1, matmul result
std::vector<Shape> shapes = {Shape{3, 2}, Shape{3, 1}, Shape{2, 1}};
std::vector<float> arg0_data{1.f, 2.f, 3.f, 4.f, 5.f, 6.f};
std::vector<float> arg1_data{0.f, 1.f, 2.f};
std::vector<float> expected_result{13.f, 16.f};
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result},
{make_host_tensor<element::Type_t::f32>(shapes[0], arg0_data),
make_host_tensor<element::Type_t::f32>(shapes[1], arg1_data)}));
EXPECT_EQ(result->get_shape(), shapes[2]);
ASSERT_TRUE(test::all_close_f(read_vector<float>(result), expected_result));
}
TEST(op_eval, matmul_dynamic_2D_transpose1) {
auto arg0 = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto arg1 = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto matmul = make_shared<op::MatMul>(arg0, arg1, false, true);
auto fun = make_shared<Function>(OutputVector{matmul}, ParameterVector{arg0, arg1});
// tensor shapes for arg0, arg1, matmul result
std::vector<Shape> shapes = {Shape{3, 2}, Shape{3, 2}, Shape{3, 3}};
std::vector<float> arg0_data{1.f, 2.f, 3.f, 4.f, 5.f, 6.f};
std::vector<float> arg1_data{2.f, 3.f, 4.f, 5.f, 6.f, 7.f};
std::vector<float> expected_result{8.f, 14.f, 20.f, 18.f, 32.f, 46.f, 28.f, 50.f, 72.f};
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result},
{make_host_tensor<element::Type_t::f32>(shapes[0], arg0_data),
make_host_tensor<element::Type_t::f32>(shapes[1], arg1_data)}));
EXPECT_EQ(result->get_shape(), shapes[2]);
ASSERT_TRUE(test::all_close_f(read_vector<float>(result), expected_result));
}
TEST(op_eval, matmul_dynamic_same_batch_size) {
auto arg0 = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto arg1 = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto matmul = make_shared<op::MatMul>(arg0, arg1, false, false);
auto fun = make_shared<Function>(OutputVector{matmul}, ParameterVector{arg0, arg1});
// tensor shapes for arg0, arg1, matmul result
std::vector<Shape> shapes = {Shape{3, 2, 2, 2}, Shape{3, 2, 2, 1}, Shape{3, 2, 2, 1}};
std::vector<float> arg0_data(shape_size(shapes[0]));
std::vector<float> arg1_data(shape_size(shapes[1]));
// arg0_data is 1.f, 2.f, 3.f, ..., 24.f
iota(arg0_data.begin(), arg0_data.end(), 1.f);
// arg1_data is 0.f, 1.f, 2.f, ..., 11.f
iota(arg1_data.begin(), arg1_data.end(), 0.f);
std::vector<float> expected_result{2.f, 4.f, 28.f, 38.f, 86.f, 104.f, 176.f, 202.f, 298.f, 332.f, 452.f, 494.f};
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result},
{make_host_tensor<element::Type_t::f32>(shapes[0], arg0_data),
make_host_tensor<element::Type_t::f32>(shapes[1], arg1_data)}));
EXPECT_EQ(result->get_shape(), shapes[2]);
ASSERT_TRUE(test::all_close_f(read_vector<float>(result), expected_result));
}
TEST(op_eval, matmul_dynamic_broadcast) {
auto arg0 = make_shared<op::Parameter>(element::i64, PartialShape::dynamic());
auto arg1 = make_shared<op::Parameter>(element::i64, PartialShape::dynamic());
auto matmul = make_shared<op::MatMul>(arg0, arg1, false, false);
auto fun = make_shared<Function>(OutputVector{matmul}, ParameterVector{arg0, arg1});
// inner vector contains shapes for arg0, arg1, matmul expected result
std::vector<std::vector<Shape>> shapes{{Shape{2, 1}, Shape{2, 2, 1, 2}, Shape{2, 2, 2, 2}},
{Shape{2, 1, 1, 2}, Shape{2, 1}, Shape{2, 1, 1, 1}},
{Shape{2, 2, 1}, Shape{1, 1, 2}, Shape{2, 2, 2}},
{Shape{1, 1, 2}, Shape{3, 2, 1}, Shape{3, 1, 1}},
{Shape{1, 2, 1, 2}, Shape{2, 1, 2, 1}, Shape{2, 2, 1, 1}},
{Shape{1, 2, 3}, Shape{1, 1, 3, 2}, Shape{1, 1, 2, 2}},
{Shape{4, 1, 2}, Shape{1, 1, 2, 1}, Shape{1, 4, 1, 1}}};
std::vector<std::vector<int64_t>> arg0_inputs{{2, 3},
{1, 2, 3, 4},
{2, 3, 4, 5},
{1, 2},
{0, 1, 2, 3},
{0, 1, 2, 3, 4, 5},
{0, 1, 2, 3, 4, 5, 6, 7}};
std::vector<std::vector<int64_t>> arg1_inputs{{0, 1, 2, 3, 4, 5, 6, 7},
{2, 3},
{4, 5},
{1, 2, 3, 4, 5, 6},
{2, 3, 4, 5},
{2, 3, 4, 5, 6, 7},
{0, 1}};
std::vector<std::vector<int64_t>> expected_result{{0, 2, 0, 3, 4, 6, 6, 9, 8, 10, 12, 15, 12, 14, 18, 21},
{8, 18},
{8, 10, 12, 15, 16, 20, 20, 25},
{5, 11, 17},
{3, 13, 5, 23},
{16, 19, 52, 64},
{1, 3, 5, 7}};
for (size_t i = 0; i < arg0_inputs.size(); i++) {
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result},
{make_host_tensor<element::Type_t::i64>(shapes[i][0], arg0_inputs[i]),
make_host_tensor<element::Type_t::i64>(shapes[i][1], arg1_inputs[i])}));
EXPECT_EQ(result->get_shape(), (shapes[i][2]));
ASSERT_EQ(read_vector<int64_t>(result), expected_result[i]);
}
}
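The first broadcast case can be verified by hand: arg0 is the {2, 1} column [[2], [3]] and arg1 holds four {1, 2} rows [[0, 1]], [[2, 3]], [[4, 5]], [[6, 7]] laid out as {2, 2, 1, 2}. Multiplying the column by each row gives [[0, 2], [0, 3]], [[4, 6], [6, 9]], [[8, 10], [12, 15]] and [[12, 14], [18, 21]], i.e. the first sixteen values of expected_result.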
TEST(op_eval, matmul_dynamic_broadcast_transpose0) {
auto arg0 = make_shared<op::Parameter>(element::i64, PartialShape::dynamic());
auto arg1 = make_shared<op::Parameter>(element::i64, PartialShape::dynamic());
auto matmul = make_shared<op::MatMul>(arg0, arg1, true, false);
auto fun = make_shared<Function>(OutputVector{matmul}, ParameterVector{arg0, arg1});
// inner vector contains shapes for arg0, arg1, matmul expected result
std::vector<std::vector<Shape>> shapes{{Shape{3, 2}, Shape{2, 1, 3, 2}, Shape{2, 1, 2, 2}},
{Shape{2, 1, 2, 3}, Shape{2, 1}, Shape{2, 1, 3, 1}},
{Shape{2, 3, 2}, Shape{1, 3, 1}, Shape{2, 2, 1}},
{Shape{1, 2, 3}, Shape{3, 2, 1}, Shape{3, 3, 1}}};
std::vector<std::vector<int64_t>> arg0_inputs{{2, 3, 4, 5, 6, 7},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
{2, 3, 4, 5, 6, 7}};
std::vector<std::vector<int64_t>> arg1_inputs{{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
{2, 3},
{4, 5, 6},
{1, 2, 3, 4, 5, 6}};
std::vector<std::vector<int64_t>> expected_result{{32, 44, 38, 53, 104, 116, 128, 143},
{9, 14, 19, 39, 44, 49},
{49, 64, 139, 154},
{12, 15, 18, 26, 33, 40, 40, 51, 62}};
for (size_t i = 0; i < arg0_inputs.size(); i++) {
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result},
{make_host_tensor<element::Type_t::i64>(shapes[i][0], arg0_inputs[i]),
make_host_tensor<element::Type_t::i64>(shapes[i][1], arg1_inputs[i])}));
EXPECT_EQ(result->get_shape(), (shapes[i][2]));
ASSERT_EQ(read_vector<int64_t>(result), expected_result[i]);
}
}
TEST(op_eval, matmul_dynamic_broadcast_transpose1) {
auto arg0 = make_shared<op::Parameter>(element::i64, PartialShape::dynamic());
auto arg1 = make_shared<op::Parameter>(element::i64, PartialShape::dynamic());
auto matmul = make_shared<op::MatMul>(arg0, arg1, false, true);
auto fun = make_shared<Function>(OutputVector{matmul}, ParameterVector{arg0, arg1});
// inner vector contains shapes for arg0, arg1, matmul expected result
std::vector<std::vector<Shape>> shapes{{Shape{3, 2}, Shape{2, 1, 3, 2}, Shape{2, 1, 3, 3}},
{Shape{2, 1, 2, 3}, Shape{2, 3}, Shape{2, 1, 2, 2}},
{Shape{2, 3, 2}, Shape{1, 1, 2}, Shape{2, 3, 1}},
{Shape{1, 2, 3}, Shape{3, 1, 3}, Shape{3, 2, 1}}};
std::vector<std::vector<int64_t>> arg0_inputs{{2, 3, 4, 5, 6, 7},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
{2, 3, 4, 5, 6, 7}};
std::vector<std::vector<int64_t>> arg1_inputs{{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
{2, 3, 4, 5, 6, 7},
{5, 6},
{1, 2, 3, 4, 5, 6, 7, 8, 9}};
std::vector<std::vector<int64_t>> expected_result{
{3, 13, 23, 5, 23, 41, 7, 33, 59, 33, 43, 53, 59, 77, 95, 85, 111, 137},
{11, 20, 38, 74, 65, 128, 92, 182},
{17, 39, 61, 83, 105, 127},
{20, 38, 47, 92, 74, 146}};
for (size_t i = 0; i < arg0_inputs.size(); i++) {
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result},
{make_host_tensor<element::Type_t::i64>(shapes[i][0], arg0_inputs[i]),
make_host_tensor<element::Type_t::i64>(shapes[i][1], arg1_inputs[i])}));
EXPECT_EQ(result->get_shape(), (shapes[i][2]));
ASSERT_EQ(read_vector<int64_t>(result), expected_result[i]);
}
}

View File

@ -1,38 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/mish.hpp"
#include <string>
#include <vector>
#include "engines_util/execute_tools.hpp"
#include "gtest/gtest.h"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
#include "util/test_tools.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
OPENVINO_SUPPRESS_DEPRECATED_START
TEST(op_eval, mish_0D) {
auto p = make_shared<op::Parameter>(element::f32, Shape{});
auto mish = make_shared<op::v4::Mish>(p);
auto fun = make_shared<Function>(OutputVector{mish}, ParameterVector{p});
std::vector<std::vector<float>> inputs{{-1.0}, {1.0}, {20.0}};
std::vector<std::vector<float>> expected_result{{-0.303401}, {0.86509835720062256}, {20.0}};
for (size_t i = 0; i < inputs.size(); i++) {
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor<element::Type_t::f32>(Shape{}, inputs[i])}));
EXPECT_EQ(result->get_element_type(), element::f32);
EXPECT_EQ(result->get_shape(), (Shape{}));
auto result_data = read_vector<float>(result);
EXPECT_NEAR(result_data[0], expected_result[i][0], 0.000001);
}
}
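Mish computes x * tanh(softplus(x)) = x * tanh(ln(1 + e^x)), which is where the expected values come from: mish(1) = tanh(ln(1 + e)) ≈ 0.8650984, mish(-1) = -tanh(ln(1 + e^-1)) ≈ -0.303401, and for a large input such as 20 the function saturates to the input itself.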

View File

@ -1,160 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/non_zero.hpp"
#include <string>
#include <vector>
#include "engines_util/execute_tools.hpp"
#include "gtest/gtest.h"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
#include "util/test_tools.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
OPENVINO_SUPPRESS_DEPRECATED_START
TEST(op_eval, non_zero_0D) {
auto p = make_shared<op::Parameter>(element::i32, Shape{});
auto non_zero = make_shared<op::v3::NonZero>(p, element::i64);
auto fun = make_shared<Function>(OutputVector{non_zero}, ParameterVector{p});
std::vector<std::vector<int32_t>> inputs{{-1}, {1}, {20}};
std::vector<std::vector<int64_t>> expected_result{{0}, {0}, {0}};
for (size_t i = 0; i < inputs.size(); i++) {
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor<element::Type_t::i32>(Shape{}, inputs[i])}));
EXPECT_EQ(result->get_element_type(), element::i64);
EXPECT_EQ(result->get_shape(), (Shape{1, 1}));
auto result_data = read_vector<int64_t>(result);
ASSERT_EQ(result_data, expected_result[i]);
}
}
TEST(op_eval, non_zero_0D_0) {
auto p = make_shared<op::Parameter>(element::i32, Shape{});
auto non_zero = make_shared<op::v3::NonZero>(p, element::i64);
auto fun = make_shared<Function>(OutputVector{non_zero}, ParameterVector{p});
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor<element::Type_t::i32>(Shape{}, {0})}));
EXPECT_EQ(result->get_element_type(), element::i64);
EXPECT_EQ(result->get_shape(), (Shape{0, 0}));
auto result_data = read_vector<int64_t>(result);
ASSERT_EQ(result_data.data(), nullptr);
}
TEST(op_eval, non_zero_1D) {
Shape p_shape{5};
auto p = make_shared<op::Parameter>(element::f32, p_shape);
auto non_zero = make_shared<op::v3::NonZero>(p, element::i32);
auto fun = make_shared<Function>(OutputVector{non_zero}, ParameterVector{p});
std::vector<std::vector<float>> inputs{{1.0, 0, 3.0, 4.0, 0}, {0, 0, 0, 1.0, 3.2}, {1.0, 1.0, 1.0, 1.0, 1.0}};
std::vector<std::vector<int32_t>> expected_result{{0, 2, 3}, {3, 4}, {0, 1, 2, 3, 4}};
std::vector<Shape> expected_output_shape{Shape{1, 3}, Shape{1, 2}, Shape{1, 5}};
for (size_t i = 0; i < inputs.size(); i++) {
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor<element::Type_t::f32>(p_shape, inputs[i])}));
EXPECT_EQ(result->get_element_type(), element::i32);
EXPECT_EQ(result->get_shape(), expected_output_shape[i]);
auto result_data = read_vector<int32_t>(result);
ASSERT_EQ(result_data, expected_result[i]);
}
}
TEST(op_eval, non_zero_1D_0s) {
Shape p_shape{5};
auto p = make_shared<op::Parameter>(element::f32, p_shape);
auto non_zero = make_shared<op::v3::NonZero>(p, element::i64);
auto fun = make_shared<Function>(OutputVector{non_zero}, ParameterVector{p});
std::vector<float> input(shape_size(p_shape), 0);
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor<element::Type_t::f32>(p_shape, input)}));
EXPECT_EQ(result->get_element_type(), element::i64);
EXPECT_EQ(result->get_shape(), (Shape{1, 0}));
auto result_data = read_vector<int64_t>(result);
ASSERT_EQ(result_data.data(), nullptr);
}
TEST(op_eval, non_zero_2D) {
Shape p_shape{3, 2};
auto p = make_shared<op::Parameter>(element::i32, p_shape);
auto non_zero = make_shared<op::v3::NonZero>(p);
auto fun = make_shared<Function>(OutputVector{non_zero}, ParameterVector{p});
std::vector<std::vector<int32_t>> inputs{{1, 0, 3, 4, 0, 1}, {0, 0, 0, 0, 1, 3}, {1, 1, 1, 1, 1, 1}};
std::vector<std::vector<int64_t>> expected_result{{0, 1, 1, 2, 0, 0, 1, 1},
{2, 2, 0, 1},
{0, 0, 1, 1, 2, 2, 0, 1, 0, 1, 0, 1}};
std::vector<Shape> expected_output_shape{Shape{2, 4}, Shape{2, 2}, Shape{2, 6}};
for (size_t i = 0; i < inputs.size(); i++) {
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor<element::Type_t::i32>(p_shape, inputs[i])}));
EXPECT_EQ(result->get_element_type(), element::i64);
EXPECT_EQ(result->get_shape(), expected_output_shape[i]);
auto result_data = read_vector<int64_t>(result);
ASSERT_EQ(result_data, expected_result[i]);
}
}
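NonZero emits one row per input axis and one column per non-zero element. In the first 2D case the non-zero entries of {1, 0, 3, 4, 0, 1} (shape {3, 2}) sit at coordinates (0,0), (1,0), (1,1), (2,1), so the output is the {2, 4} tensor [[0, 1, 1, 2], [0, 0, 1, 1]], flattened to {0, 1, 1, 2, 0, 0, 1, 1} as in expected_result.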
TEST(op_eval, non_zero_3D) {
Shape p_shape{3, 2, 2};
auto p = make_shared<op::Parameter>(element::i64, p_shape);
auto non_zero = make_shared<op::v3::NonZero>(p, element::i32);
auto fun = make_shared<Function>(OutputVector{non_zero}, ParameterVector{p});
std::vector<std::vector<int64_t>> inputs{{1, 0, 3, 4, 0, 1, 0, 0, 1, 3, 5, 0},
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}};
std::vector<std::vector<int32_t>> expected_result{
{0, 0, 0, 1, 2, 2, 2, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0},
{0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1}};
std::vector<Shape> expected_output_shape{Shape{3, 7}, Shape{3, 12}};
for (size_t i = 0; i < inputs.size(); i++) {
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor<element::Type_t::i64>(p_shape, inputs[i])}));
EXPECT_EQ(result->get_element_type(), element::i32);
EXPECT_EQ(result->get_shape(), expected_output_shape[i]);
auto result_data = read_vector<int32_t>(result);
ASSERT_EQ(result_data, expected_result[i]);
}
}
TEST(op_eval, non_zero_3D_0s) {
Shape p_shape{3, 2, 2};
auto p = make_shared<op::Parameter>(element::i64, p_shape);
auto non_zero = make_shared<op::v3::NonZero>(p, element::i32);
auto fun = make_shared<Function>(OutputVector{non_zero}, ParameterVector{p});
std::vector<int64_t> input(shape_size(p_shape), 0);
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor<element::Type_t::i64>(p_shape, input)}));
EXPECT_EQ(result->get_element_type(), element::i32);
EXPECT_EQ(result->get_shape(), (Shape{p_shape.size(), 0}));
auto result_data = read_vector<int32_t>(result);
ASSERT_EQ(result_data.data(), nullptr);
}
TEST(op_eval, non_zero_dynamic) {
PartialShape p_shape = PartialShape::dynamic();
auto p = make_shared<op::Parameter>(element::i32, p_shape);
auto non_zero = make_shared<op::v3::NonZero>(p);
auto fun = make_shared<Function>(OutputVector{non_zero}, ParameterVector{p});
std::vector<std::vector<int32_t>> inputs{{1, 0, 3, 4, 0}, {0, 0, 0, 0, 1, 3}, {0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0}};
std::vector<Shape> input_shapes{Shape{5}, Shape{3, 2}, Shape{3, 2, 2}};
std::vector<std::vector<int64_t>> expected_result{{0, 2, 3}, {2, 2, 0, 1}, {0, 1, 1, 1, 0, 0}};
std::vector<Shape> expected_output_shape{Shape{1, 3}, Shape{2, 2}, Shape{3, 2}};
for (size_t i = 0; i < inputs.size(); i++) {
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor<element::Type_t::i32>(input_shapes[i], inputs[i])}));
EXPECT_EQ(result->get_element_type(), element::i64);
EXPECT_EQ(result->get_shape(), expected_output_shape[i]);
auto result_data = read_vector<int64_t>(result);
ASSERT_EQ(result_data, expected_result[i]);
}
}

View File

@ -1,145 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/roi_align.hpp"
#include <numeric>
#include <vector>
#include "engines_util/execute_tools.hpp"
#include "gtest/gtest.h"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
#include "util/all_close_f.hpp"
#include "util/test_tools.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
OPENVINO_SUPPRESS_DEPRECATED_START
TEST(op_eval, roi_align_avg_pool) {
const int N = 1;
const int C = 3;
const int H = 5;
const int W = 5;
const int num_rois = 5;
const int pooled_height = 3;
const int pooled_width = 4;
const auto data_shape = Shape{N, C, H, W};
const auto rois_shape = Shape{num_rois, 4};
const auto data = make_shared<op::Parameter>(element::f32, data_shape);
const auto rois = make_shared<op::Parameter>(element::f32, rois_shape);
const auto batch_indices = make_shared<op::Parameter>(element::i32, Shape{num_rois});
auto roi_align =
make_shared<op::v3::ROIAlign>(data, rois, batch_indices, pooled_height, pooled_width, 2, 1.0f / 16.0f, "avg");
auto f = make_shared<Function>(roi_align, ParameterVector{data, rois, batch_indices});
std::vector<float> data_vec{0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14.,
15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44.,
45., 46., 47., 48., 49., 50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69., 70., 71., 72., 73., 74.};
std::vector<float> rois_vec{7., 5., 7., 5., -15., -15., -15., -15., -10., 21.,
-10., 21., 13., 8., 13., 8., -14., 19., -14., 19.};
std::vector<int64_t> batch_indices_vec{0, 0, 0, 0, 0};
auto result = make_shared<HostTensor>();
ASSERT_TRUE(f->evaluate({result},
{make_host_tensor<element::Type_t::f32>(data_shape, data_vec),
make_host_tensor<element::Type_t::f32>(rois_shape, rois_vec),
make_host_tensor<element::Type_t::i64>(Shape{num_rois})}));
std::vector<float> expected_vec{
2.95833f, 3.20833f, 3.45833f, 3.70833f, 4.625f, 4.875f, 5.125f, 5.375f, 6.29167f, 6.54167f, 6.79167f,
7.04167f, 27.9583f, 28.2083f, 28.4583f, 28.7083f, 29.625f, 29.875f, 30.125f, 30.375f, 31.2917f, 31.5417f,
31.7917f, 32.0417f, 52.9583f, 53.2083f, 53.4583f, 53.7083f, 54.625f, 54.875f, 55.125f, 55.375f, 56.2917f,
56.5417f, 56.7917f, 57.0417f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
0.f, 0.f, 0.f, 0.f, 25.f, 25.f, 25.f, 25.f, 25.f, 25.f, 25.f,
25.f, 25.f, 25.f, 25.f, 25.f, 50.f, 50.f, 50.f, 50.f, 50.f, 50.f,
50.f, 50.f, 50.f, 50.f, 50.f, 50.f, 7.39583f, 7.39583f, 7.42708f, 7.64583f, 9.0625f,
9.0625f, 9.09375f, 9.3125f, 10.7292f, 10.7292f, 10.7604f, 10.9792f, 32.3958f, 32.3958f, 32.4271f, 32.6458f,
34.0625f, 34.0625f, 34.0938f, 34.3125f, 35.7292f, 35.7292f, 35.7604f, 35.9792f, 57.3958f, 57.3958f, 57.4271f,
57.6458f, 59.0625f, 59.0625f, 59.0938f, 59.3125f, 60.7292f, 60.7292f, 60.7604f, 60.9792f, 4.27083f, 4.52083f,
4.77083f, 5.02083f, 5.9375f, 6.1875f, 6.4375f, 6.6875f, 7.60417f, 7.85417f, 8.10417f, 8.35417f, 29.2708f,
29.5208f, 29.7708f, 30.0208f, 30.9375f, 31.1875f, 31.4375f, 31.6875f, 32.6042f, 32.8542f, 33.1042f, 33.3542f,
54.2708f, 54.5208f, 54.7708f, 55.0208f, 55.9375f, 56.1875f, 56.4375f, 56.6875f, 57.6042f, 57.8542f, 58.1042f,
58.3542f, 6.77083f, 6.77083f, 6.77083f, 6.80208f, 8.4375f, 8.4375f, 8.4375f, 8.46875f, 10.1042f, 10.1042f,
10.1042f, 10.1354f, 31.7708f, 31.7708f, 31.7708f, 31.8021f, 33.4375f, 33.4375f, 33.4375f, 33.4688f, 35.1042f,
35.1042f, 35.1042f, 35.1354f, 56.7708f, 56.7708f, 56.7708f, 56.8021f, 58.4375f, 58.4375f, 58.4375f, 58.4688f,
60.1042f, 60.1042f, 60.1042f, 60.1354f};
const auto expected_shape = Shape{num_rois, C, pooled_height, pooled_width};
EXPECT_EQ(result->get_element_type(), element::f32);
EXPECT_EQ(result->get_shape(), expected_shape);
ASSERT_TRUE(test::all_close_f(read_vector<float>(result), expected_vec, 6, 0.001));
}
TEST(op_eval, roi_align_max_pool) {
const int N = 1;
const int C = 3;
const int H = 5;
const int W = 5;
const int num_rois = 5;
const int pooled_height = 3;
const int pooled_width = 4;
const auto data_shape = Shape{N, C, H, W};
const auto rois_shape = Shape{num_rois, 4};
const auto data = make_shared<op::Parameter>(element::f32, data_shape);
const auto rois = make_shared<op::Parameter>(element::f32, rois_shape);
const auto batch_indices = make_shared<op::Parameter>(element::i32, Shape{num_rois});
auto roi_align =
make_shared<op::v3::ROIAlign>(data, rois, batch_indices, pooled_height, pooled_width, 2, 1.0f / 16.0f, "max");
auto f = make_shared<Function>(roi_align, ParameterVector{data, rois, batch_indices});
std::vector<float> data_vec{0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14.,
15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44.,
45., 46., 47., 48., 49., 50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69., 70., 71., 72., 73., 74.};
std::vector<float> rois_vec{7., 5., 7., 5., -15., -15., -15., -15., -10., 21.,
-10., 21., 13., 8., 13., 8., -14., 19., -14., 19.};
std::vector<int64_t> batch_indices_vec{0, 0, 0, 0, 0};
auto result = make_shared<HostTensor>();
ASSERT_TRUE(f->evaluate({result},
{make_host_tensor<element::Type_t::f32>(data_shape, data_vec),
make_host_tensor<element::Type_t::f32>(rois_shape, rois_vec),
make_host_tensor<element::Type_t::i64>(Shape{num_rois})}));
std::vector<float> expected_vec{
3.4375, 3.6875, 3.9375, 4.1875, 5.10417, 5.35417, 5.60417, 5.85417, 6.77083, 7.02083, 7.27083, 7.52083,
28.4375, 28.6875, 28.9375, 29.1875, 30.1042, 30.3542, 30.6042, 30.8542, 31.7708, 32.0208, 32.2708, 32.5208,
53.4375, 53.6875, 53.9375, 54.1875, 55.1042, 55.3542, 55.6042, 55.8542, 56.7708, 57.0208, 57.2708, 57.5208,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50,
7.8125, 7.8125, 7.875, 8.125, 9.47917, 9.47917, 9.54167, 9.79167, 11.1458, 11.1458, 11.2083, 11.4583,
32.8125, 32.8125, 32.875, 33.125, 34.4792, 34.4792, 34.5417, 34.7917, 36.1458, 36.1458, 36.2083, 36.4583,
57.8125, 57.8125, 57.875, 58.125, 59.4792, 59.4792, 59.5417, 59.7917, 61.1458, 61.1458, 61.2083, 61.4583,
4.75, 5, 5.25, 5.5, 6.41667, 6.66667, 6.91667, 7.16667, 8.08333, 8.33333, 8.58333, 8.83333,
29.75, 30, 30.25, 30.5, 31.4167, 31.6667, 31.9167, 32.1667, 33.0833, 33.3333, 33.5833, 33.8333,
54.75, 55, 55.25, 55.5, 56.4167, 56.6667, 56.9167, 57.1667, 58.0833, 58.3333, 58.5833, 58.8333,
7.1875, 7.1875, 7.1875, 7.25, 8.85417, 8.85417, 8.85417, 8.91667, 10.5208, 10.5208, 10.5208, 10.5833,
32.1875, 32.1875, 32.1875, 32.25, 33.8542, 33.8542, 33.8542, 33.9167, 35.5208, 35.5208, 35.5208, 35.5833,
57.1875, 57.1875, 57.1875, 57.25, 58.8542, 58.8542, 58.8542, 58.9167, 60.5208, 60.5208, 60.5208, 60.5833};
const auto expected_shape = Shape{num_rois, C, pooled_height, pooled_width};
EXPECT_EQ(result->get_element_type(), element::f32);
EXPECT_EQ(result->get_shape(), expected_shape);
ASSERT_TRUE(test::all_close_f(read_vector<float>(result), expected_vec, 6, 0.001));
}

View File

@ -1,44 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "engines_util/test_case.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
using namespace std;
using namespace ngraph;
TEST(op_eval, roi_pooling_invalid_roi_batch_id) {
const int H = 6;
const int W = 6;
const int image_size = H * W;
const int channels = 1;
const int num_rois = 1;
const int pooled_h = 1;
const int pooled_w = 1;
const float spatial_scale = 1.f;
Shape feat_maps_shape{1, channels, H, W};
Shape rois_shape{num_rois, 5};
Shape pooled_shape{pooled_h, pooled_w};
Shape output_shape{num_rois, channels, pooled_h, pooled_w};
const auto feat_maps = make_shared<op::Parameter>(element::f32, feat_maps_shape);
const auto rois = make_shared<op::Parameter>(element::f32, rois_shape);
const auto roi_pooling = make_shared<op::v0::ROIPooling>(feat_maps, rois, pooled_shape, spatial_scale, "max");
const auto f = make_shared<Function>(roi_pooling, ParameterVector{feat_maps, rois});
vector<float> feat_maps_vect;
for (unsigned int i = 0; i < channels * image_size; i++) {
feat_maps_vect.push_back(1.f * i / 10);
}
auto test_case = test::TestCase(f, "TEMPLATE");
test_case.add_input<float>(feat_maps_shape, feat_maps_vect);
// ROI with an invalid batch id; evaluation should throw an exception
test_case.add_input<float>(rois_shape, {-1, 1, 1, 2, 3});
test_case.add_expected_output<float>(output_shape, {2.0f});
ASSERT_ANY_THROW(test_case.run());
}

View File

@ -1,54 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/round.hpp"
#include <string>
#include <vector>
#include "engines_util/execute_tools.hpp"
#include "gtest/gtest.h"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
OPENVINO_SUPPRESS_DEPRECATED_START
TEST(op_eval, rounding_to_even) {
auto p = make_shared<op::Parameter>(element::f32, Shape{9});
auto round = make_shared<op::v5::Round>(p, op::v5::Round::RoundMode::HALF_TO_EVEN);
auto fun = make_shared<Function>(OutputVector{round}, ParameterVector{p});
std::vector<float> inputs{-2.5f, -1.5f, -0.5f, 0.5f, 0.9f, 1.5f, 2.3f, 2.5f, 3.5f};
std::vector<float> expected_result{-2.f, -2.f, -0.f, 0.f, 1.f, 2.f, 2.f, 2.f, 4.f};
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor<element::Type_t::f32>(Shape{9}, inputs)}));
EXPECT_EQ(result->get_element_type(), element::f32);
EXPECT_EQ(result->get_shape(), Shape{9});
auto result_data = read_vector<float>(result);
for (size_t i = 0; i < inputs.size(); i++)
EXPECT_NEAR(result_data[i], expected_result[i], 0.000001);
}
TEST(op_eval, rounding_away) {
auto p = make_shared<op::Parameter>(element::f32, Shape{9});
auto round = make_shared<op::v5::Round>(p, op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO);
auto fun = make_shared<Function>(OutputVector{round}, ParameterVector{p});
std::vector<float> inputs{-2.5f, -1.5f, -0.5f, 0.5f, 0.9f, 1.5f, 2.3f, 2.5f, 3.5f};
std::vector<float> expected_result{-3.f, -2.f, -1.f, 1.f, 1.f, 2.f, 2.f, 3.f, 4.f};
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor<element::Type_t::f32>(Shape{9}, inputs)}));
EXPECT_EQ(result->get_element_type(), element::f32);
EXPECT_EQ(result->get_shape(), Shape{9});
auto result_data = read_vector<float>(result);
for (size_t i = 0; i < inputs.size(); i++)
EXPECT_NEAR(result_data[i], expected_result[i], 0.000001);
}

View File

@ -1,37 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/softplus.hpp"
#include <string>
#include <vector>
#include "engines_util/execute_tools.hpp"
#include "gtest/gtest.h"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
OPENVINO_SUPPRESS_DEPRECATED_START
TEST(op_eval, softplus_4D) {
auto p = make_shared<op::Parameter>(element::f32, Shape{4});
auto softplus = make_shared<op::v4::SoftPlus>(p);
auto fun = make_shared<Function>(OutputVector{softplus}, ParameterVector{p});
std::vector<float> inputs{-1.0, 0.0, 1.0, 20.0};
std::vector<float> expected_result{0.31326166, 0.69314718, 1.3132616, 20.0};
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor<element::Type_t::f32>(Shape{4}, inputs)}));
EXPECT_EQ(result->get_element_type(), element::f32);
EXPECT_EQ(result->get_shape(), Shape{4});
auto result_data = read_vector<float>(result);
for (size_t i = 0; i < inputs.size(); i++)
EXPECT_NEAR(result_data[i], expected_result[i], 0.000001);
}
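SoftPlus computes ln(1 + e^x), which reproduces the expected values: ln(1 + e^-1) ≈ 0.3132617, ln 2 ≈ 0.6931472, ln(1 + e) ≈ 1.3132616, and effectively x itself for a large input such as 20.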

View File

@ -1,144 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/split.hpp"
#include <numeric>
#include <vector>
#include "engines_util/execute_tools.hpp"
#include "gtest/gtest.h"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
#include "util/test_tools.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
OPENVINO_SUPPRESS_DEPRECATED_START
TEST(op_eval, split) {
const auto data_shape = Shape{3, 8, 3};
const auto data = make_shared<op::Parameter>(element::i64, data_shape);
const auto axis = make_shared<op::Parameter>(element::i64, Shape{});
const size_t num_splits = 4;
auto split = make_shared<op::v1::Split>(data, axis, num_splits);
auto f = make_shared<Function>(split, ParameterVector{data, axis});
std::vector<int64_t> data_vec(shape_size(data_shape));
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<std::vector<int64_t>> expected_results{
{0, 1, 2, 3, 4, 5, 24, 25, 26, 27, 28, 29, 48, 49, 50, 51, 52, 53},
{6, 7, 8, 9, 10, 11, 30, 31, 32, 33, 34, 35, 54, 55, 56, 57, 58, 59},
{12, 13, 14, 15, 16, 17, 36, 37, 38, 39, 40, 41, 60, 61, 62, 63, 64, 65},
{18, 19, 20, 21, 22, 23, 42, 43, 44, 45, 46, 47, 66, 67, 68, 69, 70, 71}};
HostTensorVector results(num_splits);
for (auto& result : results) {
result = make_shared<HostTensor>();
}
ASSERT_TRUE(f->evaluate(results,
{make_host_tensor<element::Type_t::i64>(data_shape, data_vec),
make_host_tensor<element::Type_t::i64>(Shape{}, std::vector<int64_t>{1})}));
for (size_t i = 0; i < num_splits; ++i) {
EXPECT_EQ(results[i]->get_element_type(), element::i64);
EXPECT_EQ(results[i]->get_shape(), (Shape{3, 2, 3}));
EXPECT_EQ(read_vector<int64_t>(results[i]), expected_results[i]);
}
}
TEST(op_eval, split_neg_axis) {
const auto data_shape = Shape{2, 1, 4, 1};
const auto data = make_shared<op::Parameter>(element::i64, data_shape);
const auto axis = make_shared<op::Parameter>(element::i64, Shape{});
const size_t num_splits = 4;
auto split = make_shared<op::v1::Split>(data, axis, num_splits);
auto f = make_shared<Function>(split, ParameterVector{data, axis});
std::vector<int64_t> data_vec(shape_size(data_shape));
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<std::vector<int64_t>> expected_results{{0, 4}, {1, 5}, {2, 6}, {3, 7}};
HostTensorVector results(num_splits);
for (auto& result : results) {
result = make_shared<HostTensor>();
}
ASSERT_TRUE(f->evaluate(results,
{make_host_tensor<element::Type_t::i64>(data_shape, data_vec),
make_host_tensor<element::Type_t::i64>(Shape{}, std::vector<int64_t>{-2})}));
for (size_t i = 0; i < num_splits; ++i) {
EXPECT_EQ(results[i]->get_element_type(), element::i64);
EXPECT_EQ(results[i]->get_shape(), (Shape{2, 1, 1, 1}));
EXPECT_EQ(read_vector<int64_t>(results[i]), expected_results[i]);
}
}
TEST(op_eval, split_boolean_type) {
const auto data_shape = Shape{2, 1, 2, 1, 2};
const auto data = make_shared<op::Parameter>(element::boolean, data_shape);
const auto axis = make_shared<op::Parameter>(element::i64, Shape{});
const size_t num_splits = 2;
auto split = make_shared<op::v1::Split>(data, axis, num_splits);
auto f = make_shared<Function>(split, ParameterVector{data, axis});
std::vector<char> data_vec{true, false, true, false, true, false, true, false};
std::vector<std::vector<char>> expected_results{{true, false, true, false}, {true, false, true, false}};
HostTensorVector results(num_splits);
for (auto& result : results) {
result = make_shared<HostTensor>();
}
ASSERT_TRUE(f->evaluate(results,
{make_host_tensor<element::Type_t::boolean>(data_shape, data_vec),
make_host_tensor<element::Type_t::i64>(Shape{}, std::vector<int64_t>{2})}));
for (size_t i = 0; i < num_splits; ++i) {
EXPECT_EQ(results[i]->get_element_type(), element::boolean);
EXPECT_EQ(results[i]->get_shape(), (Shape{2, 1, 1, 1, 2}));
EXPECT_EQ(read_vector<char>(results[i]), expected_results[i]);
}
}
TEST(op_eval, split_1d) {
const auto data_shape = Shape{8};
const auto data = make_shared<op::Parameter>(element::f32, data_shape);
const auto axis = make_shared<op::Parameter>(element::i64, Shape{});
const size_t num_splits = 4;
auto split = make_shared<op::v1::Split>(data, axis, num_splits);
auto f = make_shared<Function>(split, ParameterVector{data, axis});
std::vector<float> data_vec(shape_size(data_shape));
std::iota(data_vec.begin(), data_vec.end(), 0.0f);
std::vector<std::vector<float>> expected_results{{0.0f, 1.0f}, {2.0f, 3.0f}, {4.0f, 5.0f}, {6.0f, 7.0f}};
HostTensorVector results(num_splits);
for (auto& result : results) {
result = make_shared<HostTensor>();
}
ASSERT_TRUE(f->evaluate(results,
{make_host_tensor<element::Type_t::f32>(data_shape, data_vec),
make_host_tensor<element::Type_t::i64>(Shape{}, std::vector<int64_t>{0})}));
for (size_t i = 0; i < num_splits; ++i) {
EXPECT_EQ(results[i]->get_element_type(), element::f32);
EXPECT_EQ(results[i]->get_shape(), (Shape{2}));
EXPECT_EQ(read_vector<float>(results[i]), expected_results[i]);
}
}

View File

@ -1,207 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/strided_slice.hpp"
#include <numeric>
#include <vector>
#include "engines_util/execute_tools.hpp"
#include "gtest/gtest.h"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
#include "util/test_tools.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
OPENVINO_SUPPRESS_DEPRECATED_START
TEST(op_eval, strided_slice1) {
auto A_shape = Shape{3, 2, 3};
auto A = make_shared<op::Parameter>(element::i64, A_shape);
auto begin = make_shared<op::Parameter>(element::i64, Shape{3});
auto end = make_shared<op::Parameter>(element::i64, Shape{3});
auto strides = make_shared<op::Parameter>(element::i64, Shape{3});
auto r = make_shared<op::v1::StridedSlice>(A,
begin,
end,
strides,
std::vector<int64_t>(3, 0),
std::vector<int64_t>(3, 0),
std::vector<int64_t>(3, 0),
std::vector<int64_t>(3, 0),
std::vector<int64_t>(3, 0));
auto f = make_shared<Function>(r, ParameterVector{A, begin, end, strides});
std::vector<int64_t> A_vec(3 * 2 * 3);
std::iota(A_vec.begin(), A_vec.end(), 0);
std::vector<std::vector<int64_t>> begin_vecs{{1, 0, 0}, {1, 0, 0}, {2, 0, 0}};
std::vector<std::vector<int64_t>> end_vecs{{2, 1, 3}, {2, 2, 3}, {3, 2, 3}};
std::vector<std::vector<int64_t>> strides_vecs{{1, 1, 1}, {1, 1, 1}, {1, 1, 2}};
std::vector<std::vector<int64_t>> expected_results{{6, 7, 8}, {6, 7, 8, 9, 10, 11}, {12, 14, 15, 17}};
std::vector<Shape> expected_shape{Shape{1, 1, 3}, Shape{1, 2, 3}, Shape{1, 2, 2}};
for (size_t i = 0; i < begin_vecs.size(); ++i) {
auto result = make_shared<HostTensor>();
ASSERT_TRUE(f->evaluate({result},
{make_host_tensor<element::Type_t::i64>(A_shape, A_vec),
make_host_tensor<element::Type_t::i64>(Shape{3}, begin_vecs[i]),
make_host_tensor<element::Type_t::i64>(Shape{3}, end_vecs[i]),
make_host_tensor<element::Type_t::i64>(Shape{3}, strides_vecs[i])}));
EXPECT_EQ(result->get_element_type(), element::i64);
EXPECT_EQ(result->get_shape(), expected_shape[i]);
EXPECT_EQ(read_vector<int64_t>(result), expected_results[i]);
}
}
// A Shape{3, 2, 3}
// [[[ 0 1 2]
// [ 3 4 5]]
// [[ 6 7 8]
// [ 9 10 11]]
// [[12 13 14]
// [15 16 17]]]
// A[1:, :, :]
// result Shape{2, 2, 3}
// [[[ 6 7 8]
// [ 9 10 11]]
// [[12 13 14]
// [15 16 17]]]
TEST(op_eval, strided_slice2) {
auto A_shape = Shape{3, 2, 3};
auto A = make_shared<op::Parameter>(element::i64, A_shape);
auto begin = make_shared<op::Parameter>(element::i64, Shape{3});
auto end = make_shared<op::Parameter>(element::i64, Shape{3});
auto strides = make_shared<op::Parameter>(element::i64, Shape{3});
std::vector<int64_t> begin_vec{1, 0, 0};
std::vector<int64_t> end_vec{0, 0, 0};
std::vector<int64_t> strides_vec{1, 1, 1};
std::vector<int64_t> begin_mask{0, 1, 1};
std::vector<int64_t> end_mask{1, 1, 1};
auto r = make_shared<op::v1::StridedSlice>(A,
begin,
end,
strides,
begin_mask,
end_mask,
std::vector<int64_t>(3, 0),
std::vector<int64_t>(3, 0),
std::vector<int64_t>(3, 0));
auto f = make_shared<Function>(r, ParameterVector{A, begin, end, strides});
std::vector<int64_t> A_vec(3 * 2 * 3);
std::iota(A_vec.begin(), A_vec.end(), 0);
std::vector<int64_t> expected{6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17};
Shape expected_shape{2, 2, 3};
auto result = make_shared<HostTensor>();
ASSERT_TRUE(f->evaluate({result},
{make_host_tensor<element::Type_t::i64>(A_shape, A_vec),
make_host_tensor<element::Type_t::i64>(Shape{3}, begin_vec),
make_host_tensor<element::Type_t::i64>(Shape{3}, end_vec),
make_host_tensor<element::Type_t::i64>(Shape{3}, strides_vec)}));
EXPECT_EQ(result->get_element_type(), element::i64);
EXPECT_EQ(result->get_shape(), expected_shape);
EXPECT_EQ(read_vector<int64_t>(result), expected);
}
// A Shape{3, 2, 3}
// A[:2, 1:, ::2]
// result Shape{2, 1, 2}
// [[[3 5]]
// [[9 11]]]
TEST(op_eval, strided_slice3) {
auto A_shape = Shape{3, 2, 3};
auto A = make_shared<op::Parameter>(element::i64, A_shape);
auto begin = make_shared<op::Parameter>(element::i64, Shape{3});
auto end = make_shared<op::Parameter>(element::i64, Shape{3});
auto strides = make_shared<op::Parameter>(element::i64, Shape{3});
std::vector<int64_t> begin_vec{0, 1, 0};
std::vector<int64_t> end_vec{2, 0, 0};
std::vector<int64_t> strides_vec{1, 1, 2};
std::vector<int64_t> begin_mask{1, 0, 1};
std::vector<int64_t> end_mask{0, 1, 1};
auto r = make_shared<op::v1::StridedSlice>(A,
begin,
end,
strides,
begin_mask,
end_mask,
std::vector<int64_t>(3, 0),
std::vector<int64_t>(3, 0),
std::vector<int64_t>(3, 0));
auto f = make_shared<Function>(r, ParameterVector{A, begin, end, strides});
std::vector<int64_t> A_vec(3 * 2 * 3);
std::iota(A_vec.begin(), A_vec.end(), 0);
std::vector<int64_t> expected{3, 5, 9, 11};
Shape expected_shape{2, 1, 2};
auto result = make_shared<HostTensor>();
ASSERT_TRUE(f->evaluate({result},
{make_host_tensor<element::Type_t::i64>(A_shape, A_vec),
make_host_tensor<element::Type_t::i64>(Shape{3}, begin_vec),
make_host_tensor<element::Type_t::i64>(Shape{3}, end_vec),
make_host_tensor<element::Type_t::i64>(Shape{3}, strides_vec)}));
EXPECT_EQ(result->get_element_type(), element::i64);
EXPECT_EQ(result->get_shape(), expected_shape);
EXPECT_EQ(read_vector<int64_t>(result), expected);
}
// A Shape{3, 2, 3}
// A[0:1, :, ::-1]
// result Shape{1, 2, 3}
// [[[2 1 0]
// [5 4 3]]]
TEST(op_eval, strided_slice_reverse) {
auto A_shape = Shape{3, 2, 3};
auto A = make_shared<op::Parameter>(element::i64, A_shape);
auto begin = make_shared<op::Parameter>(element::i64, Shape{3});
auto end = make_shared<op::Parameter>(element::i64, Shape{3});
auto strides = make_shared<op::Parameter>(element::i64, Shape{3});
std::vector<int64_t> begin_vec{0, 0, 0};
std::vector<int64_t> end_vec{1, 0, 0};
std::vector<int64_t> strides_vec{1, 1, -1};
std::vector<int64_t> begin_mask{0, 1, 1};
std::vector<int64_t> end_mask{0, 1, 1};
auto r = make_shared<op::v1::StridedSlice>(A,
begin,
end,
strides,
begin_mask,
end_mask,
std::vector<int64_t>(3, 0),
std::vector<int64_t>(3, 0),
std::vector<int64_t>(3, 0));
auto f = make_shared<Function>(r, ParameterVector{A, begin, end, strides});
std::vector<int64_t> A_vec(3 * 2 * 3);
std::iota(A_vec.begin(), A_vec.end(), 0);
std::vector<int64_t> expected{2, 1, 0, 5, 4, 3};
Shape expected_shape{1, 2, 3};
auto result = make_shared<HostTensor>();
ASSERT_TRUE(f->evaluate({result},
{make_host_tensor<element::Type_t::i64>(A_shape, A_vec),
make_host_tensor<element::Type_t::i64>(Shape{3}, begin_vec),
make_host_tensor<element::Type_t::i64>(Shape{3}, end_vec),
make_host_tensor<element::Type_t::i64>(Shape{3}, strides_vec)}));
EXPECT_EQ(result->get_element_type(), element::i64);
EXPECT_EQ(result->get_shape(), expected_shape);
EXPECT_EQ(read_vector<int64_t>(result), expected);
}
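A note on the masks used throughout these tests: a 1 in begin_mask or end_mask makes StridedSlice ignore the corresponding begin or end value and take that axis from its start or to its end. That is how strided_slice2 expresses A[1:, :, :] with begin {1, 0, 0}, begin_mask {0, 1, 1} and end_mask {1, 1, 1}, and how strided_slice_reverse expresses A[0:1, :, ::-1] by combining the masks with a stride of -1 on the last axis.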

View File

@ -1,89 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/swish.hpp"
#include <string>
#include <vector>
#include "engines_util/execute_tools.hpp"
#include "gtest/gtest.h"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
#include "openvino/opsets/opset9.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
OPENVINO_SUPPRESS_DEPRECATED_START
TEST(op_eval, swish_with_beta1) {
auto p = make_shared<op::Parameter>(element::f32, Shape{3});
auto beta = make_shared<op::Parameter>(element::f32, Shape{});
auto swish = make_shared<op::v4::Swish>(p, beta);
auto fun = make_shared<Function>(OutputVector{swish}, ParameterVector{p, beta});
std::vector<float> inputs{-0.5, 0.0, 0.5};
std::vector<float> expected_result{-0.18877034, 0.0, 0.31122968};
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result},
{make_host_tensor<element::Type_t::f32>(Shape{3}, inputs),
make_host_tensor<element::Type_t::f32>(Shape{}, {1.0})}));
EXPECT_EQ(result->get_element_type(), element::f32);
EXPECT_EQ(result->get_shape(), Shape{3});
auto result_data = read_vector<float>(result);
for (size_t i = 0; i < inputs.size(); i++)
EXPECT_NEAR(result_data[i], expected_result[i], 0.000001);
}
TEST(op_eval, swish_with_beta0_75) {
auto p = make_shared<op::Parameter>(element::f32, Shape{3});
auto beta = make_shared<op::Parameter>(element::f32, Shape{});
auto swish = make_shared<op::v4::Swish>(p, beta);
auto fun = make_shared<Function>(OutputVector{swish}, ParameterVector{p, beta});
std::vector<float> inputs{-0.5, 0.0, 0.5};
std::vector<float> expected_result{-0.2036667, 0.0, 0.2963333};
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result},
{make_host_tensor<element::Type_t::f32>(Shape{3}, inputs),
make_host_tensor<element::Type_t::f32>(Shape{}, {0.75})}));
EXPECT_EQ(result->get_element_type(), element::f32);
EXPECT_EQ(result->get_shape(), Shape{3});
auto result_data = read_vector<float>(result);
for (size_t i = 0; i < inputs.size(); i++)
EXPECT_NEAR(result_data[i], expected_result[i], 0.000001);
}
TEST(op_eval, swish_without_beta) {
auto p = make_shared<op::Parameter>(element::f32, Shape{3});
auto swish = make_shared<op::v4::Swish>(p);
auto fun = make_shared<Function>(OutputVector{swish}, ParameterVector{p});
std::vector<float> inputs{-0.5, 0.0, 0.5};
std::vector<float> expected_result{-0.18877034, 0.0, 0.31122968};
auto result = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor<element::Type_t::f32>(Shape{3}, inputs)}));
EXPECT_EQ(result->get_element_type(), element::f32);
EXPECT_EQ(result->get_shape(), Shape{3});
auto result_data = read_vector<float>(result);
for (size_t i = 0; i < inputs.size(); i++)
EXPECT_NEAR(result_data[i], expected_result[i], 0.000001);
}
TEST(op_eval, swish_new_evaluate) {
Shape shape{3};
auto p = make_shared<op::Parameter>(element::f32, shape);
auto beta = make_shared<op::Parameter>(element::f32, Shape{});
auto swish = make_shared<ov::opset9::Swish>(p, beta);
ov::TensorVector inputs = {ov::Tensor(element::f32, shape)};
ov::TensorVector outputs = {ov::Tensor(element::f32, shape)};
ASSERT_TRUE(swish->evaluate(outputs, inputs));
}
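Swish computes x * sigmoid(beta * x), with beta treated as 1 when the second input is omitted (the without-beta case above matches the beta = 1 case). The expected values follow: -0.5 * sigmoid(-0.5) ≈ -0.188770 and 0.5 * sigmoid(0.5) ≈ 0.311230 for beta = 1, and -0.5 * sigmoid(-0.375) ≈ -0.203667 and 0.5 * sigmoid(0.375) ≈ 0.296333 for beta = 0.75.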

View File

@ -1,216 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/transpose.hpp"
#include <string>
#include <vector>
#include "engines_util/execute_tools.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/reference/transpose.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
#include "util/all_close_f.hpp"
#include "util/test_tools.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
OPENVINO_SUPPRESS_DEPRECATED_START
template <element::Type_t IN_ET, element::Type_t AXIS_ET>
void test_tranpose_eval(shared_ptr<Function> fun) {
using T = typename element_type_traits<IN_ET>::value_type;
using T_AXIS = typename element_type_traits<AXIS_ET>::value_type;
const std::vector<std::vector<T>> input_data{{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5, 6},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}};
std::vector<Shape> data_shapes{{2, 3}, {2, 3}, {2, 3, 1}, {2, 2, 3}};
const std::vector<std::vector<T_AXIS>> axes_order{{0, 1}, {1, 0}, {1, 2, 0}, {2, 1, 0}};
std::vector<std::vector<T>> expected_results{{1, 2, 3, 4, 5, 6},
{1, 4, 2, 5, 3, 6},
{1, 4, 2, 5, 3, 6},
{1, 7, 4, 10, 2, 8, 5, 11, 3, 9, 6, 12}};
std::vector<Shape> expected_result_shapes{{2, 3}, {3, 2}, {3, 1, 2}, {3, 2, 2}};
for (size_t i = 0; i < data_shapes.size(); i++) {
auto result_tensor = make_shared<HostTensor>(element::dynamic, PartialShape::dynamic());
auto axes_shape = axes_order[i].size() ? Shape{axes_order[i].size()} : Shape{};
ASSERT_TRUE(fun->evaluate({result_tensor},
{make_host_tensor<IN_ET>(data_shapes[i], input_data[i]),
make_host_tensor<AXIS_ET>(axes_shape, axes_order[i])}));
auto actual_results = read_vector<T>(result_tensor);
ASSERT_EQ(actual_results, expected_results[i]);
}
}
TEST(op_eval, eval_transpose) {
vector<shared_ptr<op::Parameter>> axes;
axes.push_back(make_shared<op::Parameter>(element::i8, PartialShape{Dimension::dynamic()}));
axes.push_back(make_shared<op::Parameter>(element::i16, PartialShape{Dimension::dynamic()}));
axes.push_back(make_shared<op::Parameter>(element::i32, PartialShape{Dimension::dynamic()}));
axes.push_back(make_shared<op::Parameter>(element::i64, PartialShape{Dimension::dynamic()}));
axes.push_back(make_shared<op::Parameter>(element::u8, PartialShape{Dimension::dynamic()}));
axes.push_back(make_shared<op::Parameter>(element::u16, PartialShape{Dimension::dynamic()}));
axes.push_back(make_shared<op::Parameter>(element::u32, PartialShape{Dimension::dynamic()}));
axes.push_back(make_shared<op::Parameter>(element::u64, PartialShape{Dimension::dynamic()}));
for (auto& axis : axes) {
const auto input_integral = make_shared<op::Parameter>(element::i16, PartialShape::dynamic());
const auto transpose_integral = make_shared<op::v1::Transpose>(input_integral, axis);
const auto function_integral =
make_shared<Function>(OutputVector{transpose_integral}, ParameterVector{input_integral, axis});
const auto input_floating = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
const auto transpose_floating = make_shared<op::v1::Transpose>(input_floating, axis);
const auto function_floating =
make_shared<Function>(OutputVector{transpose_floating}, ParameterVector{input_floating, axis});
switch (axis->get_element_type()) {
case element::Type_t::i8:
test_tranpose_eval<element::Type_t::i16, element::Type_t::i8>(function_integral);
test_tranpose_eval<element::Type_t::f32, element::Type_t::i8>(function_floating);
break;
case element::Type_t::i16:
test_tranpose_eval<element::Type_t::i16, element::Type_t::i16>(function_integral);
test_tranpose_eval<element::Type_t::f32, element::Type_t::i16>(function_floating);
break;
case element::Type_t::i32:
test_tranpose_eval<element::Type_t::i16, element::Type_t::i32>(function_integral);
test_tranpose_eval<element::Type_t::f32, element::Type_t::i32>(function_floating);
break;
case element::Type_t::i64:
test_tranpose_eval<element::Type_t::i16, element::Type_t::i64>(function_integral);
test_tranpose_eval<element::Type_t::f32, element::Type_t::i64>(function_floating);
break;
case element::Type_t::u8:
test_tranpose_eval<element::Type_t::i16, element::Type_t::u8>(function_integral);
test_tranpose_eval<element::Type_t::f32, element::Type_t::u8>(function_floating);
break;
case element::Type_t::u16:
test_tranpose_eval<element::Type_t::i16, element::Type_t::u16>(function_integral);
test_tranpose_eval<element::Type_t::f32, element::Type_t::u16>(function_floating);
break;
case element::Type_t::u32:
test_tranpose_eval<element::Type_t::i16, element::Type_t::u32>(function_integral);
test_tranpose_eval<element::Type_t::f32, element::Type_t::u32>(function_floating);
break;
case element::Type_t::u64:
test_tranpose_eval<element::Type_t::i16, element::Type_t::u64>(function_integral);
test_tranpose_eval<element::Type_t::f32, element::Type_t::u64>(function_floating);
break;
default:
NGRAPH_CHECK(false, "Invalid type");
break;
}
}
}
TEST(op_eval, eval_axes_transpose) {
auto data_param = make_shared<op::Parameter>(element::i32, PartialShape::dynamic());
auto axes_order = make_shared<op::Parameter>(element::i32, PartialShape{Dimension::dynamic()});
auto x_transpose = make_shared<op::v1::Transpose>(data_param, axes_order);
auto function = make_shared<Function>(NodeVector{x_transpose}, ParameterVector{data_param, axes_order});
const std::vector<int32_t> data{1, 2, 3, 4, 5, 6};
std::vector<size_t> data_shape{2, 3, 1};
const std::vector<int32_t> perm{1, 2, 0};
std::vector<int32_t> expected_result{1, 4, 2, 5, 3, 6};
auto result_tensor = make_shared<HostTensor>();
function->evaluate({result_tensor},
{make_host_tensor<element::Type_t::i32>(data_shape, data),
make_host_tensor<element::Type_t::i32>(Shape{perm.size()}, perm)});
auto actual_results = read_vector<int32_t>(result_tensor);
ASSERT_EQ(actual_results, expected_result);
}
TEST(op_eval, eval_duplicated_axes_transpose) {
auto data_param = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto axes_order = make_shared<op::Parameter>(element::i32, PartialShape{Dimension::dynamic()});
auto x_transpose = make_shared<op::v1::Transpose>(data_param, axes_order);
auto function = make_shared<Function>(NodeVector{x_transpose}, ParameterVector{data_param, axes_order});
const std::vector<float> data{1, 2, 3, 4, 5, 6};
std::vector<size_t> data_shape{2, 3, 1};
const std::vector<int32_t> perm{2, 1, 2};
try {
auto result_tensor = make_shared<HostTensor>();
function->evaluate({result_tensor},
{make_host_tensor<element::Type_t::f32>(data_shape, data),
make_host_tensor<element::Type_t::i32>(Shape{perm.size()}, perm)});
FAIL() << "Duplicated axes values not detected";
} catch (const ngraph_error& error) {
EXPECT_HAS_SUBSTRING(error.what(), std::string("Permutation AxisVector{2, 1, 2} is not valid for input shape"));
} catch (...) {
FAIL() << "Failed for unexpected reason";
}
}
TEST(op_eval, eval_out_of_shape_axes_transpose) {
auto data_param = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto axes_order = make_shared<op::Parameter>(element::i32, PartialShape{Dimension::dynamic()});
auto x_transpose = make_shared<op::v1::Transpose>(data_param, axes_order);
auto function = make_shared<Function>(NodeVector{x_transpose}, ParameterVector{data_param, axes_order});
const std::vector<float> data{1, 2, 3, 4, 5, 6};
std::vector<size_t> data_shape{2, 3, 1};
const std::vector<int32_t> perm{0, 1, 3};
try {
auto result_tensor = make_shared<HostTensor>();
function->evaluate({result_tensor},
{make_host_tensor<element::Type_t::f32>(data_shape, data),
make_host_tensor<element::Type_t::i32>(Shape{perm.size()}, perm)});
FAIL() << "Out of shape axes not detected";
} catch (const ngraph_error& error) {
EXPECT_HAS_SUBSTRING(error.what(), std::string("Permutation AxisVector{0, 1, 3} is not valid for input shape"));
} catch (...) {
FAIL() << "Failed for unexpected reason";
}
}
TEST(op_eval, eval_negative_axes_transpose) {
auto data_param = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto axes_order = make_shared<op::Parameter>(element::i32, PartialShape{Dimension::dynamic()});
auto x_transpose = make_shared<op::v1::Transpose>(data_param, axes_order);
auto function = make_shared<Function>(NodeVector{x_transpose}, ParameterVector{data_param, axes_order});
const std::vector<float> data{1, 2, 3, 4, 5, 6};
std::vector<size_t> data_shape{2, 3, 1};
const std::vector<int32_t> perm{-1, -2, -3};
std::vector<float> expected_result{1, 4, 2, 5, 3, 6};
try {
auto result_tensor = make_shared<HostTensor>();
function->evaluate({result_tensor},
{make_host_tensor<element::Type_t::f32>(data_shape, data),
make_host_tensor<element::Type_t::i32>(Shape{perm.size()}, perm)});
auto actual_results = read_vector<float>(result_tensor);
ASSERT_EQ(actual_results, expected_result);
FAIL() << "Negative axes for Transpose were not supported before.";
} catch (const ngraph_error& error) {
std::stringstream exp_msg;
exp_msg << "Permutation " << AxisVector(perm.begin(), perm.end()) << " is not valid for input shape";
EXPECT_HAS_SUBSTRING(error.what(), exp_msg.str());
} catch (...) {
FAIL() << "Failed for unexpected reason";
}
}
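// Illustrative sketch only (not part of this change): the three failure cases above all
// violate the same rule that a Transpose permutation must contain every axis 0..rank-1
// exactly once, which is why {2, 1, 2}, {0, 1, 3} and {-1, -2, -3} are rejected alike.
// The hypothetical helper below merely restates that rule; the equivalent coverage moves
// to the Template plugin's throwing Transpose reference tests added later in this commit.
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>
static bool is_valid_transpose_permutation(std::vector<int64_t> perm, size_t rank) {
    std::vector<int64_t> expected(rank);
    std::iota(expected.begin(), expected.end(), 0);  // {0, 1, ..., rank-1}
    std::sort(perm.begin(), perm.end());
    return perm.size() == rank && perm == expected;  // each axis appears exactly once
}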


@ -1,260 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/variadic_split.hpp"
#include <numeric>
#include <vector>
#include "engines_util/execute_tools.hpp"
#include "gtest/gtest.h"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
#include "util/test_tools.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
OPENVINO_SUPPRESS_DEPRECATED_START
TEST(op_eval, variadic_split_same_lengths) {
const auto data_shape = Shape{3, 8, 3};
const auto data = make_shared<op::Parameter>(element::i64, data_shape);
const auto axis = make_shared<op::Parameter>(element::i64, Shape{});
const auto split_lengths = make_shared<op::Parameter>(element::i64, Shape{4});
auto var_split = make_shared<op::v1::VariadicSplit>(data, axis, split_lengths);
auto f = make_shared<Function>(var_split, ParameterVector{data, axis, split_lengths});
std::vector<int64_t> data_vec(shape_size(data_shape));
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<std::vector<int64_t>> expected_results{
{0, 1, 2, 3, 4, 5, 24, 25, 26, 27, 28, 29, 48, 49, 50, 51, 52, 53},
{6, 7, 8, 9, 10, 11, 30, 31, 32, 33, 34, 35, 54, 55, 56, 57, 58, 59},
{12, 13, 14, 15, 16, 17, 36, 37, 38, 39, 40, 41, 60, 61, 62, 63, 64, 65},
{18, 19, 20, 21, 22, 23, 42, 43, 44, 45, 46, 47, 66, 67, 68, 69, 70, 71}};
const vector<int64_t> split_lengths_vec{2, 2, 2, 2};
HostTensorVector results(split_lengths_vec.size());
for (auto& result : results) {
result = make_shared<HostTensor>();
}
ASSERT_TRUE(f->evaluate(results,
{make_host_tensor<element::Type_t::i64>(data_shape, data_vec),
make_host_tensor<element::Type_t::i64>({}, std::vector<int64_t>{1}),
make_host_tensor<element::Type_t::i64>({4}, split_lengths_vec)}));
for (size_t i = 0; i < split_lengths_vec.size(); ++i) {
EXPECT_EQ(results[i]->get_element_type(), element::i64);
EXPECT_EQ(results[i]->get_shape(), (Shape{3, static_cast<size_t>(split_lengths_vec[i]), 3}));
EXPECT_EQ(read_vector<int64_t>(results[i]), expected_results[i]);
}
}
TEST(op_eval, variadic_split_different_lengths) {
const auto data_shape = Shape{6, 2, 3};
const auto data = make_shared<op::Parameter>(element::i64, data_shape);
const auto axis = make_shared<op::Parameter>(element::i64, Shape{});
const auto split_lengths = make_shared<op::Parameter>(element::i64, Shape{3});
auto var_split = make_shared<op::v1::VariadicSplit>(data, axis, split_lengths);
auto f = make_shared<Function>(var_split, ParameterVector{data, axis, split_lengths});
std::vector<int64_t> data_vec(shape_size(data_shape));
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<std::vector<int64_t>> expected_results{
{0, 1, 2, 3, 4, 5},
{6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17},
{18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35}};
const vector<int64_t> split_lengths_vec{1, 2, 3};
HostTensorVector results(split_lengths_vec.size());
for (auto& result : results) {
result = make_shared<HostTensor>();
}
ASSERT_TRUE(f->evaluate(results,
{make_host_tensor<element::Type_t::i64>(data_shape, data_vec),
make_host_tensor<element::Type_t::i64>({}, std::vector<int64_t>{0}),
make_host_tensor<element::Type_t::i64>({3}, split_lengths_vec)}));
for (size_t i = 0; i < split_lengths_vec.size(); ++i) {
EXPECT_EQ(results[i]->get_element_type(), element::i64);
EXPECT_EQ(results[i]->get_shape(), (Shape{static_cast<size_t>(split_lengths_vec[i]), 2, 3}));
EXPECT_EQ(read_vector<int64_t>(results[i]), expected_results[i]);
}
}
TEST(op_eval, variadic_split_neg_length) {
const auto data_shape = Shape{2, 7, 1};
const auto data = make_shared<op::Parameter>(element::i64, data_shape);
const auto axis = make_shared<op::Parameter>(element::i64, Shape{});
const auto split_lengths = make_shared<op::Parameter>(element::i64, Shape{3});
auto var_split = make_shared<op::v1::VariadicSplit>(data, axis, split_lengths);
auto f = make_shared<Function>(var_split, ParameterVector{data, axis, split_lengths});
std::vector<int64_t> data_vec(shape_size(data_shape));
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<std::vector<int64_t>> expected_results{{0, 1, 2, 7, 8, 9}, {3, 10}, {4, 5, 6, 11, 12, 13}};
const vector<int64_t> split_lengths_vec{-1, 1, 3};
HostTensorVector results(split_lengths_vec.size());
for (auto& result : results) {
result = make_shared<HostTensor>();
}
ASSERT_TRUE(f->evaluate(results,
{make_host_tensor<element::Type_t::i64>(data_shape, data_vec),
make_host_tensor<element::Type_t::i64>({}, std::vector<int64_t>{1}),
make_host_tensor<element::Type_t::i64>({3}, split_lengths_vec)}));
const vector<size_t> expected_lengths{3, 1, 3};
for (size_t i = 0; i < split_lengths_vec.size(); ++i) {
EXPECT_EQ(results[i]->get_element_type(), element::i64);
EXPECT_EQ(results[i]->get_shape(), (Shape{2, expected_lengths[i], 1}));
EXPECT_EQ(read_vector<int64_t>(results[i]), expected_results[i]);
}
}
TEST(op_eval, variadic_split_neg_length_neg_axis) {
const auto data_shape = Shape{2, 1, 5, 2};
const auto data = make_shared<op::Parameter>(element::i64, data_shape);
const auto axis = make_shared<op::Parameter>(element::i64, Shape{});
const auto split_lengths = make_shared<op::Parameter>(element::i64, Shape{3});
auto var_split = make_shared<op::v1::VariadicSplit>(data, axis, split_lengths);
auto f = make_shared<Function>(var_split, ParameterVector{data, axis, split_lengths});
std::vector<int64_t> data_vec(shape_size(data_shape));
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<std::vector<int64_t>> expected_results{{0, 1, 10, 11},
{2, 3, 4, 5, 12, 13, 14, 15},
{6, 7, 8, 9, 16, 17, 18, 19}};
const vector<int64_t> split_lengths_vec{1, 2, -1};
HostTensorVector results(split_lengths_vec.size());
for (auto& result : results) {
result = make_shared<HostTensor>();
}
ASSERT_TRUE(f->evaluate(results,
{make_host_tensor<element::Type_t::i64>(data_shape, data_vec),
make_host_tensor<element::Type_t::i64>({}, std::vector<int64_t>{-2}),
make_host_tensor<element::Type_t::i64>(Shape{3}, split_lengths_vec)}));
const vector<size_t> expected_lengths{1, 2, 2};
for (size_t i = 0; i < split_lengths_vec.size(); ++i) {
EXPECT_EQ(results[i]->get_element_type(), element::i64);
EXPECT_EQ(results[i]->get_shape(), (Shape{2, 1, expected_lengths[i], 2}));
EXPECT_EQ(read_vector<int64_t>(results[i]), expected_results[i]);
}
}
TEST(op_eval, variadic_split_neg_length_bool_data_type) {
const auto data_shape = Shape{2, 1, 5};
const auto data = make_shared<op::Parameter>(element::boolean, data_shape);
const auto axis = make_shared<op::Parameter>(element::i64, Shape{});
const auto split_lengths = make_shared<op::Parameter>(element::i64, Shape{3});
auto var_split = make_shared<op::v1::VariadicSplit>(data, axis, split_lengths);
auto f = make_shared<Function>(var_split, ParameterVector{data, axis, split_lengths});
std::vector<char> data_vec{true, true, false, false, true, false, true, false, true, false};
std::vector<std::vector<char>> expected_results{{true, false},
{true, false, true, false},
{false, true, true, false}};
const vector<int64_t> split_lengths_vec{1, -1, 2};
HostTensorVector results(split_lengths_vec.size());
for (auto& result : results) {
result = make_shared<HostTensor>();
}
ASSERT_TRUE(f->evaluate(results,
{make_host_tensor<element::Type_t::boolean>(data_shape, data_vec),
make_host_tensor<element::Type_t::i64>({}, std::vector<int64_t>{2}),
make_host_tensor<element::Type_t::i64>({3}, split_lengths_vec)}));
const vector<size_t> expected_lengths{1, 2, 2};
for (size_t i = 0; i < split_lengths_vec.size(); ++i) {
EXPECT_EQ(results[i]->get_element_type(), element::boolean);
EXPECT_EQ(results[i]->get_shape(), (Shape{2, 1, expected_lengths[i]}));
EXPECT_EQ(read_vector<char>(results[i]), expected_results[i]);
}
}
TEST(op_eval, variadic_split_neg_length_axis_ui64) {
const auto data_shape = Shape{2, 1, 4, 2};
const auto data = make_shared<op::Parameter>(element::i64, data_shape);
const auto axis = make_shared<op::Parameter>(element::u64, Shape{});
const auto split_lengths = make_shared<op::Parameter>(element::i64, Shape{2});
auto var_split = make_shared<op::v1::VariadicSplit>(data, axis, split_lengths);
auto f = make_shared<Function>(var_split, ParameterVector{data, axis, split_lengths});
std::vector<int64_t> data_vec(shape_size(data_shape));
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<std::vector<int64_t>> expected_results{{0, 1, 2, 3, 8, 9, 10, 11}, {4, 5, 6, 7, 12, 13, 14, 15}};
const vector<int64_t> split_lengths_vec{2, -1};
HostTensorVector results(split_lengths_vec.size());
for (auto& result : results) {
result = make_shared<HostTensor>();
}
ASSERT_TRUE(f->evaluate(results,
{make_host_tensor<element::Type_t::i64>(data_shape, data_vec),
make_host_tensor<element::Type_t::u64>({}, std::vector<uint64_t>{2}),
make_host_tensor<element::Type_t::i64>({split_lengths_vec.size()}, split_lengths_vec)}));
const vector<size_t> expected_lengths{2, 2};
for (size_t i = 0; i < split_lengths_vec.size(); ++i) {
EXPECT_EQ(results[i]->get_element_type(), element::i64);
EXPECT_EQ(results[i]->get_shape(), (Shape{2, 1, expected_lengths[i], 2}));
EXPECT_EQ(read_vector<int64_t>(results[i]), expected_results[i]);
}
}
TEST(op_eval, variadic_split_data_float_length_i32) {
const auto data_shape = Shape{2, 3, 3};
const auto data = make_shared<op::Parameter>(element::f32, data_shape);
const auto axis = make_shared<op::Parameter>(element::i64, Shape{});
const auto split_lengths = make_shared<op::Parameter>(element::i32, Shape{3});
auto var_split = make_shared<op::v1::VariadicSplit>(data, axis, split_lengths);
auto f = make_shared<Function>(var_split, ParameterVector{data, axis, split_lengths});
std::vector<float> data_vec(shape_size(data_shape));
std::iota(data_vec.begin(), data_vec.end(), 0.0f);
std::vector<std::vector<float>> expected_results{{0.0f, 3.0f, 6.0f, 9.0f, 12.0f, 15.0f},
{1.0f, 4.0f, 7.0f, 10.0f, 13.0f, 16.0f},
{2.0f, 5.0f, 8.0f, 11.0f, 14.0f, 17.0f}};
const vector<int32_t> split_lengths_vec{1, 1, -1};
HostTensorVector results(split_lengths_vec.size());
for (auto& result : results) {
result = make_shared<HostTensor>();
}
ASSERT_TRUE(f->evaluate(results,
{make_host_tensor<element::Type_t::f32>(data_shape, data_vec),
make_host_tensor<element::Type_t::i64>({}, std::vector<int64_t>{-1}),
make_host_tensor<element::Type_t::i32>({3}, split_lengths_vec)}));
const vector<size_t> expected_lengths{1, 1, 1};
for (size_t i = 0; i < split_lengths_vec.size(); ++i) {
EXPECT_EQ(results[i]->get_element_type(), element::f32);
EXPECT_EQ(results[i]->get_shape(), (Shape{2, 3, expected_lengths[i]}));
EXPECT_EQ(read_vector<float>(results[i]), expected_results[i]);
}
}
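// Illustrative sketch only (not part of this change): several of the tests above rely on a
// single negative split length meaning "the remainder of the axis". The hypothetical helper
// below restates that resolution, e.g. lengths {-1, 1, 3} on an axis of size 7 become
// {3, 1, 3}, matching the expected shapes checked above.
#include <cstdint>
#include <vector>
static std::vector<int64_t> resolve_variadic_split_lengths(std::vector<int64_t> lengths,
                                                           int64_t axis_dim) {
    int64_t known = 0;
    for (int64_t v : lengths)
        if (v >= 0)
            known += v;
    for (int64_t& v : lengths)
        if (v < 0)
            v = axis_dim - known;  // at most one negative length is expected
    return lengths;
}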


@ -97,7 +97,7 @@ std::vector<FloorModParams> generateParamsForFloorModBroadcast() {
IN_ET,
std::vector<T>{1, 2, 3, 4},
std::vector<T>{2, 3},
std::vector<T>{1.0f, 0.0f, 1.0f, 2.0f, 1.0f, 0.0f, 0.0f, 1.0f}),
std::vector<T>{1, 0, 1, 2, 1, 0, 0, 1}),
};
return params;
}
@ -117,6 +117,17 @@ std::vector<FloorModParams> generateParamsForFloorModScalar() {
return params;
}
template <element::Type_t IN_ET>
std::vector<FloorModParams> generateParamsForFloorModNonIntegerDivisor() {
using T = typename element_type_traits<IN_ET>::value_type;
// clang-format off
return {FloorModParams(ov::PartialShape{8}, ov::PartialShape{8}, IN_ET,
std::vector<T>{-3.2, -3.1, -3.0, 5.0, 5.1, 5.2, -1.6, 1.6},
std::vector<T>{-3.1, -3.1, -3.1, -5.1, -5.1, -5.1, 1.7, 1.7},
std::vector<T>{-0.1, -0.0, -3.0, -0.1, -0.0, -5.0, 0.1, 1.6})};
// clang-format on
}
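// Illustrative note (not part of this change): the expected values above follow the usual
// floor-mod identity r = x - y * floor(x / y); e.g. x = -3.2, y = -3.1 gives floor(x / y) = 1
// and r = -3.2 - (-3.1) = -0.1, the first expected element. A minimal sketch:
#include <cmath>
static float floor_mod_ref(float x, float y) {
    return x - y * std::floor(x / y);
}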
std::vector<FloorModParams> generateCombinedParamsForFloorMod() {
const std::vector<std::vector<FloorModParams>> allTypeParams{
generateParamsForFloorMod<element::Type_t::f32>(),
@ -173,6 +184,10 @@ std::vector<FloorModParams> generateCombinedParamsForFloorModScalar() {
return combinedParams;
}
std::vector<FloorModParams> generateCombinedParamsForFloorModNonIntegerDivisor() {
return generateParamsForFloorModNonIntegerDivisor<element::Type_t::f32>();
}
INSTANTIATE_TEST_SUITE_P(
smoke_FloorMod_With_Hardcoded_Refs,
ReferenceFloorModLayerTest,
@ -186,9 +201,15 @@ INSTANTIATE_TEST_SUITE_P(
ReferenceFloorModLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(
smoke_FloorMode_Scalar_With_Hardcoded_Refs,
smoke_FloorMod_Scalar_With_Hardcoded_Refs,
ReferenceFloorModLayerTest,
::testing::ValuesIn(generateCombinedParamsForFloorModScalar()),
ReferenceFloorModLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(
smoke_FloorMod_NonInteger_Divisor,
ReferenceFloorModLayerTest,
::testing::ValuesIn(generateCombinedParamsForFloorModNonIntegerDivisor()),
ReferenceFloorModLayerTest::getTestCaseName);
} // namespace


@ -8,11 +8,13 @@
#include "base_reference_test.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
using namespace ov;
using namespace reference_tests;
namespace {
namespace type_tests {
struct InterpolateV1Params {
template <class IT>
@ -91,19 +93,14 @@ struct InterpolateV4Params {
class ReferenceInterpolateV1LayerTest : public testing::TestWithParam<InterpolateV1Params>, public CommonReferenceTest {
public:
void SetUp() override {
auto params = GetParam();
function = CreateFunction(params.inShape,
params.outShape,
params.inType,
params.outType,
params.outShapeInput,
params.attrs);
const auto& params = GetParam();
function = CreateFunction(params);
inputData = {params.inData};
refOutData = {params.outData};
}
static std::string getTestCaseName(const testing::TestParamInfo<InterpolateV1Params>& obj) {
auto param = obj.param;
const auto& param = obj.param;
std::ostringstream result;
result << "iShape=" << param.inShape << "_";
result << "oShape=" << param.outShape << "_";
@ -113,14 +110,9 @@ public:
}
private:
static std::shared_ptr<Model> CreateFunction(const Shape& input_shape,
const Shape& output_shape,
const element::Type& input_type,
const element::Type& output_type,
const std::shared_ptr<op::v0::Constant>& output_shape_input,
op::v0::Interpolate::Attributes& attrs) {
const auto input = std::make_shared<op::v0::Parameter>(input_type, input_shape);
const auto interpolate = std::make_shared<op::v0::Interpolate>(input, output_shape_input, attrs);
static std::shared_ptr<Model> CreateFunction(const InterpolateV1Params& params) {
const auto input = std::make_shared<op::v0::Parameter>(params.inType, params.inShape);
const auto interpolate = std::make_shared<op::v0::Interpolate>(input, params.outShapeInput, params.attrs);
return std::make_shared<Model>(NodeVector{interpolate}, ParameterVector{input});
}
};
@ -128,21 +120,14 @@ private:
class ReferenceInterpolateV4LayerTest : public testing::TestWithParam<InterpolateV4Params>, public CommonReferenceTest {
public:
void SetUp() override {
auto params = GetParam();
function = CreateFunction(params.inShape,
params.outShape,
params.inType,
params.outType,
params.outShapeInput,
params.outShapeInputType,
params.scales,
params.attrs);
const auto& params = GetParam();
function = CreateFunction(params);
inputData = {params.inData};
refOutData = {params.outData};
}
static std::string getTestCaseName(const testing::TestParamInfo<InterpolateV4Params>& obj) {
auto param = obj.param;
const auto& param = obj.param;
std::ostringstream result;
result << "iShape=" << param.inShape << "_";
result << "oShape=" << param.outShape << "_";
@ -152,19 +137,13 @@ public:
}
private:
static std::shared_ptr<Model> CreateFunction(const Shape& input_shape,
const Shape& output_shape,
const element::Type& input_type,
const element::Type& output_type,
const std::vector<size_t> outShapeInput,
const element::Type& outShapeInputType,
const std::vector<float>& scales,
op::v4::Interpolate::InterpolateAttrs& attrs) {
const auto node_input = std::make_shared<op::v0::Parameter>(input_type, input_shape);
const auto node_output_shape_input = op::v0::Constant::create(outShapeInputType, outShapeInput, output_shape);
const auto node_scales = op::v0::Constant::create(element::Type_t::f32, {scales.size()}, scales);
static std::shared_ptr<Model> CreateFunction(const InterpolateV4Params& params) {
const auto node_input = std::make_shared<op::v0::Parameter>(params.inType, params.inShape);
const auto node_output_shape_input =
op::v0::Constant::create(params.outShapeInputType, params.outShapeInput, params.outShape);
const auto node_scales = op::v0::Constant::create(element::Type_t::f32, {params.scales.size()}, params.scales);
auto interpolate =
std::make_shared<op::v4::Interpolate>(node_input, node_output_shape_input, node_scales, attrs);
std::make_shared<op::v4::Interpolate>(node_input, node_output_shape_input, node_scales, params.attrs);
return std::make_shared<Model>(NodeVector{interpolate}, ParameterVector{node_input});
}
};
@ -237,7 +216,7 @@ std::vector<InterpolateV1Params> generateCombinedParamsForInterpolateV1() {
std::vector<InterpolateV1Params> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
std::move(params.begin(), params.end(), std::back_inserter(combinedParams));
}
return combinedParams;
@ -253,8 +232,8 @@ std::vector<InterpolateV4Params> generateCombinedParamsForInterpolateV4() {
std::vector<InterpolateV4Params> combinedParams;
for (const auto& params : allTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
for (auto& params : allTypeParams) {
std::move(params.begin(), params.end(), std::back_inserter(combinedParams));
}
return combinedParams;
@ -270,4 +249,547 @@ INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_V4_With_Hardcoded_Refs,
::testing::ValuesIn(generateCombinedParamsForInterpolateV4()),
ReferenceInterpolateV4LayerTest::getTestCaseName);
} // namespace type_tests
namespace attribute_tests {
using InterpolateAttrs = op::v4::Interpolate::InterpolateAttrs;
using InterpolateMode = op::v4::Interpolate::InterpolateMode;
using ShapeCalcMode = op::v4::Interpolate::ShapeCalcMode;
using CoordinateTransformMode = op::v4::Interpolate::CoordinateTransformMode;
using TransformMode = op::v4::Interpolate::CoordinateTransformMode;
using NearestMode = op::v4::Interpolate::NearestMode;
struct InterpolateV4TestParams {
std::string test_name;
Shape input_data_shape;
std::vector<int64_t> spatial_shape_data;
Shape output_shape;
std::vector<float> scales_data;
std::vector<int64_t> axes_data;
InterpolateAttrs attrs;
std::vector<float> input_data;
std::vector<float> expected_results;
};
std::vector<InterpolateV4TestParams> generateParamsForInterpolate_v4_cubic() {
const auto input_data_shape = Shape{1, 1, 4, 4};
const std::vector<float> input_data =
{1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0};
const std::vector<size_t> zero_pads{0, 0, 0, 0};
// clang-format off
return {
{ "cubic.resize_downsample_scales_cubic",
input_data_shape,
{3, 3},
Shape{1, 1, 3, 3},
{0.8f, 0.8f},
{2, 3},
{ InterpolateMode::CUBIC,
ShapeCalcMode::SCALES,
zero_pads,
zero_pads,
CoordinateTransformMode::HALF_PIXEL,
NearestMode::ROUND_PREFER_FLOOR},
input_data,
{1.47119141, 2.78125, 4.08251953, 6.71142578, 8.02148438, 9.32275391, 11.91650391, 13.2265625, 14.52783203},
},
{ "cubic.resize_downsample_sizes_cubic",
input_data_shape,
{3, 3},
Shape{1, 1, 3, 3},
{0.75f, 0.75f},
{2, 3},
{ InterpolateMode::CUBIC,
ShapeCalcMode::SIZES,
zero_pads,
zero_pads,
CoordinateTransformMode::HALF_PIXEL,
NearestMode::ROUND_PREFER_FLOOR},
input_data,
{1.63078704f, 3.00462963f, 4.37847222f, 7.12615741f, 8.5f, 9.87384259f, 12.62152778f, 13.99537037f,
15.36921296f},
},
{ "cubic.resize_upsample_scales_cubic",
input_data_shape,
{8, 8},
Shape{1, 1, 8, 8},
{2.0f, 2.0f},
{2, 3},
{ InterpolateMode::CUBIC,
ShapeCalcMode::SCALES,
zero_pads,
zero_pads,
CoordinateTransformMode::HALF_PIXEL,
NearestMode::ROUND_PREFER_FLOOR},
input_data,
{0.47265625f, 0.76953125f, 1.24609375f, 1.875f, 2.28125f, 2.91015625f, 3.38671875f, 3.68359375f,
1.66015625f, 1.95703125f, 2.43359375f, 3.0625f, 3.46875f, 4.09765625f, 4.57421875f, 4.87109375f,
3.56640625f, 3.86328125f, 4.33984375f, 4.96875f, 5.375f, 6.00390625f, 6.48046875f, 6.77734375f,
6.08203125f, 6.37890625f, 6.85546875f, 7.484375f, 7.890625f, 8.51953125f, 8.99609375f, 9.29296875f,
7.70703125f, 8.00390625f, 8.48046875f, 9.109375f, 9.515625f, 10.14453125f, 10.62109375f, 10.91796875f,
10.22265625f, 10.51953125f, 10.99609375f, 11.625f, 12.03125f, 12.66015625f, 13.13671875f, 13.43359375f,
12.12890625f, 12.42578125f, 12.90234375f, 13.53125f, 13.9375f, 14.56640625f, 15.04296875f, 15.33984375f,
13.31640625f, 13.61328125f, 14.08984375f, 14.71875f, 15.125f, 15.75390625f, 16.23046875f, 16.52734375f},
},
{ "cubic.resize_upsample_scales_cubic_asymmetric",
input_data_shape,
{8, 8},
Shape{1, 1, 8, 8},
{2.0f, 2.0f},
{2, 3},
{ InterpolateMode::CUBIC,
ShapeCalcMode::SCALES,
zero_pads,
zero_pads,
CoordinateTransformMode::ASYMMETRIC,
NearestMode::ROUND_PREFER_FLOOR},
input_data,
{1.0f, 1.40625f, 2.0f, 2.5f, 3.0f, 3.59375f, 4.0f, 4.09375f, 2.625f, 3.03125f,
3.625f, 4.125f, 4.625f, 5.21875f, 5.625f, 5.71875f, 5.0f, 5.40625f, 6.0f, 6.5f,
7.0f, 7.59375f, 8.0f, 8.09375f, 7.0f, 7.40625f, 8.0f, 8.5f, 9.0f, 9.59375f,
10.0f, 10.09375f, 9.0f, 9.40625f, 10.0f, 10.5f, 11.0f, 11.59375f, 12.0f, 12.09375f,
11.375f, 11.78125f, 12.375f, 12.875f, 13.375f, 13.96875f, 14.375f, 14.46875f, 13.0f, 13.40625f,
14.0f, 14.5f, 15.0f, 15.59375f, 16.0f, 16.09375f, 13.375f, 13.78125f, 14.375f, 14.875f,
15.375f, 15.96875f, 16.375f, 16.46875f},
},
{ "cubic.resize_upsample_sizes_cubic",
input_data_shape,
{9, 10},
Shape{1, 1, 9, 10},
{2.25f, 2.5f},
{2, 3},
{ InterpolateMode::CUBIC,
ShapeCalcMode::SIZES,
zero_pads,
zero_pads,
CoordinateTransformMode::HALF_PIXEL,
NearestMode::ROUND_PREFER_FLOOR},
input_data,
{0.45507922, 0.64057922, 0.97157922, 1.42257922, 1.90732922, 2.22332922, 2.70807922, 3.15907922,
3.49007922, 3.67557922, 1.39437963, 1.57987963, 1.91087963, 2.36187963, 2.84662963, 3.16262963,
3.64737963, 4.09837963, 4.42937963, 4.61487963, 2.95130693, 3.13680693, 3.46780693, 3.91880693,
4.40355693, 4.71955693, 5.20430693, 5.65530693, 5.98630693, 6.17180693, 5.20525069, 5.39075069,
5.72175069, 6.17275069, 6.65750069, 6.97350069, 7.45825069, 7.90925069, 8.24025069, 8.42575069,
6.88975, 7.07525, 7.40625, 7.85725, 8.342, 8.658, 9.14275, 9.59375,
9.92475, 10.11025, 8.57424931, 8.75974931, 9.09074931, 9.54174931, 10.02649931, 10.34249931,
10.82724931, 11.27824931, 11.60924931, 11.79474931, 10.82819307, 11.01369307, 11.34469307, 11.79569307,
12.28044307, 12.59644307, 13.08119307, 13.53219307, 13.86319307, 14.04869307, 12.38512037, 12.57062037,
12.90162037, 13.35262037, 13.83737037, 14.15337037, 14.63812037, 15.08912037, 15.42012037, 15.60562037,
13.32442078, 13.50992078, 13.84092078, 14.29192078, 14.77667078, 15.09267078, 15.57742078, 16.02842078,
16.35942078, 16.54492078},
},
{ "cubic.resize_downsample_scales_cubic_align_corners",
input_data_shape,
{3, 3},
Shape{1, 1, 3, 3},
{0.8f, 0.8f},
{2, 3},
{ InterpolateMode::CUBIC,
ShapeCalcMode::SCALES,
zero_pads,
zero_pads,
CoordinateTransformMode::ALIGN_CORNERS,
NearestMode::ROUND_PREFER_FLOOR},
input_data,
{1.0f, 2.5f, 4.0f, 7.0f, 8.5f, 10.0f, 13.0f, 14.5f, 16.0f},
},
{ "cubic.resize_upsample_scales_cubic_align_corners",
input_data_shape,
{8, 8},
Shape{1, 1, 8, 8},
{2.0f, 2.0f},
{2, 3},
{ InterpolateMode::CUBIC,
ShapeCalcMode::SCALES,
zero_pads,
zero_pads,
CoordinateTransformMode::ALIGN_CORNERS,
NearestMode::ROUND_PREFER_FLOOR},
input_data,
{1.0, 1.34110787, 1.80029155, 2.32944606, 2.67055394, 3.19970845, 3.65889213, 4.0,
2.36443149, 2.70553936, 3.16472303, 3.69387755, 4.03498542, 4.56413994, 5.02332362, 5.36443149,
4.20116618, 4.54227405, 5.00145773, 5.53061224, 5.87172012, 6.40087464, 6.86005831, 7.20116618,
6.31778426, 6.65889213, 7.1180758, 7.64723032, 7.98833819, 8.51749271, 8.97667638, 9.31778426,
7.68221574, 8.02332362, 8.48250729, 9.01166181, 9.35276968, 9.8819242, 10.34110787, 10.68221574,
9.79883382, 10.13994169, 10.59912536, 11.12827988, 11.46938776, 11.99854227, 12.45772595, 12.79883382,
11.63556851, 11.97667638, 12.43586006, 12.96501458, 13.30612245, 13.83527697, 14.29446064, 14.63556851,
13.0, 13.34110787, 13.80029155, 14.32944606, 14.67055394, 15.19970845, 15.65889213, 16.0}
}
};
// clang-format on
}
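// Illustrative sketch only (not part of this change): the three CoordinateTransformMode values
// used in the cubic cases above map an output index back to an input coordinate. Assuming the
// usual ONNX Resize definitions (which these hard-coded expectations appear to follow):
#include <cstddef>
static float to_input_coord_half_pixel(float out_idx, float scale) {
    return (out_idx + 0.5f) / scale - 0.5f;
}
static float to_input_coord_asymmetric(float out_idx, float scale) {
    return out_idx / scale;
}
static float to_input_coord_align_corners(float out_idx, std::size_t in_len, std::size_t out_len) {
    return out_len > 1 ? out_idx * float(in_len - 1) / float(out_len - 1) : 0.0f;
}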
std::vector<InterpolateV4TestParams> generateParamsForInterpolate_v4_nearest() {
const std::vector<size_t> zero_pads{0, 0, 0, 0};
// clang-format off
return {
{ "nearest.resize_downsample_scales_nearest",
Shape{1, 1, 2, 4},
{1, 2},
Shape{1, 1, 1, 2},
{0.6f, 0.6f},
{2, 3},
{ InterpolateMode::NEAREST,
ShapeCalcMode::SCALES,
zero_pads,
zero_pads,
CoordinateTransformMode::HALF_PIXEL,
NearestMode::ROUND_PREFER_FLOOR},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f},
{1.0f, 3.0f},
},
{ "nearest.resize_downsample_sizes_nearest",
Shape{1, 1, 2, 4},
{1, 2},
Shape{1, 1, 1, 2},
{0.5f, 0.5f},
{2, 3},
{ InterpolateMode::NEAREST,
ShapeCalcMode::SIZES,
zero_pads,
zero_pads,
CoordinateTransformMode::HALF_PIXEL,
NearestMode::ROUND_PREFER_FLOOR},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f},
{1.0f, 3.0f},
},
{ "nearest.resize_downsample_sizes_nearest_tf_half_pixel_for_nn",
Shape{1, 1, 4, 4},
{3, 2},
Shape{1, 1, 3, 2},
{0.75, 0.5},
{2, 3},
{ InterpolateMode::NEAREST,
ShapeCalcMode::SIZES,
zero_pads,
zero_pads,
CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN,
NearestMode::ROUND_PREFER_FLOOR},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f},
{6.0f, 8.0f, 10.0f, 12.0f, 14.0f, 16.0f},
},
{ "nearest.resize_upsample_scales_nearest",
Shape{1, 1, 2, 2},
{4, 6},
Shape{1, 1, 4, 6},
{2.0f, 3.0f},
{2, 3},
{ InterpolateMode::NEAREST,
ShapeCalcMode::SCALES,
zero_pads,
zero_pads,
CoordinateTransformMode::HALF_PIXEL,
NearestMode::ROUND_PREFER_FLOOR},
{1.0f, 2.0f, 3.0f, 4.0f},
{1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f, 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f, 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f},
},
{ "nearest.resize_upsample_sizes_nearest",
Shape{1, 1, 2, 2},
{7, 8},
Shape{1, 1, 7, 8},
{3.5f, 4.0f},
{2, 3},
{ InterpolateMode::NEAREST,
ShapeCalcMode::SIZES,
zero_pads,
zero_pads,
CoordinateTransformMode::HALF_PIXEL,
NearestMode::ROUND_PREFER_FLOOR},
{1.0f, 2.0f, 3.0f, 4.0f},
{1.0f, 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f, 2.0f, 1.0f, 1.0f, 1.0f, 1.0f, 2.0f, 2.0f,
2.0f, 2.0f, 1.0f, 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f, 2.0f, 1.0f, 1.0f, 1.0f, 1.0f,
2.0f, 2.0f, 2.0f, 2.0f, 3.0f, 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f, 4.0f, 3.0f, 3.0f,
3.0f, 3.0f, 4.0f, 4.0f, 4.0f, 4.0f, 3.0f, 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f, 4.0f},
},
{ "nearest.resize_upsample_sizes_nearest_ceil_half_pixel",
Shape{1, 1, 4, 4},
{8, 8},
Shape{1, 1, 8, 8},
{2.0f, 2.0f},
{2, 3},
{ InterpolateMode::NEAREST,
ShapeCalcMode::SIZES,
zero_pads,
zero_pads,
CoordinateTransformMode::HALF_PIXEL,
NearestMode::CEIL},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f},
{1.0f, 2.0f, 2.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f, 5.0f, 6.0f, 6.0f, 7.0f, 7.0f,
8.0f, 8.0f, 8.0f, 5.0f, 6.0f, 6.0f, 7.0f, 7.0f, 8.0f, 8.0f, 8.0f, 9.0f, 10.0f,
10.0f, 11.0f, 11.0f, 12.0f, 12.0f, 12.0f, 9.0f, 10.0f, 10.0f, 11.0f, 11.0f, 12.0f, 12.0f,
12.0f, 13.0f, 14.0f, 14.0f, 15.0f, 15.0f, 16.0f, 16.0f, 16.0f, 13.0f, 14.0f, 14.0f, 15.0f,
15.0f, 16.0f, 16.0f, 16.0f, 13.0f, 14.0f, 14.0f, 15.0f, 15.0f, 16.0f, 16.0f, 16.0f},
},
{ "nearest.resize_upsample_sizes_nearest_floor_align_corners",
Shape{1, 1, 4, 4},
{8, 8},
Shape{1, 1, 8, 8},
{2.0f, 2.0f},
{2, 3},
{ InterpolateMode::NEAREST,
ShapeCalcMode::SIZES,
zero_pads,
zero_pads,
CoordinateTransformMode::ALIGN_CORNERS,
NearestMode::FLOOR},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f},
{1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f, 4.0f, 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f, 4.0f,
1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f, 4.0f, 5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 7.0f, 7.0f, 8.0f,
5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 7.0f, 7.0f, 8.0f, 9.0f, 9.0f, 9.0f, 10.0f, 10.0f, 11.0f, 11.0f, 12.0f,
9.0f, 9.0f, 9.0f, 10.0f, 10.0f, 11.0f, 11.0f, 12.0f, 13.0f, 13.0f, 13.0f, 14.0f, 14.0f, 15.0f, 15.0f, 16.0f},
},
{ "nearest.resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric",
Shape{1, 1, 4, 4},
{8, 8},
Shape{1, 1, 8, 8},
{2.0f, 2.0f},
{2, 3},
{ InterpolateMode::NEAREST,
ShapeCalcMode::SIZES,
zero_pads,
zero_pads,
CoordinateTransformMode::ASYMMETRIC,
NearestMode::ROUND_PREFER_CEIL},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f},
{1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 4.0, 5.0, 6.0, 6.0, 7.0, 7.0, 8.0, 8.0, 8.0,
5.0, 6.0, 6.0, 7.0, 7.0, 8.0, 8.0, 8.0, 9.0, 10.0, 10.0, 11.0, 11.0, 12.0, 12.0, 12.0,
9.0, 10.0, 10.0, 11.0, 11.0, 12.0, 12.0, 12.0, 13.0, 14.0, 14.0, 15.0, 15.0, 16.0, 16.0, 16.0,
13.0, 14.0, 14.0, 15.0, 15.0, 16.0, 16.0, 16.0, 13.0, 14.0, 14.0, 15.0, 15.0, 16.0, 16.0, 16.0},
},
};
// clang-format on
}
std::vector<InterpolateV4TestParams> generateParamsForInterpolate_v4_linear_onnx() {
const std::vector<size_t> zero_pads{0, 0, 0, 0};
// clang-format off
return {
{ "linear_onnx.resize_downsample_scales_linear",
Shape{1, 1, 2, 4},
{1, 2},
Shape{1, 1, 1, 2},
{0.6f, 0.6f},
{2, 3},
{ InterpolateMode::LINEAR_ONNX,
ShapeCalcMode::SCALES,
zero_pads,
zero_pads,
CoordinateTransformMode::HALF_PIXEL,
NearestMode::ROUND_PREFER_FLOOR},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f},
{2.6666665f, 4.3333331f},
},
{ "linear_onnx.resize_downsample_sizes_linear_pytorch_half_pixel",
Shape{1, 1, 4, 4},
{3, 1},
Shape{1, 1, 3, 1},
{0.75f, 0.25f},
{2, 3},
{ InterpolateMode::LINEAR_ONNX,
ShapeCalcMode::SIZES,
zero_pads,
zero_pads,
CoordinateTransformMode::PYTORCH_HALF_PIXEL,
NearestMode::ROUND_PREFER_FLOOR},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f},
{1.6666666f, 7.0f, 12.333333f},
},
{ "linear_onnx.resize_upsample_scales_linear",
Shape{1, 1, 2, 2},
{4, 4},
Shape{1, 1, 4, 4},
{2.0f, 2.0f},
{2, 3},
{ InterpolateMode::LINEAR_ONNX,
ShapeCalcMode::SCALES,
zero_pads,
zero_pads,
CoordinateTransformMode::HALF_PIXEL,
NearestMode::ROUND_PREFER_FLOOR},
{1.0f, 2.0f, 3.0f, 4.0f},
{1.0f, 1.25f, 1.75f, 2.0f, 1.5f, 1.75f, 2.25f, 2.5f, 2.5f, 2.75f, 3.25f, 3.5f, 3.0f, 3.25f, 3.75f, 4.0f},
},
{ "linear_onnx.resize_upsample_scales_linear_align_corners",
Shape{1, 1, 2, 2},
{4, 4},
Shape{1, 1, 4, 4},
{2.0f, 2.0f},
{2, 3},
{ InterpolateMode::LINEAR_ONNX,
ShapeCalcMode::SCALES,
zero_pads,
zero_pads,
CoordinateTransformMode::ALIGN_CORNERS,
NearestMode::ROUND_PREFER_FLOOR},
{1.0f, 2.0f, 3.0f, 4.0f},
{1.0f, 1.33333333f, 1.66666667f, 2.0f, 1.66666667f, 2.0f, 2.33333333f, 2.66666667f,
2.33333333f, 2.66666667f, 3.0f, 3.33333333f, 3.0f, 3.33333333f, 3.66666667f, 4.0f},
},
{ "linear_onnx.resize_downsample_scales_linear_align_corners",
Shape{1, 1, 2, 4},
{1, 2},
Shape{1, 1, 1, 2},
{0.6f, 0.6f},
{2, 3},
{ InterpolateMode::LINEAR_ONNX,
ShapeCalcMode::SCALES,
zero_pads,
zero_pads,
CoordinateTransformMode::ALIGN_CORNERS,
NearestMode::ROUND_PREFER_FLOOR},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f},
{1.0f, 4.0f},
}
};
// clang-format on
}
std::vector<InterpolateV4TestParams> generateParamsForInterpolate_v4_linear_onnx5d() {
const std::vector<size_t> zero_pads{0, 0, 0, 0, 0};
// clang-format off
return {
{ "linear_onnx5d.resize_downsample_scales_linear",
Shape{1, 1, 3, 2, 4},
{2, 1, 2},
Shape{1, 1, 2, 1, 2},
{0.8f, 0.6f, 0.6f},
{2, 3, 4},
{ InterpolateMode::LINEAR_ONNX,
ShapeCalcMode::SCALES,
zero_pads,
zero_pads,
CoordinateTransformMode::HALF_PIXEL,
NearestMode::ROUND_PREFER_FLOOR},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f},
{3.6666665, 5.333333, 13.666666, 15.333333}
},
{ "linear_onnx5d.resize_downsample_scales_linear_align_corners",
Shape{1, 1, 3, 2, 4},
{2, 1, 2},
Shape{1, 1, 2, 1, 2},
{0.8f, 0.6f, 0.6f},
{2, 3, 4},
{ InterpolateMode::LINEAR_ONNX,
ShapeCalcMode::SCALES,
zero_pads,
zero_pads,
CoordinateTransformMode::ALIGN_CORNERS,
NearestMode::ROUND_PREFER_FLOOR},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f},
{1.0, 4.0, 17.0, 20.0}
},
{ "linear_onnx5d.resize_upsample_scales_linear",
Shape{1, 1, 2, 2, 2},
{4, 4, 4},
Shape{1, 1, 4, 4, 4},
{2.0, 2.0, 2.0},
{2, 3, 4},
{ InterpolateMode::LINEAR_ONNX,
ShapeCalcMode::SCALES,
zero_pads,
zero_pads,
CoordinateTransformMode::HALF_PIXEL,
NearestMode::ROUND_PREFER_FLOOR},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f},
{1.0, 1.25, 1.75, 2.0, 1.5, 1.75, 2.25, 2.5, 2.5, 2.75, 3.25, 3.5, 3.0, 3.25, 3.75, 4.0,
2.0, 2.25, 2.75, 3.0, 2.5, 2.75, 3.25, 3.5, 3.5, 3.75, 4.25, 4.5, 4.0, 4.25, 4.75, 5.0,
4.0, 4.25, 4.75, 5.0, 4.5, 4.75, 5.25, 5.5, 5.5, 5.75, 6.25, 6.5, 6.0, 6.25, 6.75, 7.0,
5.0, 5.25, 5.75, 6.0, 5.5, 5.75, 6.25, 6.5, 6.5, 6.75, 7.25, 7.5, 7.0, 7.25, 7.75, 8.0}
},
{ "linear_onnx5d.resize_upsample_scales_linear_align_corners",
Shape{1, 1, 2, 2, 2},
{4, 4, 4},
Shape{1, 1, 4, 4, 4},
{2.0, 2.0, 2.0},
{2, 3, 4},
{ InterpolateMode::LINEAR_ONNX,
ShapeCalcMode::SCALES,
zero_pads,
zero_pads,
CoordinateTransformMode::ALIGN_CORNERS,
NearestMode::ROUND_PREFER_FLOOR},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f},
{1.0, 1.3333333, 1.6666667, 2.0, 1.6666666, 2.0, 2.3333335, 2.6666667, 2.3333333, 2.6666665,
3.0, 3.3333335, 3.0, 3.3333333, 3.6666665, 4.0, 2.3333335, 2.6666665, 3.0, 3.3333333,
3.0, 3.333333, 3.6666665, 3.9999995, 3.6666665, 4.0, 4.3333335, 4.6666665, 4.333333, 4.6666665,
4.9999995, 5.333333, 3.6666667, 4.0, 4.3333335, 4.6666665, 4.3333335, 4.6666665, 5.0, 5.333333,
5.0, 5.3333335, 5.666667, 6.0, 5.666667, 5.9999995, 6.333333, 6.666667, 5.0, 5.333333,
5.6666665, 6.0, 5.666667, 5.9999995, 6.333333, 6.666666, 6.3333335, 6.666666, 7.0, 7.3333335,
7.0, 7.333333, 7.6666675, 8.0}
},
{ "linear_onnx5d.resize_downsample_sizes_linear_pytorch_half_pixel",
Shape{1, 1, 2, 4, 4},
{1, 3, 1},
Shape{1, 1, 1, 3, 1},
{0.5, 0.75, 0.25},
{2, 3, 4},
{ InterpolateMode::LINEAR_ONNX,
ShapeCalcMode::SIZES,
zero_pads,
zero_pads,
CoordinateTransformMode::PYTORCH_HALF_PIXEL,
NearestMode::ROUND_PREFER_FLOOR},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f,
12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f,
23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f},
{1.6666667, 7.0, 12.333333}
}
};
// clang-format on
}
std::vector<InterpolateV4TestParams> generateCombinedParamsForInterpolate_v4() {
const std::vector<std::vector<InterpolateV4TestParams>> allTypeParams{
generateParamsForInterpolate_v4_cubic(),
generateParamsForInterpolate_v4_nearest(),
generateParamsForInterpolate_v4_linear_onnx(),
generateParamsForInterpolate_v4_linear_onnx5d()};
std::vector<InterpolateV4TestParams> combinedParams;
for (auto& params : allTypeParams)
std::move(params.begin(), params.end(), std::back_inserter(combinedParams));
return combinedParams;
}
class ReferenceInterpolate_v4 : public testing::TestWithParam<InterpolateV4TestParams>, public CommonReferenceTest {
public:
void SetUp() override {
const auto& param = GetParam();
function = CreateFunction(param);
inputData = {CreateTensor(param.input_data_shape, element::f32, param.input_data)};
refOutData = {CreateTensor(param.output_shape, element::f32, param.expected_results)};
}
static std::string getTestCaseName(const testing::TestParamInfo<InterpolateV4TestParams>& obj) {
return obj.param.test_name;
}
private:
static std::shared_ptr<Model> CreateFunction(const InterpolateV4TestParams& param) {
auto image = std::make_shared<op::v0::Parameter>(element::f32, param.input_data_shape);
const auto& spatial_shape_data = param.spatial_shape_data;
auto target_spatial_shape =
op::v0::Constant::create<int64_t>(element::i64, Shape{spatial_shape_data.size()}, spatial_shape_data);
const auto& scales_data = param.scales_data;
auto scales = op::v0::Constant::create<float>(element::f32, Shape{scales_data.size()}, scales_data);
const auto& axes_data = param.axes_data;
auto axes = op::v0::Constant::create<int64_t>(element::i64, Shape{axes_data.size()}, axes_data);
auto interpolate = std::make_shared<op::v4::Interpolate>(image, target_spatial_shape, scales, axes, param.attrs);
return std::make_shared<Model>(NodeVector{interpolate}, ParameterVector{image});
}
};
TEST_P(ReferenceInterpolate_v4, LayerTest) {
Exec();
}
INSTANTIATE_TEST_SUITE_P(smoke,
ReferenceInterpolate_v4,
::testing::ValuesIn(generateCombinedParamsForInterpolate_v4()),
ReferenceInterpolate_v4::getTestCaseName);
} // namespace attribute_tests
} // namespace


@ -604,10 +604,36 @@ std::vector<MatMulParams> generateParamsForMatMulWithGeneratedInput() {
true),
};
return params;
}
template <element::Type_t ET>
std::vector<MatMulParams> generateParamsForMatMulWithSameBatchSize() {
using T = typename element_type_traits<ET>::value_type;
const auto input0_shapes = Shape{3, 2, 2, 2};
const auto input1_shapes = Shape{3, 2, 2, 1};
std::vector<T> input0_data(shape_size(input0_shapes));
std::vector<T> input1_data(shape_size(input1_shapes));
std::iota(input0_data.begin(), input0_data.end(), 1);
std::iota(input1_data.begin(), input1_data.end(), 0);
return std::vector<MatMulParams>{
MatMulParams(input0_shapes,
input1_shapes,
Shape{3, 2, 2, 1},
ET,
ET,
ET,
input0_data,
input1_data,
std::vector<T>{2, 4, 28, 38, 86, 104, 176, 202, 298, 332, 452, 494},
false,
false,
false),
};
}
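// Illustrative check (not part of this change): with the iota-filled inputs above, the first
// 2x2 by 2x1 batch is [[1, 2], [3, 4]] x [[0], [1]] = [[2], [4]], which matches the first two
// expected values {2, 4, ...}.
static void first_batch_matmul_check() {
    const int a[2][2] = {{1, 2}, {3, 4}};
    const int b[2] = {0, 1};
    const int r0 = a[0][0] * b[0] + a[0][1] * b[1];  // 2
    const int r1 = a[1][0] * b[0] + a[1][1] * b[1];  // 4
    (void)r0;
    (void)r1;
}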
std::vector<MatMulParams> generateCombinedParamsForMatMul() {
const std::vector<std::vector<MatMulParams>> allTypeParams{
generateParamsForMatMul<element::Type_t::f32>(),
@ -619,6 +645,8 @@ std::vector<MatMulParams> generateCombinedParamsForMatMul() {
generateParamsForMatMulWithGeneratedInput<element::Type_t::f32>(),
generateParamsForMatMulWithGeneratedInput<element::Type_t::i64>(),
generateParamsForMatMulWithGeneratedInput<element::Type_t::i32>(),
generateParamsForMatMulWithSameBatchSize<element::Type_t::f32>(),
generateParamsForMatMulWithSameBatchSize<element::Type_t::i64>(),
};
std::vector<MatMulParams> combinedParams;


@ -46,12 +46,8 @@ public:
result << "aShape=" << param.axisTensor.shape << "_";
result << "nSplit=" << param.numSplits << "_";
result << "eType=" << param.expectedTensors[0].type << "_";
if (param.testcaseName != "") {
result << "eShape=" << param.expectedTensors[0].shape << "_";
result << "eShape=" << param.testcaseName;
} else {
result << "eShape=" << param.expectedTensors[0].shape;
}
result << "eShape=" << param.expectedTensors[0].shape << "_";
result << "eShape=" << param.testcaseName;
return result.str();
}
@ -159,12 +155,21 @@ std::vector<SplitParams> generateSplitParams() {
reference_tests::Tensor({2, 1, 2, 2}, IN_ET, std::vector<T>{2, 3, 8, 9, 14, 15, 20, 21}),
reference_tests::Tensor({2, 1, 2, 2}, IN_ET, std::vector<T>{4, 5, 10, 11, 16, 17, 22, 23})},
"split_4d_axis_3"),
// split_4d_axis_negative_2
SplitParams(reference_tests::Tensor({2, 1, 4, 1}, IN_ET, std::vector<T>{0, 1, 2, 3, 4, 5, 6, 7}),
reference_tests::Tensor({}, element::i32, std::vector<int32_t>{-2}),
2,
std::vector<reference_tests::Tensor>{
reference_tests::Tensor({2, 1, 2, 1}, IN_ET, std::vector<T>{0, 1, 4, 5}),
reference_tests::Tensor({2, 1, 2, 1}, IN_ET, std::vector<T>{2, 3, 6, 7})},
"split_4d_axis_negative_2"),
};
return splitParams;
}
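// Illustrative note (not part of this change): the new split_4d_axis_negative_2 case relies on
// negative-axis normalization, so axis -2 on the {2, 1, 4, 1} input selects the axis of size 4,
// which is split evenly into the two {2, 1, 2, 1} outputs listed above.
static int normalize_axis(int axis, int rank) {
    return axis < 0 ? axis + rank : axis;
}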
std::vector<SplitParams> generateSplitCombinedParams() {
const std::vector<std::vector<SplitParams>> splitTypeParams {
generateSplitParams<element::Type_t::boolean>(),
generateSplitParams<element::Type_t::i8>(),
generateSplitParams<element::Type_t::i16>(),
generateSplitParams<element::Type_t::i32>(),


@ -8,28 +8,38 @@
#include "openvino/op/constant.hpp"
#include "openvino/op/convert.hpp"
#include "base_reference_test.hpp"
#include "util/type_prop.hpp"
using namespace reference_tests;
using namespace ov;
namespace {
struct TransposeParams {
TransposeParams(const PartialShape& dynamicDataShape, const reference_tests::Tensor& dataTensor, const reference_tests::Tensor& axisTensor,
const reference_tests::Tensor& expectedTensor, const std::string& testcaseName = "") :
dynamicDataShape(dynamicDataShape), dataTensor(dataTensor), axisTensor(axisTensor),
expectedTensor(expectedTensor), testcaseName(testcaseName) {}
TransposeParams(const PartialShape& dynamicDataShape,
const reference_tests::Tensor& dataTensor,
const reference_tests::Tensor& axisTensor,
const reference_tests::Tensor& expectedTensor,
const std::string& testcaseName,
const std::pair<std::string, std::string>& expectedException = {})
: dynamicDataShape(dynamicDataShape),
dataTensor(dataTensor),
axisTensor(axisTensor),
expectedTensor(expectedTensor),
testcaseName(testcaseName),
expectedException(expectedException) {}
PartialShape dynamicDataShape;
reference_tests::Tensor dataTensor;
reference_tests::Tensor axisTensor;
reference_tests::Tensor expectedTensor;
std::string testcaseName;
std::pair<std::string,std::string> expectedException;
};
class ReferenceTransposeLayerTest : public testing::TestWithParam<TransposeParams>, public CommonReferenceTest {
public:
void SetUp() override {
auto params = GetParam();
const auto& params = GetParam();
function = CreateFunction(params);
if (params.dynamicDataShape.is_static()) {
inputData = {params.dataTensor.data};
@ -40,7 +50,7 @@ public:
}
static std::string getTestCaseName(const testing::TestParamInfo<TransposeParams>& obj) {
auto param = obj.param;
const auto& param = obj.param;
std::ostringstream result;
result << "ddShape=" << param.dynamicDataShape;
result << "_dType=" << param.dataTensor.type;
@ -48,12 +58,8 @@ public:
result << "_aType=" << param.axisTensor.type;
result << "_aShape=" << param.axisTensor.shape;
result << "_eType=" << param.expectedTensor.type;
if (param.testcaseName != "") {
result << "_eShape=" << param.expectedTensor.shape;
result << "_=" << param.testcaseName;
} else {
result << "_eShape=" << param.expectedTensor.shape;
}
result << "_eShape=" << param.expectedTensor.shape;
result << "_=" << param.testcaseName;
return result.str();
}
@ -79,7 +85,21 @@ private:
};
TEST_P(ReferenceTransposeLayerTest, CompareWithRefs) {
Exec();
const auto& params = GetParam();
if (params.expectedException.first.empty()) {
Exec();
} else {
try {
Exec();
FAIL() << params.expectedException.second;
} catch (const ov::Exception& error) {
EXPECT_HAS_SUBSTRING(error.what(), params.expectedException.first);
} catch (const std::exception& error) {
FAIL() << "Failed for unexpected reason: " << error.what();
} catch (...) {
FAIL() << "Failed for unknown reason";
}
}
}
template <element::Type_t IN_ET>
@ -146,6 +166,31 @@ std::vector<TransposeParams> generateTransposeParams() {
return transposeParams;
}
template <element::Type_t IN_ET>
std::vector<TransposeParams> generateThrowingTransposeParams() {
using T = typename element_type_traits<IN_ET>::value_type;
return std::vector<TransposeParams>{
TransposeParams(PartialShape::dynamic(),
reference_tests::Tensor(IN_ET, {2, 3, 1}, std::vector<T>{1, 2, 3, 4, 5, 6}),
reference_tests::Tensor(element::i64, {3}, std::vector<int64_t>{2, 1, 2}),
reference_tests::Tensor(IN_ET, {2, 3, 1}, std::vector<T>{1, 2, 3, 4, 5, 6}),
"duplicated_axes_values",
{"not valid for input shape", "Duplicated axes values not detected"}),
TransposeParams(PartialShape::dynamic(),
reference_tests::Tensor(IN_ET, {2, 3, 1}, std::vector<T>{1, 2, 3, 4, 5, 6}),
reference_tests::Tensor(element::i64, {3}, std::vector<int64_t>{0, 1, 3}),
reference_tests::Tensor(IN_ET, {2, 3, 1}, std::vector<T>{1, 2, 3, 4, 5, 6}),
"out_of_shape_axes_values",
{"not valid for input shape", "Out of shape axes not detected"}),
TransposeParams(PartialShape::dynamic(),
reference_tests::Tensor(IN_ET, {2, 3, 1}, std::vector<T>{1, 2, 3, 4, 5, 6}),
reference_tests::Tensor(element::i64, {3}, std::vector<int64_t>{-1, -2, -3}),
reference_tests::Tensor(IN_ET, {2, 3, 1}, std::vector<T>{1, 4, 2, 5, 3, 6}),
"negative_axes_values",
{"not valid for input shape", "Negative axes for Transpose were not supported before"}),
};
}
std::vector<TransposeParams> generateTransposeCombinedParams() {
const std::vector<std::vector<TransposeParams>> transposeTypeParams {
generateTransposeParams<element::Type_t::i8>(),
@ -158,6 +203,8 @@ std::vector<TransposeParams> generateTransposeCombinedParams() {
generateTransposeParams<element::Type_t::u64>(),
generateTransposeParams<element::Type_t::f16>(),
generateTransposeParams<element::Type_t::f32>(),
generateThrowingTransposeParams<element::Type_t::f32>(),
generateThrowingTransposeParams<element::Type_t::i32>(),
};
std::vector<TransposeParams> combinedParams;


@ -187,8 +187,8 @@ std::vector<VariadicSplitParams> generateVariadicSplitParams() {
40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55,
56, 57, 58, 59, 60, 61, 62, 63}),
reference_tests::Tensor(element::i32, {1}, std::vector<int32_t>{1}),
reference_tests::Tensor(element::i32, {4}, std::vector<int32_t>{1, 3, 2, 2}),
reference_tests::Tensor(element::u64, {1}, std::vector<uint64_t>{1}),
reference_tests::Tensor(element::u64, {4}, std::vector<uint64_t>{1, 3, 2, 2}),
std::vector<reference_tests::Tensor>{reference_tests::Tensor(IN_ET, {2, 1, 2, 2}, std::vector<T>{0, 1, 2, 3, 32, 33, 34, 35}),
reference_tests::Tensor(IN_ET, {2, 3, 2, 2}, std::vector<T>{4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 36, 37, 38, 39,
@ -208,8 +208,8 @@ std::vector<VariadicSplitParams> generateVariadicSplitParams() {
40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55,
56, 57, 58, 59, 60, 61, 62, 63}),
reference_tests::Tensor(element::i32, {1}, std::vector<int32_t>{1}),
reference_tests::Tensor(element::i32, {4}, std::vector<int32_t>{1, 3, 2, 2}),
reference_tests::Tensor(element::i64, {1}, std::vector<int64_t>{1}),
reference_tests::Tensor(element::i64, {4}, std::vector<int64_t>{1, 3, 2, 2}),
std::vector<reference_tests::Tensor>{reference_tests::Tensor(IN_ET, {2, 1, 2, 2}, std::vector<T>{0, 1, 2, 3, 32, 33, 34, 35}),
reference_tests::Tensor(IN_ET, {2, 3, 2, 2}, std::vector<T>{4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 36, 37, 38, 39,
@ -224,8 +224,8 @@ std::vector<VariadicSplitParams> generateVariadicSplitParams() {
reference_tests::Tensor(IN_ET, {2, 1, 6, 2}, std::vector<T>{0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23}),
reference_tests::Tensor(element::i32, {1}, std::vector<int32_t>{2}),
reference_tests::Tensor(element::i32, {3}, std::vector<int32_t>{3, 1, 2}),
reference_tests::Tensor(element::u32, {1}, std::vector<uint32_t>{2}),
reference_tests::Tensor(element::u32, {3}, std::vector<uint32_t>{3, 1, 2}),
std::vector<reference_tests::Tensor>{reference_tests::Tensor(IN_ET, {2, 1, 3, 2}, std::vector<T>{0, 1, 2, 3, 4, 5, 12, 13,
14, 15, 16, 17}),
reference_tests::Tensor(IN_ET, {2, 1, 1, 2}, std::vector<T>{6, 7, 18, 19}),
@ -254,13 +254,13 @@ std::vector<VariadicSplitParams> generateVariadicSplitParams() {
reference_tests::Tensor(IN_ET, {2, 1, 2, 2}, std::vector<T>{1, 2, 7, 8, 13, 14, 19, 20}),
reference_tests::Tensor(IN_ET, {2, 1, 2, 3}, std::vector<T>{3, 4, 5, 9, 10, 11, 15, 16,
17, 21, 22, 23})},
"variadic_split_4d_axis_3_static"),
"variadic_split_4d_axis_neg1_static"),
// variadic_split_4d_axis_3_dynamic
VariadicSplitParams(PartialShape::dynamic(),
reference_tests::Tensor(IN_ET, {2, 1, 2, 6}, std::vector<T>{0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23}),
reference_tests::Tensor(element::i32, {1}, std::vector<int32_t>{3}),
reference_tests::Tensor(element::i32, {1}, std::vector<int32_t>{-1}),
reference_tests::Tensor(element::i32, {3}, std::vector<int32_t>{1, 2, -1}),
std::vector<reference_tests::Tensor>{reference_tests::Tensor(IN_ET, {2, 1, 2, 1}, std::vector<T>{0, 6, 12, 18}),
reference_tests::Tensor(IN_ET, {2, 1, 2, 2}, std::vector<T>{1, 2, 7, 8, 13, 14, 19, 20}),