Removed deprecated layer tests (#17153)

This commit is contained in:
Ryszard Jezierski
2023-05-16 16:03:39 +02:00
committed by GitHub
parent 6ae318d6e3
commit 850d35eafa
8 changed files with 0 additions and 952 deletions

View File

@@ -1,154 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <tuple>
#include <vector>
#include <gtest/gtest.h>
#include <single_layer_common.hpp>
#include <ngraph/op/parameter.hpp>
#include <ngraph/ops.hpp>
#include <ie_precision.hpp>
#include "../gna_matcher.hpp"
using GNAAlignFilterTestParams = std::tuple<InferenceEngine::Precision, std::size_t, std::size_t>;
// Fixture for GNA alignment-filter handling: builds a two-input Concat
// (each input fed through ReLU, output ReLU'd) whose input widths come from
// the test parameters, forcing the plugin to insert alignment filters.
class GNAAlignFilterTest : public GNATest<>,
                           public testing::WithParamInterface<GNAAlignFilterTestParams> {
public:
    // Builds a readable gtest case name, e.g. "fast_concat_of(31_73)_on_FP32".
    static std::string getTestName(const testing::TestParamInfo<GNAAlignFilterTestParams>& params) {
        std::string test_name;
        test_name += "fast_";
        test_name += "concat_of(" + std::to_string(std::get<1>(params.param));
        test_name += "_" + std::to_string(std::get<2>(params.param));
        test_name += ")_on_";
        test_name += std::get<0>(params.param).name();
        return test_name;
    }
protected:
    // Inference precision under test (FP32 runs are skipped / handled separately).
    InferenceEngine::Precision precision = InferenceEngine::Precision::FP32;
    // Widths of the two Concat inputs, taken from the test parameters.
    std::size_t concat_inputs[2];
    void SetUp() override {
        std::tie(precision, concat_inputs[0], concat_inputs[1]) = GetParam();
    }
    // Builds: Relu(in0), Relu(in1) -> Concat(axis=1) -> Relu.
    std::shared_ptr<ngraph::Function> getNgraphModel() {
        auto input0 = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{1, concat_inputs[0]});
        auto input1 = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{1, concat_inputs[1]});
        auto relu0 = std::make_shared<ngraph::op::v0::Relu>(input0);
        auto relu1 = std::make_shared<ngraph::op::v0::Relu>(input1);
        auto concat = std::make_shared<ngraph::op::Concat>(ngraph::NodeVector{relu0, relu1}, 1);
        auto relu3 = std::make_shared<ngraph::op::v0::Relu>(concat);
        auto function = std::make_shared<ngraph::Function>(ngraph::NodeVector{relu3}, ngraph::ParameterVector{input0, input1});
        return function;
    }
};
// Verifies the GNA plugin inserts the expected number of copy layers and
// the expected total affine-filter weight count when aligning Concat inputs.
TEST_P(GNAAlignFilterTest, concatWith_2_Inputs_Small_mem_footprint) {
    auto ngraf = getNgraphModel();
    if (precision == InferenceEngine::Precision::FP32) {
        GTEST_SKIP() << "FP32 case - won't produce gna primitives";
    }
    // calc expected weight size
    size_t expected_affine_size = 0;
    size_t expected_copy_layers = 0;
    // For an input of `sz` elements: one copy layer handles the part beyond
    // the 32-element boundary; the remainder (sz % 32) is handled by an
    // affine filter whose row count is rounded up to a multiple of 8.
    auto getFastAffineFilterParams = [](size_t sz) -> std::pair<size_t, size_t> {
        //align first input by 8
        auto copy_N = sz > 32 ? 1 : 0; // number of copy layers
        auto firstFilter_frac = sz % 32;
        auto firstFilter_N = ALIGN(firstFilter_frac, 8);
        // NOTE(review): weight count is aligned-rows * unaligned-cols here;
        // confirm against the plugin's actual filter layout.
        return {copy_N, firstFilter_N * firstFilter_frac};
    };
    auto getNumCopyElements = [&getFastAffineFilterParams](size_t sz) {
        return getFastAffineFilterParams(sz).first;
    };
    auto getsNumFilterWeights = [&getFastAffineFilterParams](size_t sz) {
        return getFastAffineFilterParams(sz).second;
    };
    expected_copy_layers = getNumCopyElements(concat_inputs[0]);
    expected_affine_size = getsNumFilterWeights(concat_inputs[0]);
    // calculation size for second filter
    auto offset = ALIGN(concat_inputs[0], 32) - 32;
    auto zerolen = concat_inputs[0] - offset;
    auto second_output_len = zerolen + concat_inputs[1];
    expected_affine_size += second_output_len * ALIGN(concat_inputs[1], 8);
    // Per-input scale factors keep quantization neutral (scale 1.0).
    assert_that().onInferNgraphModel(ngraf)
        .inNotCompactMode()
        .withGNAConfig(std::string(GNA_CONFIG_KEY(SCALE_FACTOR)) + "_0", 1.0f)
        .withGNAConfig(std::string(GNA_CONFIG_KEY(SCALE_FACTOR)) + "_1", 1.0f)
        .withGNAConfig(GNA_CONFIG_KEY(PRECISION), precision.name())
        .gna()
        .affine_weights()
        .size()
        .equals_to(expected_affine_size)
        .And()
        .copy_inserted_into_nnet()
        .times(expected_copy_layers);
}
// Accuracy check: FP32 runs on CPU and must reproduce the concatenated
// inputs exactly (the model is ReLU/Concat of positive ramps, so it is an
// identity); the quantized path only checks that inference completes.
TEST_P(GNAAlignFilterTest, concatWith_2_Inputs_accurate) {
    auto ngraf = getNgraphModel();
    if (precision == InferenceEngine::Precision::FP32) {
        std::vector<std::vector<float>> input_data;
        // Fill inputs with a single continuous ramp 1, 2, 3, ... across both
        // inputs, so the concatenated output is the same ramp.
        float start_value = 1.0;
        for (auto dim : concat_inputs) {
            if (dim > 0) {
                input_data.push_back(std::vector<float>(dim));
                std::iota(input_data.back().begin(), input_data.back().end(), start_value);
                start_value += dim;
            }
        }
        // Total element count is (last ramp value) == start_value - 1.
        std::vector<float> expected_result(static_cast<size_t>(start_value - 1));
        start_value = 1.0;
        std::iota(expected_result.begin(), expected_result.end(), start_value);
        assert_that().onInferNgraphModel(ngraf)
            .inNotCompactMode()
            .gna()
            .propagate_forward()
            .onCPU()
            .called_with()
            .input(ngraf->get_parameters().at(0)->get_name(), input_data[0])
            .input(ngraf->get_parameters().at(1)->get_name(), input_data[1])
            .equals_to(expected_result);
    } else {
        // Quantized path: smoke test only — propagate_forward must be called.
        assert_that().onInferNgraphModel(ngraf)
            .inNotCompactMode()
            .gna()
            .withGNAConfig(std::string(GNA_CONFIG_KEY(SCALE_FACTOR)) + "_0", 1.0f)
            .withGNAConfig(std::string(GNA_CONFIG_KEY(SCALE_FACTOR)) + "_1", 1.0f)
            .withGNAConfig(GNA_CONFIG_KEY(PRECISION), "I16")
            .propagate_forward()
            .called();
    }
}
// Sizes are chosen around the GNA 32-element alignment boundary
// (31 < 32 < 49/73) to exercise both the filter-only and copy+filter paths.
INSTANTIATE_TEST_SUITE_P(
    GNALayerTests,
    GNAAlignFilterTest,
    testing::Combine(
        testing::Values(InferenceEngine::Precision::FP32, InferenceEngine::Precision::I16),
        // Size of first Split layer output
        testing::Values(31, 49),
        // Size of second Split layer output
        testing::Values(31, 73)),
    GNAAlignFilterTest::getTestName);

View File

@@ -1,112 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <tuple>
#include <gtest/gtest.h>
#include <ngraph/op/parameter.hpp>
#include <ngraph/ops.hpp>
#include <ie_precision.hpp>
#include "../gna_matcher.hpp"
struct Point2D {
std::size_t x = 0;
std::size_t y = 0;
};
using Conv1DParams = std::tuple<InferenceEngine::Precision, std::size_t, std::size_t, std::size_t, std::size_t>;
// Fixture for 1-D convolution mapping on GNA. Parameters supply kernel.x,
// pad.x, stride.x and the output channel count; the 1x784 input is reshaped
// to 1x1x784 (N, C, W) before the convolution.
class GNAConv1DTest : public GNATest<>,
                      public testing::WithParamInterface<Conv1DParams> {
public:
    // e.g. "3_kernel.x_1_pad.x_0_stride.x_32_output_channels_FP32"
    static std::string getTestName(const testing::TestParamInfo<Conv1DParams>& params) {
        std::string test_name = std::to_string(std::get<1>(params.param)) + "_kernel.x_";
        test_name += std::to_string(std::get<2>(params.param)) + "_pad.x_";
        test_name += std::to_string(std::get<3>(params.param)) + "_stride.x_";
        test_name += std::to_string(std::get<4>(params.param)) + "_output_channels_";
        test_name += std::get<0>(params.param).name();
        return test_name;
    }
protected:
    InferenceEngine::Precision precision = InferenceEngine::Precision::FP32;
    Point2D kernel;
    Point2D pad;
    Point2D stride;
    std::size_t output_channels = 0;
    std::size_t input_dim = 784;
    void SetUp() override {
        std::tie(precision, kernel.x, pad.x, stride.x, output_channels) = GetParam();
        kernel.y = 1;
        pad.y = 0;
        // Reused below as the convolution dilation, which must be >= 1
        // (was 0 in the original, producing an invalid attribute value).
        stride.y = 1;
    }
    // Builds: Parameter(1x784) -> Reshape(1x1x784) -> Convolution -> Multiply(x2).
    std::shared_ptr<ngraph::Function> getNgraphModel() {
        auto input = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{1, input_dim});
        std::vector<int> shape {1, 1};
        shape.push_back(input_dim);
        auto input_reshaped = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape {shape.size()}, shape);
        auto reshape = std::make_shared<ngraph::op::v1::Reshape>(input, input_reshaped, false);
        // Filter shape {C_out, C_in = 1, kernel.x}; the single 1.0f value is
        // presumably broadcast to the whole constant — TODO confirm Constant
        // fill semantics for this ngraph version.
        auto filters = std::make_shared<ngraph::op::Constant>(
            ngraph::element::f32,
            ngraph::Shape {output_channels, kernel.y, kernel.x},
            std::vector<float> {1.0f});
        // Fixed: the original called the *size* constructors —
        // ngraph::Strides(stride.x) builds a vector of `stride.x` zeros, not
        // the value {stride.x} — and only worked because ngraph defaults
        // empty attributes. Build explicit one-element 1-D attributes; the
        // legacy stride parameter of 0 maps to the default stride of 1.
        auto strides = ngraph::Strides{stride.x > 0 ? stride.x : 1};
        auto dilations = ngraph::Strides{stride.y};
        auto pads_begin = ngraph::CoordinateDiff{static_cast<std::ptrdiff_t>(pad.x)};
        auto pads_end = ngraph::CoordinateDiff{static_cast<std::ptrdiff_t>(pad.y)};
        auto convolution = std::make_shared<ngraph::op::v1::Convolution>(
            reshape,
            filters,
            strides,
            pads_begin,
            pads_end,
            dilations);
        auto weights = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape {}, std::vector<float> {2.0f});
        auto mul = std::make_shared<ngraph::op::v1::Multiply>(convolution, weights);
        auto function = std::make_shared<ngraph::Function>(ngraph::NodeVector{mul}, ngraph::ParameterVector{input});
        return function;
    }
};
// Smoke test: FP32 runs the conv model on CPU with a ramp input; quantized
// path only checks that propagate_forward is invoked.
// NOTE(review): the case name "SplitToConcatWith2Inputs" looks copy-pasted
// from the split/concat suite — this test exercises 1-D convolution.
TEST_P(GNAConv1DTest, SplitToConcatWith2Inputs) {
    if (precision == InferenceEngine::Precision::FP32) {
        std::vector<float> input_data(input_dim);
        std::iota(input_data.begin(), input_data.end(), 1.0);
        assert_that().onInferNgraphModel(getNgraphModel())
            .inNotCompactMode()
            .gna()
            .propagate_forward()
            .onCPU()
            .called_with_input(input_data);
    } else {
        assert_that().onInferNgraphModel(getNgraphModel())
            .inNotCompactMode()
            .gna()
            .withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
            .withGNAConfig(GNA_CONFIG_KEY(PRECISION), "I16")
            .propagate_forward()
            .called();
    }
}
// Parameter order matches Conv1DParams: precision, kernel.x, pad.x,
// stride.x (only 0, i.e. the default stride), output_channels.
INSTANTIATE_TEST_SUITE_P(
    GNALayerTests,
    GNAConv1DTest,
    testing::Combine(
        testing::Values(InferenceEngine::Precision::FP32, InferenceEngine::Precision::I16),
        testing::Values(1, 3, 9, 16, 24, 32, 42, 64),
        testing::Values(0, 1),
        testing::Values(0),
        testing::Values(32, 128, 512)),
    GNAConv1DTest::getTestName);

View File

@@ -1,115 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <tuple>
#include <gtest/gtest.h>
#include <ngraph/op/parameter.hpp>
#include <ngraph/ops.hpp>
#include <ie_precision.hpp>
#include <legacy/ngraph_ops/fully_connected.hpp>
#include "../gna_matcher.hpp"
using namespace InferenceEngine;
// Plain parameter holder mixed into the fixture; decodeGtestParams unpacks
// the gtest tuple into the named fields.
struct EltwiseTestParams {
    InferenceEngine::Precision precision = InferenceEngine::Precision::FP32;
    // When true, no-op Reshape nodes are inserted before the eltwise Add.
    bool has_reshape_before_eltwise = false;
    template <class T>
    void decodeGtestParams(const T &);
};
// Tuple mirrors the struct fields, in declaration order.
using GNAEltwiseTestParams = std::tuple<
    decltype(EltwiseTestParams::precision),
    decltype(EltwiseTestParams::has_reshape_before_eltwise)>;
template <>
inline void EltwiseTestParams::decodeGtestParams<GNAEltwiseTestParams>(const GNAEltwiseTestParams & params) {
    std::tie(precision, has_reshape_before_eltwise) = params;
}
// Fixture for eltwise Add on GNA: two 1x32 inputs each go through a
// FullyConnected (all-ones 32x32 weights, zero bias), optionally through a
// no-op Reshape, and are then summed.
class GNAEltwiseTest : public GNATest<>, public testing::WithParamInterface<GNAEltwiseTestParams>, public EltwiseTestParams {
public:
    // e.g. "FP32_with_reshapes"
    static std::string getTestName(const testing::TestParamInfo<GNAEltwiseTestParams>& params) {
        EltwiseTestParams tp;
        tp.decodeGtestParams(params.param);
        std::stringstream test_name;
        test_name << tp.precision << (tp.has_reshape_before_eltwise ? "_with_reshapes" : "");
        return test_name.str();
    }
protected:
    void SetUp() override {
        decodeGtestParams(GetParam());
    }
    std::shared_ptr<ngraph::Function> buildNgraphModel() {
        auto input1 = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{1, 32});
        // Fixed: the original assigned through
        // const_cast<std::string&>(input->get_name()), which writes through a
        // const reference (undefined behavior). Use the friendly-name API,
        // which is what name-based input lookup consumes.
        input1->set_friendly_name("input1");
        auto input2 = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{1, 32});
        input2->set_friendly_name("input2");
        auto weights = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{ 32, 32 }, {1});
        auto biases = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{32}, {0});
        std::shared_ptr<ngraph::op::Op> FC1 = std::make_shared<ngraph::op::FullyConnected>(input1, weights, biases, ngraph::Shape{ 1, 32});
        std::shared_ptr<ngraph::op::Op> FC2 = std::make_shared<ngraph::op::FullyConnected>(input2, weights, biases, ngraph::Shape{ 1, 32});
        if (has_reshape_before_eltwise) {
            // Reshape to the same 1x32 shape — a deliberate no-op that forces
            // the plugin to handle Reshape between FC and Add.
            auto reshape_pattern = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
                                                                          ngraph::Shape{2},
                                                                          std::vector<size_t>{1, 32});
            FC1 = std::make_shared<ngraph::op::v1::Reshape>(FC1, reshape_pattern, false);
            FC2 = std::make_shared<ngraph::op::v1::Reshape>(FC2, reshape_pattern, false);
        }
        auto add = std::make_shared<ngraph::op::v1::Add>(FC1, FC2);
        auto function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{input1, input2});
        return function;
    }
};
// FP32: with all-ones 32x32 weights, FC1 sums input1 (32 * 1 = 32) and FC2
// sums input2 (32 * 2 = 64) into every output element, so Add yields 96
// everywhere. Quantized precisions only check that inference runs.
TEST_P(GNAEltwiseTest, FourBytesInputsViaReshape) {
    if (precision == InferenceEngine::Precision::FP32) {
        std::vector<float> expected_result(32, 96.0f);
        std::vector<float> input1(32, 1.0f);
        std::vector<float> input2(32, 2.0f);
        assert_that().onInferNgraphModel(buildNgraphModel())
            .inNotCompactMode()
            .gna()
            .propagate_forward()
            .onCPU()
            .called_with()
            .input("input1", input1)
            .input("input2", input2)
            .equals_to(expected_result);
    } else {
        assert_that().onInferNgraphModel(buildNgraphModel())
            .inNotCompactMode()
            .gna()
            .withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR) + std::string("_0"), 1.0f)
            .withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR) + std::string("_1"), 1.0f)
            .withGNAConfig(GNA_CONFIG_KEY(PRECISION), Precision(precision).name())
            .propagate_forward()
            .called();
    }
}
// Covers FP32 accuracy plus I16/I8 quantized smoke runs, with and without
// the interposed Reshape nodes.
INSTANTIATE_TEST_SUITE_P(
    GNALayerTests,
    GNAEltwiseTest,
    ::testing::Combine(
        ::testing::Values(Precision::FP32, Precision::I16, Precision::I8),
        ::testing::Values(true, false)),
    GNAEltwiseTest::getTestName);

View File

@@ -1,118 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <tuple>
#include <gtest/gtest.h>
#include <ngraph/op/parameter.hpp>
#include <ngraph/ops.hpp>
#include <ie_precision.hpp>
#include "../gna_matcher.hpp"
using MultiInputToConcatParams = std::tuple<InferenceEngine::Precision, std::size_t, std::size_t>;
// Fixture: N network inputs of equal width feed one Concat, followed by a
// scalar Multiply(-1) + Add(0) so the graph isn't a pure pass-through.
class GNAMultiInputToConcatTest : public GNATest<>,
                                  public testing::WithParamInterface<MultiInputToConcatParams> {
public:
    // e.g. "4_inputs_16_per_input_FP32"
    static std::string getTestName(const testing::TestParamInfo<MultiInputToConcatParams>& params) {
        std::string test_name = std::to_string(std::get<1>(params.param)) + "_inputs_";
        test_name += std::to_string(std::get<2>(params.param)) + "_per_input_";
        test_name += std::get<0>(params.param).name();
        return test_name;
    }
protected:
    InferenceEngine::Precision precision = InferenceEngine::Precision::FP32;
    std::size_t number_of_inputs = 0;
    // Width of each individual input. NOTE(review): "pre" was likely meant
    // ("dims per input"); kept as-is since it is part of the field name.
    std::size_t dims_pre_input = 0;
    // Scalar multiplier applied after the Concat.
    const float MUL_VALUE = -1.0f;
    void SetUp() override {
        std::tie(precision, number_of_inputs, dims_pre_input) = GetParam();
    }
    // Returns the generated input names alongside the function, since the
    // ngraph-assigned parameter names are needed to feed data by name.
    std::tuple<std::vector<std::string>, std::shared_ptr<ngraph::Function>> getNgraphModelWithIO() {
        ngraph::ParameterVector inputs;
        std::vector<std::string> inputs_names;
        ngraph::OutputVector outputs_from_inputs;
        for (std::size_t i = 0; i < number_of_inputs; i++) {
            auto input = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{1, dims_pre_input });
            inputs.push_back(input);
            inputs_names.push_back(input->get_name());
            outputs_from_inputs.push_back(input->outputs()[0]);
        }
        auto concat = std::make_shared<ngraph::op::Concat>(outputs_from_inputs, 1);
        auto weights = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{ 1 }, std::vector<float> {MUL_VALUE});
        auto biases = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{ 1 }, std::vector<float> {0.0f});
        auto mul = std::make_shared<ngraph::op::v1::Multiply>(concat, weights);
        auto add = std::make_shared<ngraph::op::v1::Add>(mul, biases);
        auto function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, inputs);
        return std::make_tuple(inputs_names, function);
    }
};
// FP32: expected output is the concatenation of all ramp inputs scaled by
// MUL_VALUE. NOTE(review): the I16 precision is skipped up front, so with
// the FP32/I16 parameter set the else-branch below is unreachable dead code;
// it also applies inputs *after* .called(), which looks like a latent bug.
TEST_P(GNAMultiInputToConcatTest, InputsToConcat) {
    if (precision == InferenceEngine::Precision::I16) {
        GTEST_SKIP();
    }
    float start_from = 1.0f;
    auto model_with_io_names = getNgraphModelWithIO();
    auto input_names = std::get<0>(model_with_io_names);
    // One continuous ramp 1, 2, 3, ... split across all inputs.
    std::vector<std::vector<float>> inputs;
    for (std::size_t i = 0; i < number_of_inputs; i++) {
        std::vector<float> input_data(dims_pre_input);
        std::iota(input_data.begin(), input_data.end(), start_from);
        start_from = input_data[input_data.size() - 1] + 1.0f;
        inputs.push_back(input_data);
    }
    if (precision == InferenceEngine::Precision::FP32) {
        auto test_object = assert_that().onInferNgraphModel(std::get<1>(model_with_io_names))
            .inNotCompactMode()
            .gna()
            .propagate_forward()
            .onCPU()
            .called_with();
        std::vector<float> expected_result;
        for (std::size_t i = 0; i < number_of_inputs; i++) {
            test_object.input(input_names[i], inputs[i]);
            expected_result.insert(expected_result.end(), inputs[i].begin(), inputs[i].end());
        }
        // The network multiplies the concatenated data by MUL_VALUE.
        for (std::size_t i = 0; i < expected_result.size(); i++) {
            expected_result[i] *= MUL_VALUE;
        }
        test_object.equals_to(expected_result);
    } else {
        auto test_object = assert_that().onInferNgraphModel(std::get<1>(model_with_io_names))
            .inNotCompactMode()
            .gna()
            .withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
            .withGNAConfig(GNA_CONFIG_KEY(PRECISION), "I16")
            .propagate_forward()
            .called();
        for (std::size_t i = 0; i < number_of_inputs; i++) {
            test_object.input(input_names[i], inputs[i]);
        }
    }
}
// Wide sweep over input counts and widths, including GNA-alignment-relevant
// sizes (8, 16, 32, 64) and odd ones (9, 15, 42, ...).
INSTANTIATE_TEST_SUITE_P(
    GNALayerTests,
    GNAMultiInputToConcatTest,
    ::testing::Combine(
        ::testing::Values(InferenceEngine::Precision::FP32, InferenceEngine::Precision::I16),
        // Number of inputs to Concat layer
        ::testing::Values(2, 3, 4, 5, 6, 7, 8, 9, 10, 32, 96),
        // Size of each input
        ::testing::Values(1, 2, 3, 8, 9, 10, 15, 16, 32, 42, 48, 50, 64, 96, 100, 128, 132)),
    GNAMultiInputToConcatTest::getTestName);

View File

@@ -1,75 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "permute_irs.hpp"
#include <gtest/gtest.h>
#include "../gna_matcher.hpp"
using namespace GNATestIRs::Permute;
typedef struct { Permute3dimCaseParam test_param; bool supported; } Permute3dimTestParam;
class GNAPermute3dTest : public GNATest<::testing::TestWithParam<Permute3dimTestParam>> {
};
// Builds a gtest case name of the form "order_a_b_c_dim_x_y_z" from the
// permute order and input dimensions of the parameter.
static std::string getPermute3dTestName(testing::TestParamInfo<Permute3dimTestParam> obj) {
    const auto& param = obj.param.test_param;
    std::string name = "order";
    for (int axis : param.order) {
        name += "_" + std::to_string(axis);
    }
    name += "_dim";
    for (int extent : param.dim) {
        name += "_" + std::to_string(extent);
    }
    return name;
}
// Runs the Reshape->Permute->Reshape->FC IR with a fixed 8-element input and
// a 2x8 weight pattern; supported permutes must reproduce the plain
// matrix-vector product, unsupported ones must throw.
TEST_P(GNAPermute3dTest, Permute3dim) {
    auto test_param = GetParam().test_param;
    auto supported = GetParam().supported;
    std::vector<float> input_data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0};
    std::vector<float> weights = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
                                  0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1};
    // Expected FC output = W (2x8) * input (8). Fixed: the loops used `int`
    // indices compared against unsigned .size() (signed/unsigned mismatch).
    std::vector<float> expected_result(2);
    for (std::size_t res_index = 0; res_index < expected_result.size(); res_index++) {
        for (std::size_t n = 0; n < input_data.size(); n++) {
            expected_result[res_index] += input_data[n] * weights[res_index * input_data.size() + n];
        }
    }
    auto& test_instance =
        assert_that().onInferModel(Permute3dimModel_v6(test_param))
            .inNotCompactMode()
            .withWeigthsPattern(std::move(weights))
            .gna()
            .propagate_forward()
            .onCPU()
            .called_with_input(input_data);
    if (supported) {
        test_instance.equals_to(expected_result);
    } else {
        // Unsupported permute orders must be rejected at load/infer time.
        test_instance.throws();
    }
}
// Each entry: {{order, dim}, supported}. Orders that keep the innermost
// axis last (or are trivial) are supported; orders that move axis 0 to the
// end (2,0,1 / 2,1,0) or collapse onto a unit inner axis are not.
const Permute3dimTestParam gna_permute3d_test_params[] = {
    {{{1, 0, 2}, {1, 2, 4}}, true},
    {{{1, 0, 2}, {2, 1, 4}}, true},
    {{{1, 0, 2}, {1, 4, 2}}, true},
    {{{1, 0, 2}, {4, 1, 2}}, true},
    {{{1, 0, 2}, {1, 8, 1}}, true},
    {{{1, 0, 2}, {8, 1, 1}}, true},
    {{{1, 0, 2}, {4, 2, 1}}, false},
    {{{1, 0, 2}, {2, 4, 1}}, false},
    {{{1, 2, 0}, {1, 2, 4}}, true},
    {{{0, 1, 2}, {1, 2, 4}}, true},
    {{{0, 2, 1}, {2, 1, 4}}, true},
    {{{2, 0, 1}, {1, 2, 4}}, false},
    {{{2, 1, 0}, {2, 1, 4}}, false}
};
INSTANTIATE_TEST_SUITE_P(GNALayerTests, GNAPermute3dTest,
                         ::testing::ValuesIn(gna_permute3d_test_params), getPermute3dTestName);

View File

@@ -1,96 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <tuple>
#include <vector>
#include <gtest/gtest.h>
#include <single_layer_common.hpp>
#include <ngraph/op/parameter.hpp>
#include <ngraph/ops.hpp>
#include <ie_precision.hpp>
#include "../gna_matcher.hpp"
using SplitToConcatTestParams = std::tuple<InferenceEngine::Precision, std::size_t, std::size_t>;
// Fixture: VariadicSplit of one input into two pieces, immediately
// re-Concat'ed, then scalar Multiply(-1) + Add(0). Exercises the plugin's
// split/concat elimination while keeping a computable output (-x).
class GNASplitToConcatTest : public GNATest<>,
                             public testing::WithParamInterface<SplitToConcatTestParams> {
public:
    // e.g. "first_16_second_32_FP32"
    static std::string getTestName(const testing::TestParamInfo<SplitToConcatTestParams>& params) {
        std::string test_name = "first_" + std::to_string(std::get<1>(params.param)) + "_second_";
        test_name += std::to_string(std::get<2>(params.param)) + "_";
        test_name += std::get<0>(params.param).name();
        return test_name;
    }
protected:
    InferenceEngine::Precision precision = InferenceEngine::Precision::FP32;
    std::size_t input_dim = 0;           // derived: sum of both split outputs
    std::size_t first_split_output = 0;  // width of split output #1
    std::size_t second_split_output = 0; // width of split output #2
    const float MUL_VALUE = -1.0f;       // scalar applied after the Concat
    void SetUp() override {
        std::tie(precision, first_split_output, second_split_output) = GetParam();
        input_dim = first_split_output + second_split_output;
    }
    std::shared_ptr<ngraph::Function> getNgraphModel() {
        std::vector<std::size_t> split_desc_vector({first_split_output, second_split_output});
        auto input = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{1, input_dim});
        // Split along axis 1 into the two parameterized lengths.
        auto split = std::make_shared<ngraph::op::v1::VariadicSplit>(input,
            ngraph::op::Constant::create(ngraph::element::i64, ngraph::Shape{}, {1}),
            ngraph::op::Constant::create(ngraph::element::i64, ngraph::Shape{split_desc_vector.size()}, split_desc_vector));
        auto concat = std::make_shared<ngraph::op::Concat>(split->outputs(), 1);
        auto weights = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape {1}, std::vector<float> {MUL_VALUE});
        auto biases = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape {1}, std::vector<float> {0.0f});
        auto mul = std::make_shared<ngraph::op::v1::Multiply>(concat, weights);
        auto add = std::make_shared<ngraph::op::v1::Add>(mul, biases);
        auto function = std::make_shared<ngraph::Function>(ngraph::NodeVector{add}, ngraph::ParameterVector{input});
        return function;
    }
};
// FP32: split+concat is an identity, so the output is input * MUL_VALUE.
// Quantized path: smoke test that inference runs.
TEST_P(GNASplitToConcatTest, SplitToConcatWith2Inputs) {
    if (precision == InferenceEngine::Precision::FP32) {
        std::vector<float> input_data(input_dim);
        std::iota(input_data.begin(), input_data.end(), 1.0);
        std::vector<float> expected_result(input_dim);
        for (std::size_t i = 0; i < expected_result.size(); i++) {
            expected_result[i] = input_data[i] * MUL_VALUE;
        }
        assert_that().onInferNgraphModel(getNgraphModel())
            .inNotCompactMode()
            .gna()
            .propagate_forward()
            .onCPU()
            .called_with_input(input_data)
            .equals_to(expected_result);
    } else {
        assert_that().onInferNgraphModel(getNgraphModel())
            .inNotCompactMode()
            .gna()
            .withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
            .withGNAConfig(GNA_CONFIG_KEY(PRECISION), "I16")
            .propagate_forward()
            .called();
    }
}
// Full cross-product of split sizes around GNA alignment boundaries.
INSTANTIATE_TEST_SUITE_P(
    GNALayerTests,
    GNASplitToConcatTest,
    testing::Combine(
        testing::Values(InferenceEngine::Precision::FP32, InferenceEngine::Precision::I16),
        // Size of first Split layer output
        testing::Values(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 32, 42, 50, 64, 96, 100, 128, 132),
        // Size of second Split layer output
        testing::Values(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 32, 42, 50, 64, 96, 100, 128, 132)),
    GNASplitToConcatTest::getTestName);

View File

@@ -1,157 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <tuple>
#include <gtest/gtest.h>
#include <single_layer_common.hpp>
#include <ngraph/op/parameter.hpp>
#include <ngraph/ops.hpp>
#include <ie_precision.hpp>
#include <debug.h>
#include "../gna_matcher.hpp"
using namespace InferenceEngine::details;
typedef struct {
std::vector<size_t> input_shape;
std::vector<size_t> squeeze_indices;
} SqueezeCaseParam;
using SqueezeTestParam = std::tuple<InferenceEngine::Precision, bool, SqueezeCaseParam>;
// Shared base for Squeeze/Unsqueeze tests. The model sandwiches the
// squeeze-type op between two Reshapes so its overall effect on the flat
// 1xN data is a no-op, followed by a scalar Multiply(MUL_VALUE); FP32
// output is therefore simply input * MUL_VALUE.
class GNASqueezeTest_ : public GNATest<>,
                        public testing::WithParamInterface<SqueezeTestParam> {
public:
    // e.g. "FP32_squeeze_{1,1,3}_{0,1}" (shape/indices streamed via the
    // debug.h vector operator<<).
    static std::string getTestName(const testing::TestParamInfo<SqueezeTestParam>& params) {
        std::stringstream test_name;
        test_name << std::get<0>(params.param) << "_";
        test_name << (std::get<1>(params.param) ? "squeeze" : "unsqueeze") << "_";
        test_name << std::get<2>(params.param).input_shape << "_";
        test_name << std::get<2>(params.param).squeeze_indices;
        return test_name.str();
    }
protected:
    const float MUL_VALUE = -1.0f;  // scalar multiplier applied at the end
    // Builds: Parameter(1xN) -> Reshape(input_shape) -> SqueezeType(indices)
    //         -> Reshape(1xN) -> Multiply(MUL_VALUE).
    // SqueezeType is ngraph::op::v0::Squeeze or ::Unsqueeze.
    template <class SqueezeType>
    std::shared_ptr<ngraph::Function> getNgraphModel(const SqueezeCaseParam& param) {
        const std::size_t input_dim = InferenceEngine::details::product(param.input_shape);
        auto input = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{1, input_dim});
        auto reshape1_pattern = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
                                                                       ngraph::Shape{param.input_shape.size()},
                                                                       param.input_shape);
        auto reshape1 = std::make_shared<ngraph::op::v1::Reshape>(input, reshape1_pattern, false);
        auto squeeze_axes = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
                                                                   ngraph::Shape{param.squeeze_indices.size()},
                                                                   param.squeeze_indices);
        auto squeeze = std::make_shared<SqueezeType>(reshape1, squeeze_axes);
        auto reshape2_pattern = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
                                                                       ngraph::Shape{2},
                                                                       std::vector<size_t>{1, input_dim});
        auto reshape2 = std::make_shared<ngraph::op::v1::Reshape>(squeeze, reshape2_pattern, false);
        auto weights = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{1},
                                                              std::vector<float>{MUL_VALUE});
        auto mul = std::make_shared<ngraph::op::v1::Multiply>(reshape2, weights);
        auto function = std::make_shared<ngraph::Function>(ngraph::NodeVector{mul}, ngraph::ParameterVector{input});
        return function;
    }
    // Common driver: decodes the tuple, picks Squeeze vs Unsqueeze, then
    // checks accuracy (FP32) or that inference runs (quantized).
    void runtest() {
        InferenceEngine::Precision precision;
        bool is_squeeze;
        SqueezeCaseParam param;
        std::tie(precision, is_squeeze, param) = GetParam();
        std::size_t input_dim = InferenceEngine::details::product(param.input_shape);
        auto buildNgraphFunction = [&]() {
            if (is_squeeze) {
                return getNgraphModel<ngraph::op::v0::Squeeze>(param);
            }
            return getNgraphModel<ngraph::op::v0::Unsqueeze>(param);
        };
        if (precision == InferenceEngine::Precision::FP32) {
            std::vector<float> input_data(input_dim);
            std::iota(input_data.begin(), input_data.end(), 1.0);
            std::vector<float> expected_result(input_dim);
            for (std::size_t i = 0; i < expected_result.size(); i++) {
                expected_result[i] = input_data[i] * MUL_VALUE;
            }
            assert_that().onInferNgraphModel(buildNgraphFunction())
                .inNotCompactMode()
                .gna()
                .propagate_forward()
                .onCPU()
                .called_with_input(input_data)
                .equals_to(expected_result);
        } else {
            assert_that().onInferNgraphModel(buildNgraphFunction())
                .inNotCompactMode()
                .gna()
                .withGNAConfig(GNA_CONFIG_KEY(SCALE_FACTOR), 1.0f)
                .withGNAConfig(GNA_CONFIG_KEY(PRECISION), "I16")
                .propagate_forward()
                .called();
        }
    }
};
// Thin named suites over the shared fixture so gtest reports Squeeze and
// Unsqueeze runs separately; the bool tuple element selects the op.
class GNASqueezeTest : public GNASqueezeTest_{};
TEST_P(GNASqueezeTest, SqueezeTest) {
    runtest();
}
class GNAUnsqueezeTest : public GNASqueezeTest_{};
TEST_P(GNAUnsqueezeTest, UnsqueezeTest) {
    runtest();
}
// Each entry: {input_shape, squeeze_indices}. Shared by both suites — for
// Unsqueeze the indices are insertion positions rather than removed axes.
static const SqueezeCaseParam gna_squeeze_test_params[] = {
    {{1, 1, 3}, {0, 1}},
    {{1, 1, 3}, {0}},
    {{1, 1, 3}, {1}},
    {{1, 3, 1}, {0, 2}},
    {{1, 3, 1}, {0}},
    {{1, 3, 1}, {2}},
    {{3, 1, 1}, {1, 2}},
    {{3, 1, 1}, {1}},
    {{3, 1, 1}, {2}},
    {{4, 1, 3, 1}, {1, 3}},
    {{4, 1, 1, 3}, {1, 2}},
    {{1, 4, 1, 3}, {0, 2}},
    {{1, 3, 5, 2, 1}, {0, 4}},
    {{3, 1, 2, 4, 4, 3}, {1}},
    {{1, 1, 1, 1, 1, 3}, {0, 1, 2, 3, 4}},
    {{1, 1, 1, 1, 1, 3}, {1, 3}}
};
// Instantiations of the shared parameter table; the bool fixes the op kind.
// Fixed: the Squeeze suite previously passed GNAUnsqueezeTest::getTestName —
// harmless only because both classes inherit the same base function, but
// misleading; each suite now names its own class.
INSTANTIATE_TEST_SUITE_P(
    GNALayerTests, GNASqueezeTest,
    ::testing::Combine(
        ::testing::Values(InferenceEngine::Precision::FP32, InferenceEngine::Precision::I16),
        ::testing::Values(true),   // true -> Squeeze
        ::testing::ValuesIn(gna_squeeze_test_params)),
    GNASqueezeTest::getTestName);
INSTANTIATE_TEST_SUITE_P(
    GNALayerTests, GNAUnsqueezeTest,
    ::testing::Combine(
        ::testing::Values(InferenceEngine::Precision::FP32, InferenceEngine::Precision::I16),
        ::testing::Values(false),  // false -> Unsqueeze
        ::testing::ValuesIn(gna_squeeze_test_params)),
    GNAUnsqueezeTest::getTestName);

View File

@@ -1,125 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <array>
#include "single_layer_common.hpp"
namespace GNATestIRs {
namespace Permute {
typedef struct { std::array<int,3> order; std::array<int,3> dim; } Permute3dimCaseParam;
// Generates an IR v6 network XML: Input -> Reshape(dim) -> Permute(order)
// -> Reshape(1, full_size) -> FullyConnected(out-size 2). The __TOKEN__
// placeholders in the template are substituted from test_param below.
inline std::string Permute3dimModel_v6(const Permute3dimCaseParam &test_param) {
    std::string ir = R"V0G0N(
<?xml version="1.0" ?>
<net batch="1" name="Permute3dim_v6" version="6">
<layers>
<layer id="0" name="Input_1" precision="FP32" type="Input">
<output>
<port id="0">
<dim>1</dim>
<dim>__FULL_SIZE__</dim>
</port>
</output>
</layer>
<layer id="1" name="Reshape_1" precision="FP32" type="Reshape">
<data dim="__DIM0__,__DIM1__,__DIM2__"/>
<input>
<port id="0">
<dim>1</dim>
<dim>__FULL_SIZE__</dim>
</port>
</input>
<output>
<port id="1">
<dim>__DIM0__</dim>
<dim>__DIM1__</dim>
<dim>__DIM2__</dim>
</port>
</output>
</layer>
<layer id="2" name="Permute_1" precision="FP32" type="Permute">
<data order="__ORDER0__,__ORDER1__,__ORDER2__"/>
<input>
<port id="0">
<dim>__DIM0__</dim>
<dim>__DIM1__</dim>
<dim>__DIM2__</dim>
</port>
</input>
<output>
<port id="1">
<dim>__NEW_DIM0__</dim>
<dim>__NEW_DIM1__</dim>
<dim>__NEW_DIM2__</dim>
</port>
</output>
</layer>
<layer id="3" name="Reshape_2" precision="FP32" type="Reshape">
<data dim="1,__FULL_SIZE__"/>
<input>
<port id="0">
<dim>__NEW_DIM0__</dim>
<dim>__NEW_DIM1__</dim>
<dim>__NEW_DIM2__</dim>
</port>
</input>
<output>
<port id="1">
<dim>1</dim>
<dim>__FULL_SIZE__</dim>
</port>
</output>
</layer>
<layer id="4" name="output_fc" precision="FP32" type="FullyConnected">
<data out-size="__FC_DIM__"/>
<input>
<port id="0">
<dim>1</dim>
<dim>__FULL_SIZE__</dim>
</port>
</input>
<output>
<port id="3">
<dim>1</dim>
<dim>__FC_DIM__</dim>
</port>
</output>
<blobs>
<weights offset="0" size="__WEIGHTS_SIZE__"/>
</blobs>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
<edge from-layer="1" from-port="1" to-layer="2" to-port="0"/>
<edge from-layer="2" from-port="1" to-layer="3" to-port="0"/>
<edge from-layer="3" from-port="1" to-layer="4" to-port="0"/>
</edges>
</net>
)V0G0N";
    // Output dims of the Permute: new_dim[n] = dim[order[n]].
    std::array<int, 3> new_dim;
    for (int n = 0; n < 3; n++) {
        new_dim[n] = test_param.dim[test_param.order[n]];
    }
    const int full_size = test_param.dim[0] * test_param.dim[1] * test_param.dim[2];
    const int fc_dim = 2;  // FC output width; weight blob is full_size x fc_dim floats
    REPLACE_WITH_NUM(ir, "__ORDER0__", test_param.order[0]);
    REPLACE_WITH_NUM(ir, "__ORDER1__", test_param.order[1]);
    REPLACE_WITH_NUM(ir, "__ORDER2__", test_param.order[2]);
    REPLACE_WITH_NUM(ir, "__DIM0__", test_param.dim[0]);
    REPLACE_WITH_NUM(ir, "__DIM1__", test_param.dim[1]);
    REPLACE_WITH_NUM(ir, "__DIM2__", test_param.dim[2]);
    REPLACE_WITH_NUM(ir, "__NEW_DIM0__", new_dim[0]);
    REPLACE_WITH_NUM(ir, "__NEW_DIM1__", new_dim[1]);
    REPLACE_WITH_NUM(ir, "__NEW_DIM2__", new_dim[2]);
    REPLACE_WITH_NUM(ir, "__FULL_SIZE__", full_size);
    REPLACE_WITH_NUM(ir, "__FC_DIM__", fc_dim);
    REPLACE_WITH_NUM(ir, "__WEIGHTS_SIZE__", full_size * fc_dim * sizeof(float));
    return ir;
}
} }