[IE TESTS] Remove some deprecated single layer tests (#2646)

* [IE TESTS] Remove some deprecated single layer tests

* Gather Tree
This commit is contained in:
Irina Efode 2020-10-14 00:58:28 +03:00 committed by GitHub
parent 8331c397cd
commit e79a44c02e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
52 changed files with 173 additions and 5405 deletions

View File

@ -42,7 +42,8 @@ std::vector<CommonTestUtils::OpType> opTypes = {
std::vector<ngraph::helpers::EltwiseTypes> eltwiseOpTypes = {
ngraph::helpers::EltwiseTypes::MULTIPLY,
ngraph::helpers::EltwiseTypes::SUBTRACT,
ngraph::helpers::EltwiseTypes::ADD
ngraph::helpers::EltwiseTypes::ADD,
ngraph::helpers::EltwiseTypes::POWER
};
std::map<std::string, std::string> additional_config = {};

View File

@ -0,0 +1,38 @@
// Copyright (C) 2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "single_layer_tests/gather_tree.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
namespace {

// Network precisions exercised by the GatherTree single-layer tests.
const std::vector<InferenceEngine::Precision> netPrecisions = {
        InferenceEngine::Precision::FP32,
        InferenceEngine::Precision::I32
};

// Input shapes — presumably {max_time, batch_size, beam_width}; confirm
// against GatherTreeLayerTest's parameter documentation.
const std::vector<std::vector<size_t>> inputShapes = { {5, 1, 10}, {1, 1, 10}, {20, 1, 10}, {20, 20, 10} };

// Secondary inputs are fed either as compile-time constants or as runtime parameters.
const std::vector<ngraph::helpers::InputLayerType> secondaryInputTypes = {
        ngraph::helpers::InputLayerType::CONSTANT,
        ngraph::helpers::InputLayerType::PARAMETER
};

// NOTE(review): renamed the instance "Basic_smoke" -> "smoke_Basic" so it
// follows the "smoke_" prefix convention used by the sibling test files
// (smoke_Basic, smoke_NumSplitsCheck); smoke CI filters select by that prefix.
INSTANTIATE_TEST_CASE_P(smoke_Basic, GatherTreeLayerTest,
        ::testing::Combine(
                ::testing::ValuesIn(inputShapes),
                ::testing::ValuesIn(secondaryInputTypes),
                ::testing::ValuesIn(netPrecisions),
                ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                ::testing::Values(InferenceEngine::Layout::ANY),
                ::testing::Values(InferenceEngine::Layout::ANY),
                ::testing::Values(CommonTestUtils::DEVICE_GPU)),
        GatherTreeLayerTest::getTestCaseName);

}  // namespace

View File

@ -0,0 +1,43 @@
// Copyright (C) 2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "single_layer_tests/reverse_sequence.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
namespace {

// Network precisions exercised by the ReverseSequence tests.
const std::vector<InferenceEngine::Precision> netPrecisions = {
        InferenceEngine::Precision::FP32,
        InferenceEngine::Precision::FP16,
};

const std::vector<int64_t> batchAxisIndices = { 0L };

const std::vector<int64_t> seqAxisIndices = { 1L };

const std::vector<std::vector<size_t>> inputShapes = { {3, 10} }; //, 10, 20

// Shapes of the seq_lengths input; {3} matches inputShapes[0] along the
// batch axis (axis 0).
// NOTE(review): fixed identifier typo "reversSeqLengthsVecShapes".
const std::vector<std::vector<size_t>> reverseSeqLengthsVecShapes = { {3} };

const std::vector<ngraph::helpers::InputLayerType> secondaryInputTypes = {
        ngraph::helpers::InputLayerType::CONSTANT,
        ngraph::helpers::InputLayerType::PARAMETER
};

// NOTE(review): renamed the instance "Basic_smoke" -> "smoke_Basic" to follow
// the "smoke_" prefix convention used by the sibling test files.
INSTANTIATE_TEST_CASE_P(smoke_Basic, ReverseSequenceLayerTest,
        ::testing::Combine(
                ::testing::ValuesIn(batchAxisIndices),
                ::testing::ValuesIn(seqAxisIndices),
                ::testing::ValuesIn(inputShapes),
                ::testing::ValuesIn(reverseSeqLengthsVecShapes),
                ::testing::ValuesIn(secondaryInputTypes),
                ::testing::ValuesIn(netPrecisions),
                ::testing::Values(CommonTestUtils::DEVICE_GPU)),
        ReverseSequenceLayerTest::getTestCaseName);

}  // namespace

View File

@ -0,0 +1,43 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "single_layer_tests/squeeze_unsqueeze.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
namespace {

// Precisions under test.
const std::vector<InferenceEngine::Precision> netPrecisions = {
        InferenceEngine::Precision::FP32,
        InferenceEngine::Precision::FP16
};

// Both directions of the shape-editing operation are covered.
const std::vector<ngraph::helpers::SqueezeOpType> opTypes = {
        ngraph::helpers::SqueezeOpType::SQUEEZE,
        ngraph::helpers::SqueezeOpType::UNSQUEEZE
};

// Maps an input shape to every list of axes applied to it; combineParams
// below flattens this into (shape, axes) pairs.
std::map<std::vector<size_t>, std::vector<std::vector<int>>> axesVectors = {
        {{1, 1, 1, 1}, {{-1}, {0}, {1}, {2}, {3}, {0, 1}, {0, 2}, {0, 3}, {1, 2}, {2, 3}} },
        {{1, 2, 3, 4}, {{0}}},
        {{2, 1, 3, 4}, {{1}}},
        {{1}, {{-1}, {0}}},
        {{1, 2}, {{0}}},
        {{2, 1}, {{1}, {-1}}},
};

INSTANTIATE_TEST_CASE_P(smoke_Basic, SqueezeUnsqueezeLayerTest,
        ::testing::Combine(
                ::testing::ValuesIn(CommonTestUtils::combineParams(axesVectors)),
                ::testing::ValuesIn(opTypes),
                ::testing::ValuesIn(netPrecisions),
                ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                ::testing::Values(InferenceEngine::Layout::ANY),
                ::testing::Values(InferenceEngine::Layout::ANY),
                ::testing::Values(CommonTestUtils::DEVICE_GPU)),
        SqueezeUnsqueezeLayerTest::getTestCaseName);

}  // namespace

View File

@ -0,0 +1,40 @@
// Copyright (C) 2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "single_layer_tests/variadic_split.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
// Sum of elements numSplits = inputShapes[Axis]
const std::vector<std::vector<size_t>> numSplits = {
{1, 16, 5, 8},
{2, 19, 5, 4},
{7, 13, 2, 8},
{5, 8, 12, 5},
{4, 11, 6, 9}
};
INSTANTIATE_TEST_CASE_P(smoke_NumSplitsCheck, VariadicSplitLayerTest,
::testing::Combine(
::testing::ValuesIn(numSplits),
::testing::Values(0, 1, 2, 3),
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t>({30, 30, 30, 30})),
::testing::Values(CommonTestUtils::DEVICE_GPU)),
VariadicSplitLayerTest::getTestCaseName);
} // namespace

View File

@ -19,5 +19,12 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*CoreThreadingTestsWithIterations.*smoke_LoadNetwork.*)",
// TODO: Issue: 39612
R"(.*Interpolate.*cubic.*tf_half_pixel_for_nn.*FP16.*)",
// Expected behavior
R"(.*EltwiseLayerTest.*eltwiseOpType=Pow.*netPRC=I64.*)",
R"(.*EltwiseLayerTest.*IS=\(.*\..*\..*\..*\..*\).*eltwiseOpType=Pow.*secondaryInputType=CONSTANT.*)",
// TODO: Issue: 40736
R"(.*ReverseSequenceLayerTest.*)",
// TODO: Issue: 40741
R"(.*GatherTreeLayerTest.*)",
};
}

View File

@ -1,18 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "activation_tests.hpp"
// Smoke coverage for a set of activation functions on the GPU plugin.
// "case_1" — presumably a shared tensor configuration from activation_tests.hpp
// (confirm); the string selects the activation type.
activation_test_params test_cases[] = {
activation_test_params("GPU", case_1, "relu"),
activation_test_params("GPU", case_1, "exp"),
activation_test_params("GPU", case_1, "not"),
activation_test_params("GPU", case_1, "sin"),
activation_test_params("GPU", case_1, "sinh"),
activation_test_params("GPU", case_1, "cos"),
activation_test_params("GPU", case_1, "cosh"),
};
INSTANTIATE_TEST_CASE_P(
smoke_GPU_TestsActivationFunctions, ActivationTest, ::testing::ValuesIn(test_cases), getTestCaseName);

View File

@ -1,84 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "arg_max_min_tests.hpp"
// Input tensor for the ArgMax/ArgMin tests below; 24 values, consumed as an
// NCHW {2, 3, 2, 2} blob by every test case.
// NOTE(review): the last value was written as "14.0" (a double literal) while
// every other element uses an "f" suffix — normalized for consistency; the
// stored value is unchanged.
static std::vector<float> in_data = { 0.0f, 1.0f,
                                      20.0f, 12.0f,
                                      12.0f, 0.0f,
                                      15.0f, 8.0f,
                                      9.0f, 4.0f,
                                      25.0f, 15.0f,
                                      0.0f, 0.0f,
                                      1.0f, 1.0f,
                                      0.0f, 0.0f,
                                      24.0f, 12.0f,
                                      8.0f, 9.0f,
                                      2.0f, 14.0f };
// ArgMax/ArgMin over every axis of a {2, 3, 2, 2} input, with top_k 1, 2 and 3.
INSTANTIATE_TEST_CASE_P(
nightly_GPU_TestsArgMaxMin, ArgMaxMinTFTests,
::testing::Values(
// Params: device_name, type ("ArgMax"/"ArgMin"), in_dim, in_data, has_axis, out_max_val, top_k, axis, ref_dim
argMaxMinTF_test_params{ "GPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
1, 0, 1, 0, { 1, 3, 2, 2 } },
argMaxMinTF_test_params{ "GPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
1, 0, 1, 1, { 2, 1, 2, 2 } },
argMaxMinTF_test_params{ "GPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
1, 0, 1, 2, { 2, 3, 1, 2 } },
argMaxMinTF_test_params{ "GPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
1, 0, 1, 3, { 2, 3, 2, 1 } },
argMaxMinTF_test_params{ "GPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
1, 0, 2, 0, { 2, 3, 2, 2 } },
argMaxMinTF_test_params{ "GPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
1, 0, 2, 1, { 2, 2, 2, 2 } },
argMaxMinTF_test_params{ "GPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
1, 0, 2, 2, { 2, 3, 2, 2 } },
argMaxMinTF_test_params{ "GPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
1, 0, 2, 3, { 2, 3, 2, 2 } },
argMaxMinTF_test_params{ "GPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
1, 0, 3, 1, { 2, 3, 2, 2 } },
argMaxMinTF_test_params{ "GPU", "ArgMin", { 2, 3, 2, 2 }, in_data,
1, 0, 1, 0, { 1, 3, 2, 2 } },
argMaxMinTF_test_params{ "GPU", "ArgMin", { 2, 3, 2, 2 }, in_data,
1, 0, 1, 1, { 2, 1, 2, 2 } },
argMaxMinTF_test_params{ "GPU", "ArgMin", { 2, 3, 2, 2 }, in_data,
1, 0, 1, 2, { 2, 3, 1, 2 } },
argMaxMinTF_test_params{ "GPU", "ArgMin", { 2, 3, 2, 2 }, in_data,
1, 0, 1, 3, { 2, 3, 2, 1 } },
argMaxMinTF_test_params{ "GPU", "ArgMin", { 2, 3, 2, 2 }, in_data,
1, 0, 2, 0, { 2, 3, 2, 2 } },
argMaxMinTF_test_params{ "GPU", "ArgMin", { 2, 3, 2, 2 }, in_data,
1, 0, 2, 1, { 2, 2, 2, 2 } },
argMaxMinTF_test_params{ "GPU", "ArgMin", { 2, 3, 2, 2 }, in_data,
1, 0, 2, 2, { 2, 3, 2, 2 } },
argMaxMinTF_test_params{ "GPU", "ArgMin", { 2, 3, 2, 2 }, in_data,
1, 0, 2, 3, { 2, 3, 2, 2 } },
argMaxMinTF_test_params{ "GPU", "ArgMin", { 2, 3, 2, 2 }, in_data,
1, 0, 3, 1, { 2, 3, 2, 2 } }
));

View File

@ -1,33 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "conv_tests.hpp"
// Convolution test configurations; case_1..case_14 and case_si_1 are shared
// parameter sets declared in conv_tests.hpp.
conv_test_params conv_only_test_cases[] = {
conv_test_params("GPU", case_1),
conv_test_params("GPU", case_2),
conv_test_params("GPU", case_3),
conv_test_params("GPU", case_4),
conv_test_params("GPU", case_5),
conv_test_params("GPU", case_6),
conv_test_params("GPU", case_7),
conv_test_params("GPU", case_8),
conv_test_params("GPU", case_9),
conv_test_params("GPU", case_10),
conv_test_params("GPU", case_11),
conv_test_params("GPU", case_12),
conv_test_params("GPU", case_13),
conv_test_params("GPU", case_14)
};
// The same parameter list drives both the plain convolution run and the
// variant that feeds weights/biases as network inputs.
INSTANTIATE_TEST_CASE_P(
smoke_GPU_TestConvolution, ConvolutionOnlyTest, ::testing::ValuesIn(conv_only_test_cases), getTestCaseName);
INSTANTIATE_TEST_CASE_P(
smoke_GPU_TestConvolutionBlobsAsInputs, ConvolutionBlobsAsInputsTest, ::testing::ValuesIn(conv_only_test_cases), getTestCaseName);
// Reshape/auto-pad variant uses its own single configuration.
INSTANTIATE_TEST_CASE_P(
smoke_GPU_TestConvolutionSameUpper, ConvolutionReshapeTest,
::testing::Values(conv_test_params("GPU", case_si_1)),
getTestCaseName);

View File

@ -1,33 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "eltwise_tests.hpp"
// Eltwise operations on a {13|23, 13|23, 1} blob; the trailing integer is the
// number of inputs fed to the eltwise layer.
INSTANTIATE_TEST_CASE_P(
smoke_GPU_TestEltwise, EltwiseOnlyTest,
::testing::Values(
eltwise_test_params{"GPU", {13, 13, 1}, eltwise_test_params::Sum, 5},
eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Max, 3},
eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Prod, 3},
eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Sub, 3},
eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Min, 7},
eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Div, 2},
eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Squared_diff, 2},
eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Equal, 2},
eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Not_equal, 2},
eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Less, 2},
eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Less_equal, 2},
eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Greater, 2},
eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Greater_equal, 2},
eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Logical_AND, 3},
eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Logical_OR, 4},
eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Logical_XOR, 4},
eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Floor_mod, 2},
eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Pow, 2}
// TODO: Add tests for 1D/2D/3D blobs
));
/*** TBD ***/

View File

@ -1,24 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gather_tests.hpp"
// Gather over various dictionary shapes and axes (including negative axes).
// in0/in1, dict/dict2D and the ref_* vectors are shared fixtures declared in
// gather_tests.hpp.
// Params: device, precision, idx_shape, indices, dct_shape, dictionary, axis, out_shape, reference
INSTANTIATE_TEST_CASE_P(
smoke_GPU_TestsGather, GatherTFTests,
::testing::Values(
gatherTF_test_params{ "GPU", "FP32", { 1, 4 }, in0,{ 2, 2 }, dict2D, 0, { 1, 4, 1, 2 }, ref_in0_a0_d22 },
gatherTF_test_params{ "GPU", "FP32", { 2, 2 }, in0,{ 2, 2, 3 }, dict, 0, { 2, 2, 1, 2, 3 }, ref_in0_a0_d223 },
gatherTF_test_params{ "GPU", "FP32", { 2, 2 }, in0,{ 2, 2, 3 }, dict,-3, { 2, 2, 1, 2, 3 }, ref_in0_a0_d223 },
gatherTF_test_params{ "GPU", "FP32", { 2, 2 }, in1,{ 3, 2, 2 }, dict, 0, { 2, 2, 1, 2, 2 }, ref_in1_a0_d322 },
gatherTF_test_params{ "GPU", "FP32", { 2, 2 }, in1,{ 3, 2, 2 }, dict,-3, { 2, 2, 1, 2, 2 }, ref_in1_a0_d322 },
gatherTF_test_params{ "GPU", "FP32", { 2, 2 }, in1,{ 2, 3, 2 }, dict, 1, { 2, 2, 2, 2, 1 }, ref_in1_a1_d232 },
gatherTF_test_params{ "GPU", "FP32", { 2, 2 }, in1,{ 2, 3, 2 }, dict,-2, { 2, 2, 2, 2, 1 }, ref_in1_a1_d232 },
gatherTF_test_params{ "GPU", "FP32", { 2, 2 }, in1,{ 2, 2, 3 }, dict, 2, { 2, 2, 2, 2, 1 }, ref_in1_a2_d223 },
gatherTF_test_params{ "GPU", "FP32", { 2, 2 }, in1,{ 2, 2, 3 }, dict,-1, { 2, 2, 2, 2, 1 }, ref_in1_a2_d223 },
gatherTF_test_params{ "GPU", "FP32", { 2, 2 }, in0,{ 2, 3, 2 }, dict, 2, { 2, 3, 2, 2, 1 }, ref_in0_a2_d232 },
gatherTF_test_params{ "GPU", "FP32", { 2, 2 }, in0,{ 2, 3, 2 }, dict,-1, { 2, 3, 2, 2, 1 }, ref_in0_a2_d232 },
gatherTF_test_params{ "GPU", "FP32", { 2, 2 }, in0,{ 2, 3, 2 }, dict, 2, { 2, 3, 2, 2, 1 }, ref_in0_a2_d232 }
));

View File

@ -1,10 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "pad_tests.hpp"
// Pad tests: a {3, 4} input padded by {2, 2} before / {1, 3} after to {6, 9},
// one case per pad mode. ("PLUGING_CASE" is the macro's actual — misspelled —
// name from pad_tests.hpp; it cannot be fixed here.)
PLUGING_CASE(GPU, PadTFTests, 1, { 3, 4 }, in, { 2, 2 }, { 1, 3 }, "constant", 0.f, { 6, 9 }, ref_constant);
PLUGING_CASE(GPU, PadTFTests, 2, { 3, 4 }, in, { 2, 2 }, { 1, 3 }, "edge", 0.f, { 6, 9 }, ref_edge);
PLUGING_CASE(GPU, PadTFTests, 3, { 3, 4 }, in, { 2, 2 }, { 1, 3 }, "reflect", 0.f, { 6, 9 }, ref_reflect);
PLUGING_CASE(GPU, PadTFTests, 4, { 3, 4 }, in, { 2, 2 }, { 1, 3 },"symmetric", 0.f, { 6, 9 }, ref_symmetric);

View File

@ -1,14 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "softmax_tests.hpp"
// SoftMax test configurations; case_* parameter sets come from softmax_tests.hpp.
// The "2D" tag marks the NC-layout variant.
softmax_test_params softmax_only_test_cases[] = {
softmax_test_params("GPU", case_1),
softmax_test_params("GPU", case_8),
softmax_test_params("GPU", case_8_nc, "2D"),
};
INSTANTIATE_TEST_CASE_P(
smoke_GPU_TestsSoftmax, SoftmaxOnlyTest, ::testing::ValuesIn(softmax_only_test_cases)/*, getTestCaseName*/);

View File

@ -1,85 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "topk_tests.hpp"
// TopK over 2D and 4D inputs for both sort orders ("value"/"index") and both
// modes ("max"/"min"), FP32 flavour.
INSTANTIATE_TEST_CASE_P(
nightly_GPU_TestsTopK, topk_test_fp32,
::testing::Values(
// Params: plugin_name, in_shape, axis, src_k, sort, mode, out_shape, precision
topk_test_params{ "GPU", { 3, 4 }, -1,{ 1 }, "value", "max",{ 3, 1 }, Precision::FP32},
topk_test_params{ "GPU", { 3, 4 }, 0,{ 1 }, "value", "max",{ 1, 4 }, Precision::FP32},
topk_test_params{ "GPU", { 3, 4 }, -1,{ 1 }, "value", "min",{ 3, 1 }, Precision::FP32},
topk_test_params{ "GPU", { 3, 4 }, 0,{ 1 }, "value", "min",{ 1, 4 }, Precision::FP32},
topk_test_params{ "GPU", { 2, 3, 128, 256 }, 1,{ 1 }, "value", "max",{ 2, 1, 128, 256 }, Precision::FP32},
topk_test_params{ "GPU", { 3, 5, 128, 256 }, 1,{ 1 }, "index", "max",{ 3, 1, 128, 256 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 3, 129, 257 }, 1,{ 1 }, "value", "max",{ 1, 1, 129, 257 }, Precision::FP32},
topk_test_params{ "GPU", { 2, 5, 129, 257 }, 1,{ 1 }, "index", "max",{ 2, 1, 129, 257 }, Precision::FP32},
topk_test_params{ "GPU", { 3, 4 }, -1,{ 3 }, "value", "max",{ 3, 3 }, Precision::FP32},
topk_test_params{ "GPU", { 3, 4 }, -1,{ 3 }, "value", "min",{ 3, 3 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 5, 1, 2 }, 1,{ 3 }, "value", "max",{ 1, 3, 1, 2 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 5, 1, 2 }, 1,{ 3 }, "value", "min",{ 1, 3, 1, 2 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 5, 1, 2 }, 1,{ 3 }, "index", "min",{ 1, 3, 1, 2 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 5, 1, 2 }, 1,{ 3 }, "index", "min",{ 1, 3, 1, 2 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 20, 12, 12 }, 1,{ 18 }, "value", "min",{ 1, 18, 12, 12 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 20, 129, 129 }, 1,{ 3 }, "value", "max",{ 1, 3, 129, 129 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "value", "max",{ 1, 2, 2, 3 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "index", "max",{ 1, 2, 2, 3 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "value", "min",{ 1, 2, 2, 3 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "index", "min",{ 1, 2, 2, 3 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 1 }, "value", "max",{ 1, 2, 2, 1 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 1 }, "index", "max",{ 1, 2, 2, 1 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 2, 4, 2 }, 2,{ 3 }, "value", "max",{ 1, 2, 3, 2 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 2, 4, 2 }, 2,{ 3 }, "index", "max",{ 1, 2, 3, 2 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 2, 4, 2 }, 2,{ 3 }, "value", "min",{ 1, 2, 3, 2 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 2, 4, 2 }, 2,{ 3 }, "index", "min",{ 1, 2, 3, 2 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "index", "min",{ 1, 2, 2, 3 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "index", "max",{ 1, 2, 2, 3 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "value", "min",{ 1, 2, 2, 3 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "value", "max",{ 1, 2, 2, 3 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 20, 32, 32 }, 1,{ 18 }, "index", "max",{ 1, 18, 32, 32 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 20, 129, 129 }, 1,{ 18 }, "index", "max",{ 1, 18, 129, 129 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 20, 32, 32 }, 1,{ 18 }, "index", "min",{ 1, 18, 32, 32 }, Precision::FP32},
topk_test_params{ "GPU", { 1, 20, 129, 129 }, 1,{ 18 }, "index", "min",{ 1, 18, 129, 129 }, Precision::FP32}
));
// Same matrix as above, I32 flavour.
INSTANTIATE_TEST_CASE_P(
nightly_GPU_TestsTopK, topk_test_int32,
::testing::Values(
// Params: plugin_name, in_shape, axis, src_k, sort, mode, out_shape, precision
topk_test_params{ "GPU", { 3, 4 }, -1,{ 1 }, "value", "max",{ 3, 1 }, Precision::I32},
topk_test_params{ "GPU", { 3, 4 }, 0,{ 1 }, "value", "max",{ 1, 4 }, Precision::I32},
topk_test_params{ "GPU", { 3, 4 }, -1,{ 1 }, "value", "min",{ 3, 1 }, Precision::I32},
topk_test_params{ "GPU", { 3, 4 }, 0,{ 1 }, "value", "min",{ 1, 4 }, Precision::I32},
topk_test_params{ "GPU", { 2, 3, 128, 256 }, 1,{ 1 }, "value", "max",{ 2, 1, 128, 256 }, Precision::I32},
topk_test_params{ "GPU", { 3, 5, 128, 256 }, 1,{ 1 }, "index", "max",{ 3, 1, 128, 256 }, Precision::I32},
topk_test_params{ "GPU", { 1, 3, 129, 257 }, 1,{ 1 }, "value", "max",{ 1, 1, 129, 257 }, Precision::I32},
topk_test_params{ "GPU", { 2, 5, 129, 257 }, 1,{ 1 }, "index", "max",{ 2, 1, 129, 257 }, Precision::I32},
topk_test_params{ "GPU", { 3, 4 }, -1,{ 3 }, "value", "max",{ 3, 3 }, Precision::I32},
topk_test_params{ "GPU", { 3, 4 }, -1,{ 3 }, "value", "min",{ 3, 3 }, Precision::I32},
topk_test_params{ "GPU", { 1, 5, 1, 2 }, 1,{ 3 }, "value", "max",{ 1, 3, 1, 2 }, Precision::I32},
topk_test_params{ "GPU", { 1, 5, 1, 2 }, 1,{ 3 }, "value", "min",{ 1, 3, 1, 2 }, Precision::I32},
topk_test_params{ "GPU", { 1, 5, 1, 2 }, 1,{ 3 }, "index", "min",{ 1, 3, 1, 2 }, Precision::I32},
topk_test_params{ "GPU", { 1, 5, 1, 2 }, 1,{ 3 }, "index", "min",{ 1, 3, 1, 2 }, Precision::I32},
topk_test_params{ "GPU", { 1, 20, 12, 12 }, 1,{ 18 }, "value", "min",{ 1, 18, 12, 12 }, Precision::I32},
topk_test_params{ "GPU", { 1, 20, 129, 129 }, 1,{ 3 }, "value", "max",{ 1, 3, 129, 129 }, Precision::I32},
topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "value", "max",{ 1, 2, 2, 3 }, Precision::I32},
topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "index", "max",{ 1, 2, 2, 3 }, Precision::I32},
topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "value", "min",{ 1, 2, 2, 3 }, Precision::I32},
topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "index", "min",{ 1, 2, 2, 3 }, Precision::I32},
topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 1 }, "value", "max",{ 1, 2, 2, 1 }, Precision::I32},
topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 1 }, "index", "max",{ 1, 2, 2, 1 }, Precision::I32},
topk_test_params{ "GPU", { 1, 2, 4, 2 }, 2,{ 3 }, "value", "max",{ 1, 2, 3, 2 }, Precision::I32},
topk_test_params{ "GPU", { 1, 2, 4, 2 }, 2,{ 3 }, "index", "max",{ 1, 2, 3, 2 }, Precision::I32},
topk_test_params{ "GPU", { 1, 2, 4, 2 }, 2,{ 3 }, "value", "min",{ 1, 2, 3, 2 }, Precision::I32},
topk_test_params{ "GPU", { 1, 2, 4, 2 }, 2,{ 3 }, "index", "min",{ 1, 2, 3, 2 }, Precision::I32},
topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "index", "min",{ 1, 2, 2, 3 }, Precision::I32},
topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "index", "max",{ 1, 2, 2, 3 }, Precision::I32},
topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "value", "min",{ 1, 2, 2, 3 }, Precision::I32},
topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "value", "max",{ 1, 2, 2, 3 }, Precision::I32},
topk_test_params{ "GPU", { 1, 20, 32, 32 }, 1,{ 18 }, "index", "max",{ 1, 18, 32, 32 }, Precision::I32},
topk_test_params{ "GPU", { 1, 20, 129, 129 }, 1,{ 18 }, "index", "max",{ 1, 18, 129, 129 }, Precision::I32},
topk_test_params{ "GPU", { 1, 20, 32, 32 }, 1,{ 18 }, "index", "min",{ 1, 18, 32, 32 }, Precision::I32},
topk_test_params{ "GPU", { 1, 20, 129, 129 }, 1,{ 18 }, "index", "min",{ 1, 18, 129, 129 }, Precision::I32}
));

View File

@ -1,22 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gather_tree_tests.hpp"
// GatherTree reference cases; in_out_shape is {max_time, batch, beam_width}
// as indicated by the max_seq_len vectors sized to the batch dimension.
INSTANTIATE_TEST_CASE_P(
smoke_GPU_TestsGatherTree, GatherTreeTests,
::testing::Values(
// Params: in_out_shape, step_idx, parent_idx, max_seq_len, end_token, reference
gather_tree_test_params{ {3, 2, 3 }, {1, 2, 3, 2, 3, 4, 4, 5, 6, 5, 6, 7, 7, 8, 9, 8, 9, 10}, {0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 0, 2, 1, 2, 2, 1, 1},
{3, 3 }, {11}, {2, 2, 2, 2, 4, 4, 6, 5, 6, 7, 6, 6, 7, 8, 9, 8, 9, 10}, "GPU"},
gather_tree_test_params{ {4, 1, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1}, {0, 0, 0, 0, 1, 1, 2, 1, 2, -1, -1, -1},
{3}, {10}, {2, 2, 2, 6, 5, 6, 7, 8, 9, 10, 10, 10}, "GPU"},
gather_tree_test_params{ {4, 1, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10}, {0, 0, 0, 0, 1, 1, 2, 1, 2, 1, 1, 1},
{4}, {10}, {2, 2, 2, 5, 5, 5, 8, 8, 8, 10, 10, 10}, "GPU"},
gather_tree_test_params{ {5, 1, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 10, 3, 2, 10, 10}, {0, 0, 0, 0, 1, 1, 2, 1, 2, 1, 1, 1, 2, 0, 1},
{5}, {10}, {2, 2, 2, 5, 5, 5, 8, 8, 8, 3, 1, 10, 2, 10, 10}, "GPU"},
gather_tree_test_params{ {4, 2, 3}, {1, 2, 3, 2, 3, 4, 4, 5, 6, 5, 6, 7, 7, 8, 9, 8, 9, 10, 0, 0, 0, 11, 12, 0},
{0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 2, 1, 2, 2, 0, 1, -1, -1, -1, 0, 1, 0},
{3, 4}, {11}, {2, 2, 2, 2, 3, 2, 6, 5, 6, 7, 5, 7, 7, 8, 9, 8, 9, 8, 11, 11, 11, 11, 12, 0}, "GPU"}
));

View File

@ -1,137 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <ie_core.hpp>
#include <cmath>
#include "tests_common.hpp"
#include "single_layer_common.hpp"
using namespace ::testing;
using namespace InferenceEngine;
using namespace std;
// Parameters for one Power-layer test case. Field order is load-bearing: the
// INSTANTIATE list below initializes this struct positionally.
struct test_params {
std::string device_name;                     // plugin to run on ("GPU")
std::string inPrecision;                     // input precision name ("FP32")
InferenceEngine::SizeVector in_out_shape;    // shape shared by input and output
float power;                                 // Power layer attributes:
float scale;                                 //   out = (shift + scale * in) ^ power
float shift;
std::vector<float> reference;                // expected output values
};
// Reference for the Power layer: dst[i] = (shift + i * scale) ^ power.
// The index i stands in for the input value because the test fills the input
// with fill_data_dbgval — presumably consecutive values starting at 0 (the
// reference vectors {0,1,4,9} / {0,4,16,36} are consistent with that).
//
// Fix (review): the original loop bound was prm.in_out_shape.size() — the
// RANK of the shape, not the element count. The two coincide for the
// {1, 1, 2, 2} shapes used below (rank 4, 4 elements), but any other shape
// would leave most of the reference buffer uncomputed.
template<typename data_t>
void ref_power(InferenceEngine::TBlob<float> &dst, test_params const& prm) {
    data_t *dst_data = dst.data().as<data_t*>();
    size_t element_count = 1;
    for (size_t dim : prm.in_out_shape)
        element_count *= dim;
    for (size_t i = 0; i < element_count; ++i) {
        dst_data[i] = std::pow(prm.shift + i * prm.scale, prm.power);
    }
}
// Builds a minimal IR ("input" -> "Power") from the template below, runs it on
// the requested device and compares the result against ref_power and the
// hand-written reference vector from the test params.
class PowerTests : public TestsCommon, public WithParamInterface<test_params> {
std::string model_t = R"V0G0N(
<net Name="Power_net" version="2" precision="FP32" batch="1">
<layers>
<layer name="input" type="Input" precision="FP32" id="1">
<output>
<port id="1">
_IN_OUT_
</port>
</output>
</layer>
<layer name="output" id="2" type="Power" precision="FP32">
<data power="_POWER_" scale="_SCALE_" shift="_SHIFT_"/>
<input>
<port id="1">
_IN_OUT_
</port>
</input>
<output>
<port id="2">
_IN_OUT_
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
</edges>
</net>
)V0G0N";
// Substitutes the shape and Power attributes from `p` into the IR template.
std::string getModel(test_params p) {
std::string model = model_t;
std::string in_out_shape;
for (size_t i = 0; i < p.in_out_shape.size(); i++) {
in_out_shape += "<dim>";
in_out_shape += std::to_string(p.in_out_shape[i]) + "</dim>\n";
}
REPLACE_WITH_STR(model, "_IN_OUT_", in_out_shape);
REPLACE_WITH_NUM(model, "_POWER_", p.power);
REPLACE_WITH_NUM(model, "_SCALE_", p.scale);
REPLACE_WITH_NUM(model, "_SHIFT_", p.shift);
return model;
}
protected:
virtual void TearDown() {
}
// The whole test runs from SetUp (legacy pattern): build, infer, compare.
virtual void SetUp() {
try {
test_params p = ::testing::WithParamInterface<test_params>::GetParam();
std::string model = getModel(p);
Core ie;
CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
InferRequest inferRequest = executable_network.CreateInferRequest();
// Input Data
InputsDataMap inputInfo(net.getInputsInfo());
Blob::Ptr inputBlob = inferRequest.GetBlob(inputInfo.begin()->first);
float* inputData = inputBlob->buffer().as<float*>();
fill_data_dbgval(inputData, inputBlob->size());
inferRequest.Infer();
// Output Data
OutputsDataMap outputInfo(net.getOutputsInfo());
Blob::Ptr outputBlob = inferRequest.GetBlob(outputInfo.begin()->first);
// Output Reference
InferenceEngine::TBlob<float> dst_ref(outputBlob->getTensorDesc());
dst_ref.allocate();
ref_power<float>(dst_ref, p);
// Check results: first the computed reference against the hard-coded
// expected vector (bitwise), then the device output against the reference.
if (memcmp(dst_ref.data(), &p.reference[0], p.reference.size() * sizeof(float)) != 0)
FAIL() << "Wrong result with compare TF reference!";
compare(*outputBlob, dst_ref);
} catch (const InferenceEngine::details::InferenceEngineException &e) {
FAIL() << e.what();
}
}
};
TEST_P(PowerTests, smoke_GPU_TestsPower) {}
// Expected outputs for input values 0..3:
//   power=2, scale=1, shift=0 -> i^2
//   power=2, scale=2, shift=0 -> (2i)^2
std::vector<float> power_ref_0 = { 0.f, 1.f, 4.f, 9.f };
std::vector<float> power_ref_1 = { 0.f, 4.f, 16.f, 36.f };
INSTANTIATE_TEST_CASE_P(
smoke_TestsPower, PowerTests,
::testing::Values(
// Params: device, precision, in_out_shape, power, scale, shift, reference
test_params{ "GPU", "FP32", { 1, 1, 2, 2 }, 2.f, 1.f, 0.f, power_ref_0 },
test_params{ "GPU", "FP32", { 1, 1, 2, 2 }, 2.f, 2.f, 0.f, power_ref_1 }
));

View File

@ -1,234 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <ie_core.hpp>
#include <cmath>
#include "tests_common.hpp"
#include "single_layer_common.hpp"
using namespace ::testing;
using namespace InferenceEngine;
using namespace std;
// Parameters for one ReverseSequence test case. Field order is load-bearing:
// the INSTANTIATE list below initializes this struct positionally.
struct reverse_sequence_test_params {
std::string device_name;             // plugin to run on ("GPU")
std::string inPrecision;             // input precision name ("FP32")
SizeVector in_out_shape;             // shape shared by input and output
std::vector<int32_t> seq_lengths;    // per-batch-entry reversal lengths
int seq_axis;                        // axis to reverse along (may be negative)
int batch_axis;                      // batch axis (may be negative)
std::vector<float> reference;        // expected output values
};
// Reference ReverseSequence: for every batch entry b, reverses the first
// seq_lengths[b] elements along seq_axis, leaving the remainder in place.
// Negative axes are normalized by adding the input rank. Walks the output
// linearly, deriving the (possibly reversed) source index per dimension.
template <typename data_t>
void ref_reverse_sequence(
TBlob<float> &src,
TBlob<data_t> &seq_lengths,
TBlob<float> &dst,
int seq_axis,
int batch_axis
) {
size_t i, src_idx;
const float *src_data = src.data();
SizeVector src_dims = src.getTensorDesc().getDims();
SizeVector srcStrides = src.getTensorDesc().getBlockingDesc().getStrides();
const data_t *seq_lengths_data = seq_lengths.data();
SizeVector seq_lengths_dims = seq_lengths.getTensorDesc().getDims();
float* dst_data = dst.data();
// Normalize and validate the axes (the `< 0` test runs before the
// signed/unsigned comparison, so still-negative values are caught here).
if (seq_axis < 0)
seq_axis += src_dims.size();
if (seq_axis < 0 || seq_axis >= src_dims.size())
FAIL() << "Incorrect 'seq_axis' parameters dimensions and axis number!";
if (batch_axis < 0)
batch_axis += src_dims.size();
if (batch_axis < 0 || batch_axis >= src_dims.size())
FAIL() << "Incorrect 'batch_axis' parameters dimensions and axis number!";
// A sequence length may not exceed the extent of seq_axis.
for (i = 0; i < src_dims[batch_axis]; i++) {
if (static_cast<int32_t>(seq_lengths_data[i]) > src_dims[seq_axis])
FAIL() << "Incorrect input 'seq_lengths' values!";
}
// Total element count = stride of dim 0 times its extent.
size_t work_amount_dst = srcStrides[0] * src_dims[0];
SizeVector counters(src_dims.size(), 0);
for (size_t iwork = 0; iwork < work_amount_dst; ++iwork) {
// Map the multi-index in `counters` to the source offset, mirroring the
// coordinate along seq_axis while it is inside this entry's sequence.
for (i = 0, src_idx = 0; i < src_dims.size(); ++i) {
size_t idx = counters[i];
if (i == seq_axis && idx < static_cast<int32_t>(seq_lengths_data[counters[batch_axis]])) {
idx = static_cast<int32_t>(seq_lengths_data[counters[batch_axis]]) - idx - 1;
}
src_idx += idx * srcStrides[i];
}
dst_data[iwork] = src_data[src_idx];
// Odometer-style increment of the multi-index, last dimension fastest.
for (int j = src_dims.size() - 1; j >= 0; j--) {
counters[j] = (counters[j] + 1) % src_dims[j];
if (counters[j] != 0) break;
}
}
}
// Builds a two-input IR (data + seq_lengths -> ReverseSequence) from the
// template below, runs it on the requested device and compares against
// ref_reverse_sequence (and, when provided, the hard-coded reference vector).
//
// NOTE(review): the IR template declares both the "seq_lengths" layer and the
// "ReverseSequence" layer with id="2", and the second edge therefore reads
// from-layer="2" to-layer="2". This looks like a duplicate-id bug in the
// template — presumably tolerated by the legacy IR parser; verify before reuse.
class ReverseSequenceTests : public TestsCommon, public WithParamInterface<reverse_sequence_test_params> {
std::string model_t = R"V0G0N(
<net Name="ReverseSequence_net" version="2" precision="FP32" batch="1">
<layers>
<layer name="input" type="Input" precision="_INP_" id="1">
<output>
<port id="1">
_IN_OUT_
</port>
</output>
</layer>
<layer name="seq_lengths" type="Input" precision="FP32" id="2">
<output>
<port id="2">
<dim>_DIM_SIZE_</dim>
</port>
</output>
</layer>
<layer name="ReverseSequence" id="2" type="ReverseSequence" precision="FP32">
<data seq_axis="_SA_" batch_axis="_BA_"/>
<input>
<port id="1">
_IN_OUT_
</port>
<port id="2">
<dim>_DIM_SIZE_</dim>
</port>
</input>
<output>
<port id="3">
_IN_OUT_
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
<edge from-layer="2" from-port="2" to-layer="2" to-port="2"/>
</edges>
</net>
)V0G0N";
// Substitutes precision, shapes and axes from `p` into the IR template.
std::string getModel(reverse_sequence_test_params p) {
std::string model = model_t;
std::string in_out_shape;
for (size_t i = 0; i < p.in_out_shape.size(); i++) {
in_out_shape += "<dim>";
in_out_shape += std::to_string(p.in_out_shape[i]) + "</dim>\n";
}
REPLACE_WITH_STR(model, "_INP_", p.inPrecision);
REPLACE_WITH_STR(model, "_IN_OUT_", in_out_shape);
REPLACE_WITH_NUM(model, "_DIM_SIZE_", p.seq_lengths.size());
REPLACE_WITH_NUM(model, "_SA_", p.seq_axis);
REPLACE_WITH_NUM(model, "_BA_", p.batch_axis);
return model;
}
protected:
virtual void TearDown() {
}
// The whole test runs from SetUp (legacy pattern): build, infer, compare.
virtual void SetUp() {
try {
reverse_sequence_test_params p = ::testing::WithParamInterface<reverse_sequence_test_params>::GetParam();
std::string model = getModel(p);
Core ie;
CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr());
// Output Data
OutputsDataMap out;
out = network.getOutputsInfo();
BlobMap outputBlobs;
std::pair<std::string, DataPtr> item = *out.begin();
TBlob<float>::Ptr output;
output = make_shared_blob<float>(item.second->getTensorDesc());
output->allocate();
// Output Reference
TBlob<float> dst_ref(item.second->getTensorDesc());
dst_ref.allocate();
// Input Data: filled with a debug ramp by fill_data_dbgval.
auto src = make_shared_blob<float>({ Precision::FP32,
p.in_out_shape,
TensorDesc::getLayoutByDims(p.in_out_shape) });
src->allocate();
fill_data_dbgval(src->buffer(), src->size());
// seq_lengths are fed as FP32 even though the params store int32.
SizeVector seq_lengths_dim(1, p.seq_lengths.size());
auto seq_lengthsIdx = make_shared_blob<float>({ Precision::FP32,
seq_lengths_dim,
TensorDesc::getLayoutByDims(seq_lengths_dim) });
seq_lengthsIdx->allocate();
if (p.seq_lengths.size())
for (size_t i = 0; i < p.seq_lengths.size(); i++) {
static_cast<float *>(seq_lengthsIdx->buffer())[i] = static_cast<float>(p.seq_lengths[i]);
}
auto * seq_lengthsIdxPtr = dynamic_cast<TBlob<float>*>(seq_lengthsIdx.get());
if (seq_lengthsIdxPtr == nullptr)
FAIL() << "Cannot cast blob to TBlob<float>.";
auto * srcPtr = dynamic_cast<TBlob<float>*>(src.get());
if (srcPtr == nullptr)
FAIL() << "Cannot cast blob to TBlob<float>.";
ref_reverse_sequence(*srcPtr, *seq_lengthsIdxPtr, dst_ref, p.seq_axis, p.batch_axis);
// Optional bitwise check of the computed reference against the
// hand-written expected values.
if (p.reference.size()) {
if (memcmp(dst_ref.data(), &p.reference[0], p.reference.size() * sizeof(float)) != 0)
FAIL() << "Wrong result with compare TF reference!";
}
ExecutableNetwork executable_network = ie.LoadNetwork(network, p.device_name);
InferRequest inferRequest = executable_network.CreateInferRequest();
inferRequest.SetBlob("input", src);
inferRequest.SetBlob("seq_lengths", seq_lengthsIdx);
inferRequest.SetBlob(item.first, output);
inferRequest.Infer();
// Check results
compare(*output, dst_ref);
} catch (const details::InferenceEngineException &e) {
FAIL() << e.what();
}
}
};
// Test data vectors: hard-coded expected outputs (cf. the "TF reference"
// cross-check in SetUp) for the parameter rows below; inputs are the
// debug-fill pattern — presumably a 0..N-1 ramp, confirm fill_data_dbgval.
static std::vector<float> test1 = { 3.f,4.f,5.f,0.f,1.f,2.f,6.f,7.f,8.f,12.f,13.f,14.f,9.f,10.f,11.f,15.f,16.f,17.f,21.f,22.f,23.f,18.f,19.f,20.f,24.f,25.f,26.f };
static std::vector<float> test2 = { 1.f,0.f,2.f,4.f,3.f,5.f,7.f,6.f,8.f,10.f,9.f,11.f,13.f,12.f,14.f,16.f,15.f,17.f,19.f,18.f,20.f,22.f,21.f,23.f,25.f,24.f,26.f };
static std::vector<float> test3 = { 2.f,1.f,0.f,4.f,3.f,5.f };
static std::vector<float> test4 = { 0.f,1.f,2.f,3.f,4.f,5.f,6.f,7.f,8.f,12.f,13.f,14.f,9.f,10.f,11.f,15.f,16.f,17.f,24.f,25.f,26.f,21.f,22.f,23.f,18.f,19.f,20.f };
static std::vector<float> test5 = { 0.f,4.f,8.f,3.f,1.f,5.f,6.f,7.f,2.f,9.f,13.f,17.f,12.f,10.f,14.f,15.f,16.f,11.f,18.f,22.f,26.f,21.f,19.f,23.f,24.f,25.f,20.f };
static std::vector<float> test6 = { 0.f,1.f,2.f,3.f,4.f,5.f,6.f,7.f,8.f,9.f,10.f,11.f,13.f,12.f,15.f,14.f,17.f,16.f,19.f,18.f,21.f,20.f,23.f,22.f };
TEST_P(ReverseSequenceTests, smoke_GPU_TestsReverseSequence) {}
// Rows: device, precision, in/out shape, seq_lengths, seq_axis, batch_axis,
// expected output. Negative axes exercise the axis-normalization path.
INSTANTIATE_TEST_CASE_P(
smoke_TestsReverseSequence, ReverseSequenceTests,
::testing::Values(
reverse_sequence_test_params{"GPU", "FP32", { 3, 3, 3 },{ 2, 2, 2 }, 1, 0, test1 },
reverse_sequence_test_params{"GPU", "FP32", { 3, 3, 3 },{ 2, 2, 2 }, -2, 0, test1 },
reverse_sequence_test_params{"GPU", "FP32", { 3, 3, 3 },{ 2, 2, 2 }, 2, 1, test2 },
reverse_sequence_test_params{"GPU", "FP32", { 3, 3, 3 },{ 2, 2, 2 }, -1, 1, test2 },
reverse_sequence_test_params{"GPU", "FP32", { 2, 3 },{ 3, 2 }, 1, 0, test3 },
reverse_sequence_test_params{"GPU", "FP32", { 3, 3, 3 },{ 1, 2, 3 }, 1, 0, test4 },
reverse_sequence_test_params{"GPU", "FP32", { 3, 3, 3 },{ 1, 2, 3 }, 1,-3, test4 },
reverse_sequence_test_params{"GPU", "FP32", { 3, 3, 3 },{ 1, 2, 3 }, 1, 2, test5 },
reverse_sequence_test_params{"GPU", "FP32", { 2, 2, 3, 2 },{ 1, 2 }, 3, 0, test6 }
));

View File

@ -1,145 +0,0 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "select_tests.hpp"
TEST_P(SelectTests, smoke_GPU_TestsSelectNoneBroadcast) {}
// select_params rows: device, cond shape, then shape, else shape, broadcast
// mode ("none"/"numpy"), expected-failure flag (true only in the *Error
// suites — assumption inferred from suite naming; confirm in select_tests.hpp).
// Suite 1: identical shapes on all three inputs, no broadcasting required.
INSTANTIATE_TEST_CASE_P(
smoke_TestsSelectNoneBroadcast, SelectTests,
::testing::Values(
select_params{ "GPU", {1}, {1}, {1}, "none", false },
select_params{ "GPU", {17}, {17}, {17}, "none", false },
select_params{ "GPU", {33, 35}, {33, 35}, {33, 35}, "none", false },
select_params{ "GPU", {6, 7, 8}, {6, 7, 8}, {6, 7, 8}, "none", false },
select_params{ "GPU", {2, 3, 4, 5}, {2, 3, 4, 5}, {2, 3, 4, 5}, "none", false },
select_params{ "GPU", {3, 24, 35, 9}, {3, 24, 35, 9}, {3, 24, 35, 9}, "none", false },
select_params{ "GPU", {8, 14, 32, 12}, {8, 14, 32, 12}, {8, 14, 32, 12}, "none", false },
select_params{ "GPU", {16, 32, 15, 54}, {16, 32, 15, 54}, {16, 32, 15, 54}, "none", false }
));
// Suite 2: shape combinations that are valid under numpy-style broadcasting.
INSTANTIATE_TEST_CASE_P(
smoke_TestsSelectNumpyBroadcast, SelectTests,
::testing::Values(
select_params{ "GPU", {1}, {1}, {1}, "numpy", false },
select_params{ "GPU", {17}, {17}, {17}, "numpy", false },
select_params{ "GPU", {33, 35}, {33, 35}, {33, 35}, "numpy", false },
select_params{ "GPU", {6, 7, 8}, {6, 7, 8}, {6, 7, 8}, "numpy", false },
select_params{ "GPU", {2, 3, 4, 5}, {2, 3, 4, 5}, {2, 3, 4, 5}, "numpy", false },
select_params{ "GPU", {3, 24, 35, 9}, {3, 24, 35, 9}, {3, 24, 35, 9}, "numpy", false },
select_params{ "GPU", {8, 14, 32, 12}, {8, 14, 32, 12}, {8, 14, 32, 12}, "numpy", false },
select_params{ "GPU", {16, 32, 15, 54}, {16, 32, 15, 54}, {16, 32, 15, 54}, "numpy", false },
select_params{ "GPU", {17}, {1}, {17}, "numpy", false },
select_params{ "GPU", {1}, {17}, {17}, "numpy", false },
select_params{ "GPU", {17}, {17}, {1}, "numpy", false },
select_params{ "GPU", {17}, {1}, {1}, "numpy", false },
select_params{ "GPU", {1}, {17}, {1}, "numpy", false },
select_params{ "GPU", {33, 1}, {33, 35}, {33, 35}, "numpy", false },
select_params{ "GPU", {33, 35}, {33, 35}, {35}, "numpy", false },
select_params{ "GPU", {33, 35}, {33, 35}, {1}, "numpy", false },
select_params{ "GPU", {35}, {33, 1}, {35}, "numpy", false },
select_params{ "GPU", {35, 9}, {24, 35, 9}, {24, 35, 9}, "numpy", false },
select_params{ "GPU", {24, 35, 9}, {24, 35, 9}, {35, 9}, "numpy", false },
select_params{ "GPU", {9}, {24, 35, 1}, {35, 9}, "numpy", false },
select_params{ "GPU", {24, 35, 1}, {35, 9}, {24, 35, 1}, "numpy", false },
select_params{ "GPU", {24, 1, 9}, {9}, {24, 1, 9}, "numpy", false },
select_params{ "GPU", {24, 1, 9}, {24, 35, 1}, {1}, "numpy", false },
select_params{ "GPU", {24, 35, 9}, {24, 35, 9}, {24, 1, 9}, "numpy", false },
select_params{ "GPU", {24, 1, 9}, {24, 35, 1}, {24, 35, 9}, "numpy", false },
select_params{ "GPU", {16, 32, 15, 54}, {16, 1, 15, 54}, {16, 32, 15, 54}, "numpy", false },
select_params{ "GPU", {1}, {16, 32, 15, 54}, {16, 32, 1, 54}, "numpy", false },
select_params{ "GPU", {3, 24, 35, 9}, {24, 35, 9}, {3, 1, 35, 9}, "numpy", false },
select_params{ "GPU", {3, 24, 35, 9}, {9}, {3, 24, 35, 9}, "numpy", false },
select_params{ "GPU", {16, 1, 15, 54}, {16, 32, 15, 54}, {16, 32, 1, 54}, "numpy", false },
select_params{ "GPU", {16, 32, 1, 1}, {16, 32, 15, 54}, {16, 32, 15, 54}, "numpy", false },
select_params{ "GPU", {8, 14, 32, 1}, {8, 14, 32, 12}, {32, 12}, "numpy", false },
select_params{ "GPU", {16, 32, 15, 54}, {16, 1, 1, 1}, {16, 32, 1, 54}, "numpy", false },
select_params{ "GPU", {16, 1, 15, 54}, {16, 32, 1, 54}, {16, 32, 15, 1}, "numpy", false },
select_params{ "GPU", {35, 9}, {3, 24, 1, 1}, {3, 24, 35, 9}, "numpy", false },
select_params{ "GPU", {3, 24, 1, 1}, {35, 9}, {35, 9}, "numpy", false },
select_params{ "GPU", {9}, {3, 1, 1, 1}, {3, 1, 1, 1}, "numpy", false }
));
// Suite 3: shapes that must be rejected in "none" mode (any mismatch is an
// error when broadcasting is disabled) — the final flag marks expected failure.
INSTANTIATE_TEST_CASE_P(
smoke_TestsSelectNoneBroadcastError, SelectTests,
::testing::Values(
select_params{ "GPU", {1, 32, 15, 54}, {1, 32, 15, 54}, {16, 32, 15, 54}, "none", true },
select_params{ "GPU", {16, 1, 15, 54}, {16, 1, 15, 54}, {16, 32, 15, 54}, "none", true },
select_params{ "GPU", {16, 32, 15, 54}, {16, 32, 15, 54}, {16, 32, 16, 54}, "none", true },
select_params{ "GPU", {16, 32, 15, 1}, {16, 32, 15, 1}, {16, 32, 15, 54}, "none", true },
select_params{ "GPU", {15, 32, 15, 54}, {16, 32, 15, 54}, {15, 32, 15, 54}, "none", true },
select_params{ "GPU", {16, 33, 15, 54}, {16, 32, 15, 54}, {16, 32, 15, 54}, "none", true },
select_params{ "GPU", {16, 32, 16, 54}, {16, 32, 15, 54}, {16, 32, 16, 54}, "none", true },
select_params{ "GPU", {16, 32, 15, 54}, {16, 32, 15, 54}, {16, 32, 15, 56}, "none", true },
select_params{ "GPU", {3, 5, 35, 9}, {3, 24, 35, 7}, {3, 24, 35, 9}, "none", true },
select_params{ "GPU", {11, 24, 35, 9}, {3, 24, 35, 9}, {3, 24, 7, 9}, "none", true },
select_params{ "GPU", {3, 24, 35, 9}, {3, 24, 35, 9}, {3, 24, 35, 9}, "none", true },
select_params{ "GPU", {11, 24, 35, 11}, {7, 13, 35, 9}, {3, 24, 27, 17}, "none", true },
select_params{ "GPU", {1}, {1}, {9}, "none", true },
select_params{ "GPU", {32, 15, 54}, {16, 32, 15, 54}, {15, 32, 15, 54}, "none", true },
select_params{ "GPU", {16, 32, 15, 54}, {16, 1, 15, 54}, {16, 33, 15, 54}, "none", true },
select_params{ "GPU", {16, 32, 1, 54}, {16, 32, 15, 1}, {16, 32, 2, 3}, "none", true },
select_params{ "GPU", {7, 1, 14}, {7, 14, 14}, {7, 7, 14, 14}, "none", true },
select_params{ "GPU", {7, 1, 14}, {7, 14, 14}, {7, 1, 1, 14}, "none", true },
select_params{ "GPU", {35, 9}, {35, 1}, {24, 35, 9}, "none", true },
select_params{ "GPU", {1}, {9}, {35, 9}, "none", true },
select_params{ "GPU", {17}, {1}, {17}, "none", true },
select_params{ "GPU", {1}, {17}, {17}, "none", true },
select_params{ "GPU", {17}, {17}, {1}, "none", true },
select_params{ "GPU", {17}, {1}, {1}, "none", true },
select_params{ "GPU", {1}, {17}, {1}, "none", true },
select_params{ "GPU", {33, 1}, {33, 35}, {33, 35}, "none", true },
select_params{ "GPU", {33, 35}, {33, 35}, {35}, "none", true },
select_params{ "GPU", {33, 35}, {33, 35}, {1}, "none", true },
select_params{ "GPU", {35}, {33, 1}, {35}, "none", true },
select_params{ "GPU", {35, 9}, {24, 35, 9}, {24, 35, 9}, "none", true },
select_params{ "GPU", {24, 35, 9}, {24, 35, 9}, {35, 9}, "none", true },
select_params{ "GPU", {9}, {24, 35, 1}, {35, 9}, "none", true },
select_params{ "GPU", {24, 35, 1}, {35, 9}, {24, 35, 1}, "none", true },
select_params{ "GPU", {24, 1, 9}, {9}, {24, 1, 9}, "none", true },
select_params{ "GPU", {24, 1, 9}, {24, 35, 1}, {1}, "none", true },
select_params{ "GPU", {24, 35, 9}, {24, 35, 9}, {24, 1, 9}, "none", true },
select_params{ "GPU", {24, 1, 9}, {24, 35, 1}, {24, 35, 9}, "none", true },
select_params{ "GPU", {16, 32, 15, 54}, {16, 1, 15, 54}, {16, 32, 15, 54}, "none", true },
select_params{ "GPU", {1}, {16, 32, 15, 54}, {16, 32, 1, 54}, "none", true },
select_params{ "GPU", {3, 24, 35, 9}, {24, 35, 9}, {3, 1, 35, 9}, "none", true },
select_params{ "GPU", {3, 24, 35, 9}, {9}, {3, 24, 35, 9}, "none", true },
select_params{ "GPU", {16, 1, 15, 54}, {16, 32, 15, 54}, {16, 32, 1, 54}, "none", true },
select_params{ "GPU", {16, 32, 1, 1}, {16, 32, 15, 54}, {16, 32, 15, 54}, "none", true },
select_params{ "GPU", {8, 14, 32, 1}, {8, 14, 32, 12}, {32, 12}, "none", true },
select_params{ "GPU", {16, 32, 15, 54}, {16, 1, 1, 1}, {16, 32, 1, 54}, "none", true },
select_params{ "GPU", {16, 1, 15, 54}, {16, 32, 1, 54}, {16, 32, 15, 1}, "none", true },
select_params{ "GPU", {35, 9}, {3, 24, 1, 1}, {3, 24, 35, 9}, "none", true },
select_params{ "GPU", {3, 24, 1, 1}, {35, 9}, {35, 9}, "none", true },
select_params{ "GPU", {9}, {3, 1, 1, 1}, {3, 1, 1, 1}, "none", true }
));
// Suite 4: shapes that are invalid even under numpy broadcasting rules.
INSTANTIATE_TEST_CASE_P(
smoke_TestsSelectNumpyBroadcastError, SelectTests,
::testing::Values(
select_params{ "GPU", {1, 32, 15, 54}, {1, 32, 15, 54}, {16, 32, 15, 54}, "numpy", true },
select_params{ "GPU", {16, 1, 15, 54}, {16, 1, 15, 54}, {16, 32, 15, 54}, "numpy", true },
select_params{ "GPU", {16, 32, 15, 54}, {16, 32, 15, 54}, {16, 32, 16, 54}, "numpy", true },
select_params{ "GPU", {16, 32, 15, 1}, {16, 32, 15, 1}, {16, 32, 15, 54}, "numpy", true },
select_params{ "GPU", {15, 32, 15, 54}, {16, 32, 15, 54}, {15, 32, 15, 54}, "numpy", true },
select_params{ "GPU", {16, 33, 15, 54}, {16, 32, 15, 54}, {16, 32, 15, 54}, "numpy", true },
select_params{ "GPU", {16, 32, 16, 54}, {16, 32, 15, 54}, {16, 32, 16, 54}, "numpy", true },
select_params{ "GPU", {16, 32, 15, 54}, {16, 32, 15, 54}, {16, 32, 15, 56}, "numpy", true },
select_params{ "GPU", {3, 5, 35, 9}, {3, 24, 35, 7}, {3, 24, 35, 9}, "numpy", true },
select_params{ "GPU", {11, 24, 35, 9}, {3, 24, 35, 9}, {3, 24, 7, 9}, "numpy", true },
select_params{ "GPU", {3, 24, 35, 9}, {3, 24, 35, 9}, {3, 24, 35, 9}, "numpy", true },
select_params{ "GPU", {11, 24, 35, 11}, {7, 13, 35, 9}, {3, 24, 27, 17}, "numpy", true },
select_params{ "GPU", {1}, {1}, {9}, "numpy", true },
select_params{ "GPU", {32, 15, 54}, {16, 32, 15, 54}, {15, 32, 15, 54}, "numpy", true },
select_params{ "GPU", {16, 32, 15, 54}, {16, 1, 15, 54}, {16, 33, 15, 54}, "numpy", true },
select_params{ "GPU", {16, 32, 1, 54}, {16, 32, 15, 1}, {16, 32, 2, 3}, "numpy", true },
select_params{ "GPU", {7, 1, 14}, {7, 14, 14}, {7, 7, 14, 14}, "numpy", true },
select_params{ "GPU", {7, 1, 14}, {7, 14, 14}, {7, 1, 1, 14}, "numpy", true },
select_params{ "GPU", {35, 9}, {35, 1}, {24, 35, 9}, "numpy", true },
select_params{ "GPU", {1}, {9}, {35, 9}, "numpy", true }
));

View File

@ -1,178 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <ie_core.hpp>
#include <cmath>
#include "tests_common.hpp"
#include "single_layer_common.hpp"
using namespace ::testing;
using namespace InferenceEngine;
using namespace std;
// Parameters for one Squeeze/Unsqueeze test case.
struct squeeze_unsqueeze_test_params {
std::string device_name;              // target device plugin ("GPU", ...)
std::string layerType;                // "Squeeze" or "Unsqueeze" (patched into the IR)
InferenceEngine::SizeVector in_dim;   // input tensor shape
std::vector<int> squeeze_dims;        // axes fed through the Const input
InferenceEngine::SizeVector ref_dim;  // expected output shape
std::vector<float> ref;               // expected output values
};
// Fills 'dst' with the precomputed reference values carried by the test
// params. Squeeze/Unsqueeze only reshape, so the reference output is simply
// copied element by element from prm.ref.
template<typename data_t>
void ref_squeeze_unsqueeze(InferenceEngine::TBlob<float>& dst, squeeze_unsqueeze_test_params& prm) {
    data_t* dst_data = dst.buffer().template as<data_t*>();
    // size_t index avoids the signed/unsigned comparison the original
    // 'int i < prm.ref.size()' produced.
    for (size_t i = 0; i < prm.ref.size(); ++i)
        dst_data[i] = prm.ref[i];
}
// Packs the given ints into a U8 weights blob whose storage is reinterpreted
// as data_t values; this backs the Const layer of the generated IR.
template<typename data_t>
InferenceEngine::TBlob<uint8_t>::Ptr generateWeights(const std::vector<int> &data) {
InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(
{InferenceEngine::Precision::U8,{ data.size() * sizeof(data_t) }, InferenceEngine::C}
);
weights->allocate();
for (size_t i = 0; i < data.size(); i++) {
((data_t*) weights->buffer())[i] = data[i];
}
// Ownership of the raw blob transfers to the returned shared Ptr.
return InferenceEngine::TBlob<uint8_t>::Ptr(weights);
}
// Value-parameterized fixture: builds a two-input (data + const axes)
// Squeeze/Unsqueeze IR, infers it on the target device and compares the
// output blob against the precomputed reference from the params.
class SqueezeUnsqueezeTests : public TestsCommon, public WithParamInterface<squeeze_unsqueeze_test_params> {
// IR template; _LAYER_, _IN_, _OUT_ and _INPUT_COUNT_ are patched in getModel().
std::string model_t = R"V0G0N(
<net Name="squeeze_unsqueeze" version="2" precision="FP32" batch="1">
<layers>
<layer name="Input1" type="Input" precision="FP32" id="1">
<output>
<port id="1">
_IN_
</port>
</output>
</layer>
<layer id="2" name="Input2" precision="FP32" type="Const">
<output>
<port id="0">
<dim>_INPUT_COUNT_</dim>
</port>
</output>
<blobs>
<custom offset="0" size="4"/>
</blobs>
</layer>
<layer name="squeeze_unsqueeze" id="5" type="_LAYER_" precision="FP32">
<input>
<port id="5">
_IN_
</port>
<port id="6">
<dim>_INPUT_COUNT_</dim>
</port>
</input>
<output>
<port id="9">
_OUT_
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="1" from-port="1" to-layer="5" to-port="5"/>
<edge from-layer="2" from-port="0" to-layer="5" to-port="6"/>
</edges>
</net>
)V0G0N";
// Instantiates the IR template with the shapes/layer type of the given params.
std::string getModel(squeeze_unsqueeze_test_params p) {
std::string in, out;
for (auto& i : p.in_dim) {
in += "<dim>" + std::to_string(i) + "</dim>\n";
}
for (auto& o : p.ref_dim) {
out += "<dim>" + std::to_string(o) + "</dim>\n";
}
REPLACE_WITH_STR(model_t, "_LAYER_", p.layerType);
REPLACE_WITH_STR(model_t, "_IN_", in);
REPLACE_WITH_STR(model_t, "_OUT_", out);
REPLACE_WITH_NUM(model_t, "_INPUT_COUNT_", p.squeeze_dims.size());
return model_t;
}
protected:
// No per-test cleanup needed; everything is scoped to SetUp locals.
virtual void TearDown() {
}
// Reads the IR (axes passed as weights), runs inference and compares the
// device output with the reference values from the params.
virtual void SetUp() {
try {
TestsCommon::SetUp();
squeeze_unsqueeze_test_params p = ::testing::WithParamInterface<squeeze_unsqueeze_test_params>::GetParam();
std::string model = getModel(p);
Core ie;
CNNNetwork net = ie.ReadNetwork(model,generateWeights<float>(p.squeeze_dims) );
ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
InferRequest inferRequest = executable_network.CreateInferRequest();
InferenceEngine::OutputsDataMap out;
out = net.getOutputsInfo();
std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
InferenceEngine::TBlob<float>::Ptr output;
output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
output->allocate();
inferRequest.SetBlob(item.first, output);
// Output Reference
InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
dst_ref.allocate();
// Input Data: deterministic debug-fill pattern.
InferenceEngine::Blob::Ptr src;
src = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.in_dim, InferenceEngine::TensorDesc::getLayoutByDims(p.in_dim) });
src->allocate();
fill_data_dbgval(src->buffer(), src->size());
auto * srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
if (srcPtr == nullptr)
FAIL() << "Cannot cast blob to TBlob<float>.";
ref_squeeze_unsqueeze<float>(dst_ref, p);
inferRequest.SetBlob("Input1", src);
inferRequest.Infer();
compare(*output, dst_ref);
} catch (const InferenceEngine::details::InferenceEngineException &e) {
FAIL() << e.what();
}
}
};
TEST_P(SqueezeUnsqueezeTests, smoke_GPU_TestsSqueezeUnsqueeze) {}
// Test data vectors: expected outputs — since Squeeze/Unsqueeze only change
// the shape, these appear to be the unchanged debug-fill values (confirm
// against fill_data_dbgval).
std::vector<float> squeeze_ref1 = { 0.f, 1.f, 2.f, 3.f, 4.f, 5.f };
std::vector<float> squeeze_ref2 = { 0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f };
std::vector<float> squeeze_ref3 = { 0.f, 1.f, 2.f };
// Rows: device, layer type, input shape, axes, expected output shape, values.
INSTANTIATE_TEST_CASE_P(
smoke_TestsSqueezeUnsqueeze, SqueezeUnsqueezeTests,
::testing::Values(
squeeze_unsqueeze_test_params{ "GPU", "Squeeze", { 1, 1, 3, 2 }, { 0, 1 }, { 3, 2, 1, 1 }, squeeze_ref1 },
squeeze_unsqueeze_test_params{ "GPU", "Squeeze", { 3, 1, 3, 1 }, { 1 }, { 3, 3, 1, 1 }, squeeze_ref2 },
squeeze_unsqueeze_test_params{ "GPU", "Squeeze", { 3, 1, 3, 1 }, { 3 }, { 3, 1, 3, 1 }, squeeze_ref2 },
squeeze_unsqueeze_test_params{ "GPU", "Unsqueeze", { 3, 1, 1, 1 }, { 0, 2, 3 }, { 1, 3, 1, 1 }, squeeze_ref3 },
squeeze_unsqueeze_test_params{ "GPU", "Unsqueeze", { 1, 1, 3, 1 }, { 0 }, { 1, 1, 1, 3 }, squeeze_ref3 },
squeeze_unsqueeze_test_params{ "GPU", "Unsqueeze", { 1, 3, 1, 1 }, { 0, 1 }, { 1, 1, 1, 3 }, squeeze_ref3 },
squeeze_unsqueeze_test_params{ "GPU", "Unsqueeze", { 3, 1, 1, 1 }, { 0, 1, 2 }, { 1, 1, 1, 3 }, squeeze_ref3 }
));

View File

@ -1,355 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <ie_core.hpp>
#include <cmath>
#include "tests_common.hpp"
#include "single_layer_common.hpp"
using namespace ::testing;
using namespace InferenceEngine;
using namespace std;
// Parameters for one StridedSlice test case.
struct strided_slice_test_params {
std::string device_name;              // target device plugin
InferenceEngine::SizeVector in_dim;   // input tensor shape
std::vector<int> begin;               // slice begin index per dimension
std::vector<int> end;                 // slice end index per dimension
std::vector<int> strides;             // slice stride per dimension
InferenceEngine::SizeVector ref_dim;  // expected output shape
std::vector<float> ref;               // expected output values
};
// Clamps *idx into the half-open range [min, max): values below 'min' are
// first raised to 'min', then anything at or above 'max' is lowered to max-1.
inline void clipping(int *idx, const int min, const int max) {
    if (*idx < min)
        *idx = min;
    if (*idx >= max)
        *idx = max - 1;
}
// Host reference implementation of StridedSlice. Normalizes begin/end/stride
// per dimension (handling masks, negative indices and ellipsis expansion),
// derives the output dims, then gathers the strided elements from src to dst.
// Masks use 0 to mean "ignore the corresponding begin/end value".
void ref_strided_slice(
InferenceEngine::TBlob<float> &src,
InferenceEngine::TBlob<float> &dst,
InferenceEngine::SizeVector &out_dims,
std::vector<int> begin,
std::vector<int> end,
std::vector<int> stride,
InferenceEngine::SizeVector begin_mask,
InferenceEngine::SizeVector end_mask,
InferenceEngine::SizeVector ellipsis_mask,
InferenceEngine::SizeVector new_axis_mask,
InferenceEngine::SizeVector shrink_axis_mask
) {
size_t i;
const float *src_data = src.data();
InferenceEngine::SizeVector src_dims = src.getTensorDesc().getDims();
InferenceEngine::SizeVector srcStrides = src.getTensorDesc().getBlockingDesc().getStrides();
float* dst_data = dst.data();
InferenceEngine::SizeVector dst_dims = dst.getTensorDesc().getDims();
InferenceEngine::SizeVector dstStrides = dst.getTensorDesc().getBlockingDesc().getStrides();
// Count inserted (new_axis) and removed (shrink_axis) dimensions to get the
// total number of dimensions the masks are expressed in.
int new_axis = 0;
for (auto& na : new_axis_mask)
new_axis += na;
int shrink_axis = 0;
for (auto& sa : shrink_axis_mask)
shrink_axis += sa;
int max_dims = src_dims.size() + new_axis;
// Check begin/end/stride vector sizes
int bounds_size = 0;
if (begin.size() && end.size() && begin.size() != end.size()) FAIL() << "Begin vector size should be equal end vectror size";
if (begin.size() && stride.size() && stride.size() != begin.size()) FAIL() << "Stride vector size should be equal begin vectror size";
if (end.size() && stride.size() && stride.size() != end.size()) FAIL() << "Stride vector size should be equal end vectror size";
if (begin.size()) bounds_size = begin.size();
if (end.size()) bounds_size = end.size();
if (stride.size()) bounds_size = stride.size();
// ellipsis_mask must be a power of two (only one ellipsis), so to take a first position
int ellipsis_pos1, ellipsis_pos2;
ellipsis_pos1 = ellipsis_pos2 = max_dims;
for (i = 0; i < ellipsis_mask.size(); i++) {
if (ellipsis_mask[i] > 0) {
ellipsis_pos1 = i;
break;
}
}
bounds_size -= ellipsis_pos1;
if(bounds_size > 0 && (max_dims - bounds_size) > ellipsis_pos1)
ellipsis_pos2 = max_dims - bounds_size;
// Per-dimension normalized bounds; [ellipsis_pos1, ellipsis_pos2) is the
// span covered by the ellipsis (take-everything defaults).
std::vector<int> begin_dms(max_dims, 0);
std::vector<int> end_dms(max_dims, -1);
std::vector<int> stride_dms(max_dims, 1);
int j, k, bj, ej, sj;
InferenceEngine::SizeVector our_dims;
for (i = 0, j = 0, k = 0, bj = 0, ej = 0, sj = 0; i < max_dims; i++) {
if (i >= ellipsis_pos1 && i < ellipsis_pos2) {
if (!(new_axis_mask.size() > i && new_axis_mask[i] == 1)) {
end_dms[i] = end_dms[i] >= 0 ? end_dms[i] : src_dims[j++] + end_dms[i];
} else {
//end_dms[i] = 0;
end_dms[i] = begin_dms[i];
}
out_dims.push_back(static_cast<int>(ceil(static_cast<float>(abs(end_dms[i] - begin_dms[i]) + 1) / static_cast<float>(abs(stride_dms[i])))));
our_dims.push_back(static_cast<int>(ceil(static_cast<float>(abs(end_dms[i] - begin_dms[i]) + 1) / static_cast<float>(abs(stride_dms[i])))));
k = ellipsis_pos1;
continue;
}
stride_dms[i] = (stride.size() > sj && stride[sj] != 0) ? stride[sj++] : 1;
// begin_mask value 0 means: ignore the provided begin and use the default
// for the stride direction (0 for forward, -1 for backward).
if (!(begin_mask.size() > j && begin_mask[j] == 0))
begin_dms[i] = begin.size() > bj ? begin[bj] : (stride_dms[i] > 0 ? 0 : -1);
else
begin_dms[i] = stride_dms[i] > 0 ? 0 : -1;
bj++;
begin_dms[i] = begin_dms[i] >= 0 ? begin_dms[i] : src_dims[j] + begin_dms[i];
// Clipping 'begin'
clipping(&begin_dms[i], 0, src_dims[j]);
if (!(end_mask.size() > j && end_mask[j] == 0)) {
// end is exclusive in the params, inclusive internally: +-1 by direction.
int end_dms_tmp = end.size() > ej ? (stride_dms[i] > 0 ? end[ej] - 1 : end[ej] + 1) : end_dms[i];
end_dms[i] = end.size() > ej ? end_dms_tmp : (stride_dms[i] > 0 ? -1 : 0);
}
else {
end_dms[i] = stride_dms[i] > 0 ? -1 : 0;
}
ej++;
end_dms[i] = end_dms[i] >= 0 ? end_dms[i] : src_dims[j] + end_dms[i];
// Clipping 'end'
clipping(&end_dms[i], 0, src_dims[j]);
if (!(new_axis_mask.size() > i && new_axis_mask[i] == 1))
j++;
else
end_dms[i] = 0;
// A shrunk axis collapses to a single element (begin == end).
if (shrink_axis_mask.size() > k && shrink_axis_mask[k] == 1)
end_dms[i] = begin_dms[i];
else
out_dims.push_back(static_cast<int>(ceil(static_cast<float>(abs(end_dms[i] - begin_dms[i]) + 1) / static_cast<float>(abs(stride_dms[i])))));
our_dims.push_back(static_cast<int>(ceil(static_cast<float>(abs(end_dms[i] - begin_dms[i]) + 1) / static_cast<float>(abs(stride_dms[i])))));
k++;
}
// Gather: iterate every output element; 'counters' is the multi-radix output
// index and src_idx is rebuilt from it each iteration.
size_t work_amount_dst = dstStrides[0] * dst_dims[0];
InferenceEngine::SizeVector counters(max_dims, 0);
for (size_t iwork = 0, dst_idx = 0; iwork < work_amount_dst; ++iwork) {
int src_idx = 0;
for (i = 0, j = 0; i < max_dims; ++i) {
src_idx += (begin_dms[i] + counters[i] * stride_dms[i]) * srcStrides[j];
if (!(new_axis_mask.size() > i && new_axis_mask[i] == 1)) j++;
}
dst_data[dst_idx++] = src_data[src_idx];
for (j = max_dims - 1; j >= 0; j--) {
counters[j] = (counters[j] + 1) % our_dims[j];
if (counters[j] != 0) break;
}
}
}
// Copies the precomputed reference output from the test params into the
// first destination blob (used when the params carry hard-coded expectations).
template<typename data_t>
void ref_strided_slice(std::vector<Blob::Ptr> &dsts, strided_slice_test_params& prm) {
    data_t *dst_data = dsts[0]->buffer().as<data_t*>();
    // size_t index avoids the signed/unsigned comparison the original
    // 'int i < prm.ref.size()' produced.
    for (size_t i = 0; i < prm.ref.size(); ++i)
        dst_data[i] = prm.ref[i];
}
// Concatenates several int vectors into one U8 weights blob, storing each
// value as a uint32_t in order; backs the begin/end/stride Const layers of
// the StridedSlice IR.
InferenceEngine::TBlob<uint8_t>::Ptr generateWeights(const std::vector<std::vector<int>> &data) {
size_t totalSize = 0;
for (size_t i = 0; i < data.size(); ++i)
totalSize += data[i].size();
InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(
{ InferenceEngine::Precision::U8,{ totalSize * sizeof(uint32_t) }, Layout::C }
);
weights->allocate();
size_t vectorCounter = 0;
size_t innerVectorCounter = 0;
// Walk all inner vectors sequentially, moving to the next vector once the
// current one is exhausted.
// NOTE(review): the skip advances one vector per element, so consecutive
// empty inner vectors would be mishandled — callers pass non-empty vectors.
for (size_t i = 0; i < totalSize; i++) {
if (innerVectorCounter >= data[vectorCounter].size()) {
++vectorCounter;
innerVectorCounter = 0;
}
((uint32_t*) weights->buffer())[i] = data[vectorCounter][innerVectorCounter];
++innerVectorCounter;
}
// Ownership of the raw blob transfers to the returned shared Ptr.
return InferenceEngine::TBlob<uint8_t>::Ptr(weights);
}
// Value-parameterized fixture: builds a four-input StridedSlice IR (data plus
// begin/end/stride Const layers fed through the weights stream), infers it on
// the target device and compares against the host reference implementation.
class StridedSliceTests : public TestsCommon, public WithParamInterface<strided_slice_test_params> {
// IR template; _IN_ and _OUT_ are patched in getModel(). The three Const
// layers read 4 ints each from weight offsets 0/16/32 (see generateWeights).
std::string model_t = R"V0G0N(
<net Name="strided_slice" version="2" precision="FP32" batch="1">
<layers>
<layer name="Input1" type="Input" precision="FP32" id="1">
<output>
<port id="1">
_IN_
</port>
</output>
</layer>
<layer id="2" name="Input2" precision="FP32" type="Const">
<output>
<port id="0">
<dim>4</dim>
</port>
</output>
<blobs>
<custom offset="0" size="4"/>
</blobs>
</layer>
<layer id="3" name="Input3" precision="FP32" type="Const">
<output>
<port id="0">
<dim>4</dim>
</port>
</output>
<blobs>
<custom offset="16" size="4"/>
</blobs>
</layer>
<layer id="4" name="Input4" precision="FP32" type="Const">
<output>
<port id="0">
<dim>4</dim>
</port>
</output>
<blobs>
<custom offset="32" size="4"/>
</blobs>
</layer>
<layer name="strided_slice" id="5" type="StridedSlice" precision="FP32">
<data begin_mask=""
end_mask=""
ellipsis_mask=""
new_axis_mask=""
shrink_axis_mask=""/>
<input>
<port id="5">
_IN_
</port>
<port id="6">
<dim>4</dim>
</port>
<port id="7">
<dim>4</dim>
</port>
<port id="8">
<dim>4</dim>
</port>
</input>
<output>
<port id="9">
_OUT_
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="1" from-port="1" to-layer="5" to-port="5"/>
<edge from-layer="2" from-port="0" to-layer="5" to-port="6"/>
<edge from-layer="3" from-port="0" to-layer="5" to-port="7"/>
<edge from-layer="4" from-port="0" to-layer="5" to-port="8"/>
</edges>
</net>
)V0G0N";
// Instantiates the IR template with the input/output shapes of the params.
std::string getModel(strided_slice_test_params p) {
std::string in, out;
for (auto& i : p.in_dim) {
in += "<dim>" + std::to_string(i) + "</dim>\n";
}
for (auto& o : p.ref_dim) {
out += "<dim>" + std::to_string(o) + "</dim>\n";
}
REPLACE_WITH_STR(model_t, "_IN_", in);
REPLACE_WITH_STR(model_t, "_OUT_", out);
return model_t;
}
protected:
// No per-test cleanup needed; everything is scoped to SetUp locals.
virtual void TearDown() {
}
// Reads the IR (begin/end/strides packed as weights), computes the host
// reference with empty masks, runs inference and compares the outputs.
virtual void SetUp() {
try {
TestsCommon::SetUp();
strided_slice_test_params p = ::testing::WithParamInterface<strided_slice_test_params>::GetParam();
std::string model = getModel(p);
Core ie;
CNNNetwork net = ie.ReadNetwork(model, generateWeights({ p.begin, p.end, p.strides }));
ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
InferRequest inferRequest = executable_network.CreateInferRequest();
InferenceEngine::OutputsDataMap out;
out = net.getOutputsInfo();
std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
InferenceEngine::TBlob<float>::Ptr output;
output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
output->allocate();
inferRequest.SetBlob(item.first, output);
// Output Reference
InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
dst_ref.allocate();
// Input Data: deterministic debug-fill pattern.
InferenceEngine::Blob::Ptr src;
src = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.in_dim, InferenceEngine::TensorDesc::getLayoutByDims(p.in_dim) });
src->allocate();
fill_data_dbgval(src->buffer(), src->size());
auto * srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
if (srcPtr == nullptr)
FAIL() << "Cannot cast blob to TBlob<float>.";
// All five masks are empty — plain begin/end/stride slicing.
ref_strided_slice(*srcPtr, dst_ref, p.ref_dim, p.begin, p.end, p.strides, {}, {}, {}, {}, {});
inferRequest.SetBlob("Input1", src);
inferRequest.Infer();
compare(*output, dst_ref);
} catch (const InferenceEngine::details::InferenceEngineException &e) {
FAIL() << e.what();
}
}
};
TEST_P(StridedSliceTests, smoke_GPU_TestsStridedSlice) {}
// Test data vectors: expected slice outputs for the rows below, given the
// debug-fill input pattern.
std::vector<float> ref1 = { 0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 13.f, 14.f, 15.f };
std::vector<float> ref2 = { 15.f };
std::vector<float> ref3 = { 0.f, 1.f, 2.f, 6.f, 7.f, 8.f, 12.f, 13.f, 14.f, 18.f, 19.f, 20.f, 24.f, 25.f, 26.f, 30.f, 31.f, 32.f, 36.f, 37.f, 38.f, 42.f, 43.f, 44.f };
std::vector<float> ref4 = { 33.f, 34.f, 35.f, 41.f, 42.f, 43.f, 49.f, 50.f, 51.f, 57.f, 58.f, 59.f };
std::vector<float> ref5 = { 0.f, 1.f, 2.f, 8.f, 9.f, 10.f, 12.f, 13.f, 14.f, 20.f, 21.f, 22.f, 24.f, 25.f, 26.f, 32.f, 33.f, 34.f, 36.f, 37.f, 38.f, 44.f, 45.f, 46.f };
// Rows: device, input shape, begin, end, strides, expected shape, values.
INSTANTIATE_TEST_CASE_P(
smoke_TestsStridedSlice, StridedSliceTests,
::testing::Values(
strided_slice_test_params{ "GPU", { 2, 2, 2, 2 }, { 0, 0, 0, 0 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, ref1 },
strided_slice_test_params{ "GPU", { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, { 1, 1, 1, 1 }, ref2 },
strided_slice_test_params{ "GPU", { 2, 2, 4, 3 }, { 0, 0, 0, 0 }, { 2, 2, 4, 3 }, { 1, 1, 2, 1 }, { 2, 2, 2, 3 }, ref3 },
strided_slice_test_params{ "GPU", { 2, 2, 4, 4 }, { 1, 0, 0, 1 }, { 2, 2, 4, 4 }, { 1, 1, 2, 1 }, { 1, 2, 2, 3 }, ref4 },
strided_slice_test_params{ "GPU", { 2, 2, 3, 4 }, { 0, 0, 0, 0 }, { 2, 2, 4, 3 }, { 1, 1, 2, 1 }, { 2, 2, 2, 3 }, ref5 }
));

View File

@ -1,13 +0,0 @@
#include "variadic_split_tests.hpp"
TEST_P(VariadicSplitTests, smoke_GPU_TestsVariadicSplit) {}
// variadic_split_params rows: device, split axis, split lengths, input shape,
// expected per-output shapes (field meaning inferred from the values — confirm
// against variadic_split_tests.hpp).
INSTANTIATE_TEST_CASE_P(
smoke_TestsVariadicSplit, VariadicSplitTests,
::testing::Values(
variadic_split_params{ "GPU", 1, {2, 4}, {1, 6, 22, 22}, {{1, 2, 22, 22}, {1, 4, 22, 22}} },
variadic_split_params{ "GPU", 1, {4, 6}, {1, 10, 22, 22}, {{1, 4, 22, 22}, {1, 6, 22, 22}} },
variadic_split_params{ "GPU", 1, {2, 4, 1}, {1, 7, 22, 22}, {{1, 2, 22, 22}, {1, 4, 22, 22}, {1, 1, 22, 22}} },
variadic_split_params{ "GPU", 2, {10, 6}, {1, 10, 16, 22}, {{1, 10, 10, 22}, {1, 10, 6, 22}} },
variadic_split_params{ "GPU", 3, {2, 4, 9, 10, 11}, {1, 5, 5, 36}, {{1, 5, 5, 2}, {1, 5, 5, 4}, {1, 5, 5, 9}, {1, 5, 5, 10}, {1, 5, 5, 11}} }
));

View File

@ -1,15 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "activation_tests.hpp"
// CPU activation cases: same input config (case_1, declared in
// activation_tests.hpp) run through three activation types.
activation_test_params act_test_cases[] = {
activation_test_params("CPU", case_1, "relu"),
activation_test_params("CPU", case_1, "exp"),
activation_test_params("CPU", case_1, "not"),
};
INSTANTIATE_TEST_CASE_P(
smoke_CPU_TestsActivationFunctions, ActivationTest, ::testing::ValuesIn(act_test_cases), getTestCaseName);

View File

@ -1,56 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "arg_max_min_tests.hpp"
// Shared input tensor (2x3x2x2, values listed two per line to match the
// innermost dimension) for the ArgMax cases below.
static std::vector<float> in_data = { 0.0f, 1.0f,
                                      20.0f, 12.0f,
                                      12.0f, 0.0f,
                                      15.0f, 8.0f,
                                      9.0f, 4.0f,
                                      25.0f, 15.0f,
                                      0.0f, 0.0f,
                                      1.0f, 1.0f,
                                      0.0f, 0.0f,
                                      24.0f, 12.0f,
                                      8.0f, 9.0f,
                                      2.0f, 14.0f };  // fix: '14.0' was the only double literal in this float list
// ArgMax over every axis of the 2x3x2x2 input, for top_k 1 and 2 (plus one
// top_k=3 case); the reduced axis shrinks to top_k in the reference shape.
INSTANTIATE_TEST_CASE_P(
smoke_mkldnn_TestsArgMaxMin, ArgMaxMinTFTests,
::testing::Values(
// Params: device_name, in_dim, in_data, has_axis, out_max_val, top_k, axis, ref_dim, ref_data
argMaxMinTF_test_params{ "CPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
1, 0, 1, 0, { 1, 3, 2, 2 } },
argMaxMinTF_test_params{ "CPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
1, 0, 1, 1, { 2, 1, 2, 2 } },
argMaxMinTF_test_params{ "CPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
1, 0, 1, 2, { 2, 3, 1, 2 } },
argMaxMinTF_test_params{ "CPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
1, 0, 1, 3, { 2, 3, 2, 1 } },
argMaxMinTF_test_params{ "CPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
1, 0, 2, 0, { 2, 3, 2, 2 } },
argMaxMinTF_test_params{ "CPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
1, 0, 2, 1, { 2, 2, 2, 2 } },
argMaxMinTF_test_params{ "CPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
1, 0, 2, 2, { 2, 3, 2, 2 } },
argMaxMinTF_test_params{ "CPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
1, 0, 2, 3, { 2, 3, 2, 2 } },
argMaxMinTF_test_params{ "CPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
1, 0, 3, 1, { 2, 3, 2, 2 } }
));

View File

@ -1,27 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "bin_conv_tests.hpp"
// All sixteen binary-convolution configurations (case_1..case_16 are declared
// in bin_conv_tests.hpp) exercised on CPU.
bin_conv_test_params bin_conv_only_test_cases[] = {
bin_conv_test_params("CPU", case_1),
bin_conv_test_params("CPU", case_2),
bin_conv_test_params("CPU", case_3),
bin_conv_test_params("CPU", case_4),
bin_conv_test_params("CPU", case_5),
bin_conv_test_params("CPU", case_6),
bin_conv_test_params("CPU", case_7),
bin_conv_test_params("CPU", case_8),
bin_conv_test_params("CPU", case_9),
bin_conv_test_params("CPU", case_10),
bin_conv_test_params("CPU", case_11),
bin_conv_test_params("CPU", case_12),
bin_conv_test_params("CPU", case_13),
bin_conv_test_params("CPU", case_14),
bin_conv_test_params("CPU", case_15),
bin_conv_test_params("CPU", case_16)
};
INSTANTIATE_TEST_CASE_P(
smoke_CPU_TestBinaryConvolution, BinaryConvolutionOnlyTest, ::testing::ValuesIn(bin_conv_only_test_cases), getTestCaseName);

View File

@ -1,22 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "deformable_psroi_tests.hpp"
// Deformable PS-ROI pooling cases; exact field meaning is defined by
// deformable_psroi_test_params in deformable_psroi_tests.hpp (shapes for
// feature map / ROIs / output, spatial scale, output dim, group/part sizes,
// no-trans flag and optional trans shape — confirm against the header).
INSTANTIATE_TEST_CASE_P(
smoke_TestDeformable, DeformablePSROIOnlyTest,
::testing::Values(
deformable_psroi_test_params{"CPU", {1, 7938, 38, 38}, {300, 5}, {300, 162, 7, 7},
0.0625, 162, 7, 7, 7, 7, 4, true
},
deformable_psroi_test_params{"CPU", {1, 392, 38, 38}, {300, 5}, {300, 8, 7, 7},
0.0625, 8, 7, 7, 7, 7, 4, false, 0.1, {300, 2, 7, 7}
},
deformable_psroi_test_params{"CPU", {1, 98, 38, 38}, {300, 5}, {300, 2, 7, 7},
0.0625, 2, 7, 7, 7, 7, 4, true
},
deformable_psroi_test_params{"CPU", {1, 3969, 38, 38}, {300, 5}, {300, 81, 7, 7},
0.0625, 81, 7, 7, 7, 7, 4, false, 0.1, {300, 162, 7, 7}
}
));

View File

@ -1,18 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "depth_to_space_tests.hpp"
//TEST_P(DepthToSpaceTests, TestsDepthToSpace) {}
//INSTANTIATE_TEST_CASE_P(
// TestsDepthToSpace, DepthToSpaceTests,
// ::testing::Values(
// depth_to_space_test_params{ "CPU", "FP32", { 1, 4, 1, 1 }, input0, 2, { 1, 1, 2, 2 }, ref_input0_bs2 },
// depth_to_space_test_params{ "CPU", "FP32", { 1, 4, 2, 1 }, input1, 2, { 1, 1, 4, 2 }, ref_input1_bs2 },
// depth_to_space_test_params{ "CPU", "FP32", { 1, 4, 2, 2 }, input2, 2, { 1, 1, 4, 4 }, ref_input2_bs2 },
// depth_to_space_test_params{ "CPU", "FP32", { 1, 4, 3, 2 }, input3, 2, { 1, 1, 6, 4 }, ref_input3_bs2 },
// depth_to_space_test_params{ "CPU", "FP32", { 1, 9, 3, 3 }, input4, 3, { 1, 1, 9, 9 }, ref_input4_bs3 },
// depth_to_space_test_params{ "CPU", "FP32", { 1, 18, 3, 3 }, input5, 3, { 1, 2, 9, 9 }, ref_input5_bs3 }
//));

View File

@ -1,42 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "eltwise_tests.hpp"
// Eltwise smoke cases: each entry is {device, input dims, operation, input count}
// per eltwise_test_params (field meanings presumably as named in
// eltwise_tests.hpp — TODO confirm). Covers arithmetic, comparison and
// logical element-wise operations.
INSTANTIATE_TEST_CASE_P(
smoke_CPU_TestEltwise, EltwiseOnlyTest,
::testing::Values(
eltwise_test_params{"CPU",
{13, 13, 1}, eltwise_test_params::Sum, 4},
eltwise_test_params{"CPU",
{23, 23, 1}, eltwise_test_params::Max, 3},
eltwise_test_params{"CPU",
{23, 23, 1}, eltwise_test_params::Prod, 5},
eltwise_test_params{"CPU",
{23, 23, 1}, eltwise_test_params::Sub, 4},
eltwise_test_params{"CPU",
{23, 23, 1}, eltwise_test_params::Min, 3},
eltwise_test_params{"CPU",
{23, 23, 1}, eltwise_test_params::Div, 5},
eltwise_test_params{"CPU",
{23, 23, 1}, eltwise_test_params::Squared_diff, 2},
eltwise_test_params{"CPU",
{13, 13, 1}, eltwise_test_params::Equal, 5},
eltwise_test_params{"CPU",
{23, 23, 1}, eltwise_test_params::Not_equal, 5},
eltwise_test_params{"CPU",
{23, 23, 1}, eltwise_test_params::Less, 5},
eltwise_test_params{"CPU",
{23, 23, 1}, eltwise_test_params::Less_equal, 5},
eltwise_test_params{"CPU",
{23, 23, 1}, eltwise_test_params::Greater, 5},
eltwise_test_params{"CPU",
{23, 23, 1}, eltwise_test_params::Greater_equal, 5},
eltwise_test_params{"CPU",
{23, 23, 1}, eltwise_test_params::Logical_AND, 5},
eltwise_test_params{"CPU",
{23, 23, 1}, eltwise_test_params::Logical_OR, 5},
eltwise_test_params{"CPU",
{23, 23, 1}, eltwise_test_params::Logical_XOR, 5}
));

View File

@ -1,27 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gather_tests.hpp"
// Gather (TF-style) smoke cases. Per entry: device, precision, indices shape,
// indices data, dictionary shape, dictionary data, axis (negative axes count
// from the back), output shape, reference output. Inputs/references (in0, dict,
// ref_*) are presumably fixtures from gather_tests.hpp — TODO confirm.
INSTANTIATE_TEST_CASE_P(
smoke_CPU_TestsGather, GatherTFTests,
::testing::Values(
gatherTF_test_params{ "CPU", "FP32", { 1, 4 }, in0,{ 2, 2 }, dict2D, 0, { 1, 4, 2 }, ref_in0_a0_d22 },
gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in0,{ 2, 2, 3 }, dict, 0, { 2, 2, 2, 3 }, ref_in0_a0_d223 },
gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in0,{ 2, 2, 3 }, dict,-3, { 2, 2, 2, 3 }, ref_in0_a0_d223 },
gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in1,{ 2, 2, 3 }, dict, 2, { 2, 2, 2, 2 }, ref_in1_a2_d223 },
gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in1,{ 2, 2, 3 }, dict,-1, { 2, 2, 2, 2 }, ref_in1_a2_d223 },
gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in0,{ 2, 3, 2 }, dict, 2, { 2, 3, 2, 2 }, ref_in0_a2_d232 },
gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in0,{ 2, 3, 2 }, dict,-1, { 2, 3, 2, 2 }, ref_in0_a2_d232 },
gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in1,{ 3, 2, 2 }, dict, 0, { 2, 2, 2, 2 }, ref_in1_a0_d322 },
gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in1,{ 3, 2, 2 }, dict,-3, { 2, 2, 2, 2 }, ref_in1_a0_d322 },
gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in1,{ 2, 3, 2 }, dict, 1, { 2, 2, 2, 2 }, ref_in1_a1_d232 },
gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in1,{ 2, 3, 2 }, dict,-2, { 2, 2, 2, 2 }, ref_in1_a1_d232 },
gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in1,{ 2, 2, 3 }, dict, 2, { 2, 2, 2, 2 }, ref_in1_a2_d223 },
gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in1,{ 2, 2, 3 }, dict,-1, { 2, 2, 2, 2 }, ref_in1_a2_d223 },
gatherTF_test_params{ "CPU", "I32", { 2, 2 }, in0,{ 2, 2, 3 }, dict, 0, { 2, 2, 2, 3 }, ref_in0_a0_d223 },
gatherTF_test_params{ "CPU", "I32", { 2, 2 }, in0,{ 2, 2, 3 }, dict,-3, { 2, 2, 2, 3 }, ref_in0_a0_d223 },
gatherTF_test_params{ "CPU", "I32", { 2, 2 }, in0,{ 2, 3, 2 }, dict, 2, { 2, 3, 2, 2 }, ref_in0_a2_d232 }
));

View File

@ -1,35 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gemm_tests.hpp"
// Small subset of GEMM cases run in the fast smoke suite.
gemm_base_params gemm_smoke_cases[] = {
case6, case14, case22, case30,
case38
};
INSTANTIATE_TEST_CASE_P(smoke_CPU_GemmRandomTest, GemmRandomTest,
testing::Combine(
testing::Values("CPU"),
testing::Values("FP32"),
testing::ValuesIn(gemm_smoke_cases)
));
// Full case list for the nightly run; commented-out cases were presumably
// disabled as unsupported (mismatched ranks) — TODO confirm in gemm_tests.hpp.
gemm_base_params gemm_all_cases[] = { // 5D cases
case1, case2, case3, case4, case5, /* case7, case8, */
case9, case10, case11, case12, case13, /* case15, case16, */
case17, case18, case19, case20, case21, /* case23, case24, */
case25, case26, case27, case28, case29, /* case31, case32, */
case33, case34, case35, case36, case37, case38,
// Cases with mismatched input dimension numbers
// case39, case40, case41, case42, case43, case44,
// case45, case46, case47
};
INSTANTIATE_TEST_CASE_P(nightly_CPU_GemmRandomTest, GemmRandomTest,
testing::Combine(
testing::Values("CPU"),
testing::Values("FP32"),
testing::ValuesIn(gemm_all_cases)
));

View File

@ -1,10 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "pad_tests.hpp"
// Pad (TF-style) cases, one per pad mode. PLUGING_CASE (sic — macro name as
// declared in pad_tests.hpp) args: device, suite, case id, input shape, input
// data, pads_begin, pads_end, mode, pad value, output shape, reference.
PLUGING_CASE(CPU, PadTFTests, 1, { 3, 4 }, in, { 2, 2 }, { 1, 3 }, "constant", 0.f, { 6, 9 }, ref_constant);
PLUGING_CASE(CPU, PadTFTests, 2, { 3, 4 }, in, { 2, 2 }, { 1, 3 }, "edge", 0.f, { 6, 9 }, ref_edge);
PLUGING_CASE(CPU, PadTFTests, 3, { 3, 4 }, in, { 2, 2 }, { 1, 3 }, "reflect", 0.f, { 6, 9 }, ref_reflect);
PLUGING_CASE(CPU, PadTFTests, 4, { 3, 4 }, in, { 2, 2 }, { 1, 3 },"symmetric", 0.f, { 6, 9 }, ref_symmetric);

View File

@ -1,29 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "permute_tests.hpp"
// Permute smoke cases: device paired with case_N descriptors, presumably
// declared in permute_tests.hpp — TODO confirm.
permute_test_params permute_only_test_cases[] = {
permute_test_params("CPU", case_1),
permute_test_params("CPU", case_2),
permute_test_params("CPU", case_3),
permute_test_params("CPU", case_4),
permute_test_params("CPU", case_5),
permute_test_params("CPU", case_6),
permute_test_params("CPU", case_7),
permute_test_params("CPU", case_8),
permute_test_params("CPU", case_9),
permute_test_params("CPU", case_10),
permute_test_params("CPU", case_11),
permute_test_params("CPU", case_12),
permute_test_params("CPU", case_13),
permute_test_params("CPU", case_14),
permute_test_params("CPU", case_15),
permute_test_params("CPU", case_16)
};
INSTANTIATE_TEST_CASE_P(
smoke_CPU_TestPermute, PermuteOnlyTests, ::testing::ValuesIn(permute_only_test_cases));

View File

@ -1,35 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "quantize_tests.hpp"
// Quantize/FakeQuantize smoke cases: device paired with case_N descriptors,
// presumably declared in quantize_tests.hpp — TODO confirm.
quantize_test_params quantize_only_test_cases[] = {
quantize_test_params{"CPU", case_1},
quantize_test_params{"CPU", case_2},
quantize_test_params{"CPU", case_3},
quantize_test_params{"CPU", case_4},
quantize_test_params{"CPU", case_5},
quantize_test_params{"CPU", case_6},
quantize_test_params{"CPU", case_7},
quantize_test_params{"CPU", case_8},
quantize_test_params{"CPU", case_9},
quantize_test_params{"CPU", case_10},
quantize_test_params{"CPU", case_11},
quantize_test_params{"CPU", case_12},
quantize_test_params{"CPU", case_13},
quantize_test_params{"CPU", case_14},
quantize_test_params{"CPU", case_15},
quantize_test_params{"CPU", case_16},
quantize_test_params{"CPU", case_17},
quantize_test_params{"CPU", case_18},
quantize_test_params{"CPU", case_19},
quantize_test_params{"CPU", case_20},
quantize_test_params{"CPU", case_21},
quantize_test_params{"CPU", case_22},
quantize_test_params{"CPU", case_23},
quantize_test_params{"CPU", case_24},
};
INSTANTIATE_TEST_CASE_P(smoke_CPUTestQuantize, QuantizeOnlyTest, ::testing::ValuesIn(quantize_only_test_cases));

View File

@ -1,80 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "reduce_tests.hpp"
// ReduceSum cases. NOTE(review): active entries are labeled "I32" yet reuse
// the same references as the commented-out FP32 block — presumably the
// precision string only selects the network precision; verify against
// reduce_tests.hpp.
INSTANTIATE_TEST_CASE_P(
smoke_CPU_TestsReduceSum, ReduceTestsShared,
::testing::Values(
// Params: library_name, reduce_type, keep_dims, in_shape, input_tensor, axes_for_reduction, out_shape, reference
/* reduce_test_params{ "CPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ 0 },{ 1, 3, 4 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } },
reduce_test_params{ "CPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ -3 },{ 1, 3, 4 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } },
reduce_test_params{ "CPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ 2 },{ 2, 3, 1 },{ 10, 26, 42, 58, 74, 90 } },
reduce_test_params{ "CPU", "FP32", "ReduceSum", true,{ 2, 3, 4, 1, 1 },{},{ 2 },{ 2, 3, 1, 1, 1 },{ 10, 26, 42, 58, 74, 90 } },
reduce_test_params{ "CPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ -1 },{ 2, 3, 1 },{ 10, 26, 42, 58, 74, 90 } },
reduce_test_params{ "CPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ 0, 2 },{ 1, 3, 1 },{ 68, 100, 132 } },
reduce_test_params{ "CPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ 1, 2 },{ 2, 1, 1 },{ 78, 222 } },
reduce_test_params{ "CPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ 2, 1 },{ 2, 1, 1 },{ 78, 222 } },
reduce_test_params{ "CPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ 0, 1, 2 },{ 1, 1, 1 },{ 300 } },
reduce_test_params{ "CPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ 0, -2, 2 },{ 1, 1, 1 },{ 300 } },*/
reduce_test_params{ "CPU", "I32", "ReduceSum", true,{ 2, 3, 4 },{},{ 2, 2, 0, 2, 0 },{ 1, 3, 1 },{ 68, 100, 132 } },
reduce_test_params{ "CPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 0 },{ 3, 4 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } },
reduce_test_params{ "CPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ -3 },{ 3, 4 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } },
reduce_test_params{ "CPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 2 },{ 2, 3 },{ 10, 26, 42, 58, 74, 90 } },
reduce_test_params{ "CPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ -1 },{ 2, 3 },{ 10, 26, 42, 58, 74, 90 } },
reduce_test_params{ "CPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 0, 2 },{ 3 },{ 68, 100, 132 } },
reduce_test_params{ "CPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 1, 2 },{ 2 },{ 78, 222 } },
reduce_test_params{ "CPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 2, 1 },{ 2 },{ 78, 222 } },
reduce_test_params{ "CPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 0, 1, 2 },{},{ 300 } },
reduce_test_params{ "CPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 0, -2, 2 },{},{ 300 } },
reduce_test_params{ "CPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 2, 2, 0, 2, 0 },{ 3 },{ 68, 100, 132 } },
reduce_test_params{ "CPU", "I32", "ReduceSum", true,{ 1, 2, 3, 4, 1 },{},{ 1 },{ 1, 1, 3, 4, 1 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } },
reduce_test_params{ "CPU", "I32", "ReduceSum", false,{ 1, 2, 3, 4, 1 },{},{ 1 },{ 1, 3, 4, 1 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } }
));
// Remaining Reduce* variants (And/L1/L2/LogSum/LogSumExp/Max/Mean/Min/Or/
// Prod/SumSquare). Entries with an empty reference vector presumably let the
// shared test compute the expected values itself — TODO confirm in
// reduce_tests.hpp.
INSTANTIATE_TEST_CASE_P(
smoke_CPU_TestsReduce, ReduceTestsShared,
::testing::Values(
// Params: library_name, reduce_type, keep_dims, in_shape, input_tensor, axes_for_reduction, out_shape, reference
reduce_test_params{ "CPU", "I32", "ReduceAnd", true,{ 2, 2, 2 },{1, 0, 1, 1, 0, 1, 1, 0},{ 2 },{ 2, 2, 1 },{ 0, 1, 0, 0} },
reduce_test_params{ "CPU", "I32", "ReduceAnd", false, { 2, 2, 2 },{1, 0, 1, 1, 0, 1, 1, 0},{ 0, 1, 2 },{ },{ 0 } },
reduce_test_params{ "CPU", "I32", "ReduceL1", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{ } },
reduce_test_params{ "CPU", "I32", "ReduceL1", true, { 3, 2, 2 },{},{ 2 },{ 3, 2, 1 },{ 3, 7, 11, 15, 19, 23 } },
reduce_test_params{ "CPU", "I32", "ReduceL1", false, { 3, 2, 2 },{},{ 2 },{ 3, 2 },{ 3, 7, 11, 15, 19, 23 } },
reduce_test_params{ "CPU", "I32", "ReduceL1", false, { 3, 2, 2 },{},{ 0, 1, 2 },{ },{ 78 } },
reduce_test_params{ "CPU", "I32", "ReduceL2", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{} },
reduce_test_params{ "CPU", "I32", "ReduceL2", true,{ 3, 2, 2 },{},{ 2 },{ 3, 2, 1 },{ 2.23606798f, 5.f, 7.81024968f, 10.63014581f, 13.45362405f, 16.2788206f } },
reduce_test_params{ "CPU", "I32", "ReduceL2", false,{ 3, 2, 2 },{},{ 2 },{ 3, 2 },{ 2.23606798f, 5.f, 7.81024968f, 10.63014581f, 13.45362405f, 16.2788206f } },
reduce_test_params{ "CPU", "I32", "ReduceL2", false,{ 3, 2, 2 },{},{ 0, 1, 2 },{ },{ 25.49509757f } },
reduce_test_params{ "CPU", "I32", "ReduceLogSum", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{} },
reduce_test_params{ "CPU", "I32", "ReduceLogSum", true,{ 3, 2, 2 },{ },{ 1 },{ 3, 1, 2 },{ } },
reduce_test_params{ "CPU", "I32", "ReduceLogSum", false,{ 3, 2, 2 },{ },{ 1 },{ 3, 2 },{ } },
reduce_test_params{ "CPU", "I32", "ReduceLogSum", false,{ 3, 2, 2 },{ },{ 0, 1, 2 },{},{ } },
reduce_test_params{ "CPU", "I32", "ReduceLogSumExp", true,{ 5, 5, 2 },{},{ 2 },{ 5, 5, 1 },{} },
reduce_test_params{ "CPU", "I32", "ReduceLogSumExp", true,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 1, 2 },{ 20.f, 2.31326175f, 40.00004578f, 2.31326175f, 60.00671387f, 2.31326175f } },
reduce_test_params{ "CPU", "I32", "ReduceLogSumExp", false,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 2 },{ 20.f, 2.31326175f, 40.00004578f, 2.31326175f, 60.00671387f, 2.31326175f } },
reduce_test_params{ "CPU", "I32", "ReduceLogSumExp", false,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 0, 1, 2 },{},{ 60.00671387f } },
reduce_test_params{ "CPU", "I32", "ReduceMax", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{} },
reduce_test_params{ "CPU", "I32", "ReduceMax", true,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 1, 2 },{ 20, 2, 40, 2, 60, 2 } },
reduce_test_params{ "CPU", "I32", "ReduceMax", false,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 2 },{ 20, 2, 40, 2, 60, 2 } },
reduce_test_params{ "CPU", "I32", "ReduceMax", false,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 0, 1, 2 },{},{ 60 } },
reduce_test_params{ "CPU", "I32", "ReduceMean", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{} },
reduce_test_params{ "CPU", "I32", "ReduceMean", true, { 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 1, 2 },{ 12.5f, 1.5f, 35.f, 1.5f, 57.5f, 1.5f } },
reduce_test_params{ "CPU", "I32", "ReduceMean", false, { 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 2 },{ 12.5f, 1.5f, 35.f, 1.5f, 57.5f, 1.5f } },
reduce_test_params{ "CPU", "I32", "ReduceMean", false, { 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 0, 1, 2 },{ },{ 18.25f } },
reduce_test_params{ "CPU", "I32", "ReduceMin", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{} },
reduce_test_params{ "CPU", "I32", "ReduceMin", true,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 1, 2 },{ 5, 1, 30, 1, 55, 1 } },
reduce_test_params{ "CPU", "I32", "ReduceMin", false,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 2 },{ 5, 1, 30, 1, 55, 1 } },
reduce_test_params{ "CPU", "I32", "ReduceMin", false,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 0, 1, 2 },{},{ 1 } },
reduce_test_params{ "CPU", "I32", "ReduceOr", true,{ 2, 2, 2 },{1, 0, 1, 1, 0, 0, 1, 0},{ 2 },{ 2, 2, 1 },{1, 1, 0, 1 } },
reduce_test_params{ "CPU", "I32", "ReduceOr", false, { 2, 2, 2 },{},{ 0, 1, 2 },{ },{ 1 } },
reduce_test_params{ "CPU", "I32", "ReduceProd", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{} },
reduce_test_params{ "CPU", "I32", "ReduceProd", true,{ 3, 2, 2 },{},{ 1 },{ 3, 1, 2 },{ 3, 8, 35, 48, 99, 120 } },
reduce_test_params{ "CPU", "I32", "ReduceProd", false,{ 3, 2, 2 },{},{ 1 },{ 3, 2 },{ 3, 8, 35, 48, 99, 120 } },
reduce_test_params{ "CPU", "I32", "ReduceProd", false,{ 3, 2, 2 },{},{ 0, 1, 2 },{ },{ 4.790016e+08 } },
reduce_test_params{ "CPU", "I32", "ReduceSumSquare", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{} },
reduce_test_params{ "CPU", "I32", "ReduceSumSquare", true, { 3, 2, 2 },{},{ 1 },{ 3, 1, 2 },{ 10, 20, 74, 100, 202, 244 } },
reduce_test_params{ "CPU", "I32", "ReduceSumSquare", false, { 3, 2, 2 },{},{ 1 },{ 3, 2 },{ 10, 20, 74, 100, 202, 244 } },
reduce_test_params{ "CPU", "I32", "ReduceSumSquare", false, { 3, 2, 2 },{},{ 0, 1, 2 },{ },{ 650 } }
));

View File

@ -1,45 +0,0 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "resample_tests.hpp"
// Resample cases: {device, input shape, scale factor, caffe resample mode}.
// Covers 4D/5D inputs in both NEAREST and LINEAR modes, with up- and
// down-scaling factors.
INSTANTIATE_TEST_CASE_P(
smoke_CPU_TestsResample, ResampleTests,
::testing::Values(
// 4D nearest
resample_test_params{"CPU", {2, 64, 15, 25}, 1.f, "caffe.ResampleParameter.NEAREST"},
resample_test_params{"CPU", {2, 64, 10, 20}, 0.25f, "caffe.ResampleParameter.NEAREST"},
resample_test_params{"CPU", {1, 1, 10, 20}, 0.5f, "caffe.ResampleParameter.NEAREST"},
resample_test_params{"CPU", {2, 3, 15, 25}, 1.f, "caffe.ResampleParameter.NEAREST"},
resample_test_params{"CPU", {2, 3, 10, 20}, 0.25f, "caffe.ResampleParameter.NEAREST"},
resample_test_params{"CPU", {1, 1, 10, 13}, 0.52f, "caffe.ResampleParameter.NEAREST"},
//// 4D linear
resample_test_params{"CPU", {2, 64, 15, 25}, 1.f, "caffe.ResampleParameter.LINEAR"},
resample_test_params{"CPU", {2, 64, 10, 20}, 0.25f, "caffe.ResampleParameter.LINEAR"},
resample_test_params{"CPU", {1, 1, 15, 25}, 0.5, "caffe.ResampleParameter.LINEAR"},
resample_test_params{"CPU", {1, 3, 15, 25}, 0.5, "caffe.ResampleParameter.LINEAR"},
resample_test_params{"CPU", {2, 5, 3, 3}, 3.0f, "caffe.ResampleParameter.LINEAR"},
resample_test_params{"CPU", {2, 4, 10, 20}, 2.0f, "caffe.ResampleParameter.LINEAR"},
resample_test_params{"CPU", {2, 20, 30, 30}, 3.0f, "caffe.ResampleParameter.LINEAR"},
resample_test_params{"CPU", {2, 20, 3, 6}, 3.0f, "caffe.ResampleParameter.LINEAR"},
//// 5D nearest
resample_test_params{ "CPU", {1, 64, 20, 15, 25}, 1.f, "caffe.ResampleParameter.NEAREST" },
resample_test_params{ "CPU", {1, 64, 15, 10, 20}, 0.25f, "caffe.ResampleParameter.NEAREST" },
resample_test_params{ "CPU", {1, 64, 10, 10, 20}, 0.5f, "caffe.ResampleParameter.NEAREST" },
resample_test_params{ "CPU", {1, 3, 20, 15, 25}, 1.f, "caffe.ResampleParameter.NEAREST" },
resample_test_params{ "CPU", {1, 3, 15, 10, 20}, 0.25f, "caffe.ResampleParameter.NEAREST" },
resample_test_params{ "CPU", {2, 64, 20, 15, 25}, 1.f, "caffe.ResampleParameter.NEAREST" },
resample_test_params{ "CPU", {2, 64, 15, 10, 20}, 0.25f, "caffe.ResampleParameter.NEAREST" },
resample_test_params{ "CPU", {2, 64, 10, 10, 20}, 0.5f, "caffe.ResampleParameter.NEAREST" },
resample_test_params{ "CPU", {2, 3, 20, 15, 25}, 1.f, "caffe.ResampleParameter.NEAREST" },
resample_test_params{ "CPU", {2, 3, 15, 10, 20}, 0.25f, "caffe.ResampleParameter.NEAREST" },
// 5D linear
resample_test_params{ "CPU", {1, 8, 5, 2, 4}, 0.2f, "caffe.ResampleParameter.LINEAR" },
resample_test_params{ "CPU", {1, 8, 10, 10, 20}, 0.25f, "caffe.ResampleParameter.LINEAR" },
resample_test_params{ "CPU", {1, 2, 16, 12, 20}, 4.f, "caffe.ResampleParameter.LINEAR" },
resample_test_params{ "CPU", {2, 16, 15, 10, 20}, 1.f, "caffe.ResampleParameter.LINEAR" },
resample_test_params{ "CPU", {2, 2, 4, 10, 20}, 0.25f, "caffe.ResampleParameter.LINEAR" },
resample_test_params{ "CPU", {2, 4, 15, 10, 20}, 1.f, "caffe.ResampleParameter.LINEAR" },
resample_test_params{ "CPU", {2, 8, 16, 12, 20}, 4.f, "caffe.ResampleParameter.LINEAR" },
resample_test_params{ "CPU", {2, 16, 10, 10, 20}, 0.25f, "caffe.ResampleParameter.LINEAR" }));

View File

@ -1,14 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "softmax_tests.hpp"
// SoftMax smoke cases; the third entry presumably exercises a 2D (NC) layout
// variant — TODO confirm against softmax_tests.hpp.
softmax_test_params softmax_only_test_cases[] = {
softmax_test_params("CPU", case_1),
softmax_test_params("CPU", case_8),
softmax_test_params("CPU", case_8_nc, "2D"),
};
INSTANTIATE_TEST_CASE_P(
smoke_CPU_TestsSoftmax, SoftmaxOnlyTest, ::testing::ValuesIn(softmax_only_test_cases)/*, getTestCaseName*/);

View File

@ -1,15 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ti_tests.hpp"
// TensorIterator cases: {device, batch(?), precision}.
// NOTE(review): entries 1 and 3 are identical (CPU/1/FP32); given entry 4 is
// CPU/8/FP16, entry 3 was presumably meant to be CPU/1/FP16 — worth verifying.
ti_test_params ti_test_cases[] = {{std::string("CPU"), 1, InferenceEngine::Precision(InferenceEngine::Precision::FP32)},
{std::string("CPU"), 8, InferenceEngine::Precision(InferenceEngine::Precision::FP32)},
{std::string("CPU"), 1, InferenceEngine::Precision(InferenceEngine::Precision::FP32)},
{std::string("CPU"), 8, InferenceEngine::Precision(InferenceEngine::Precision::FP16)}};
RUN_CASE_P_WITH_SUFFIX(CPU, _smoke, TITest, ti_test_cases);
RUN_CASE_P_WITH_SUFFIX(CPU, _smoke, TITest2, ti_test_cases);

View File

@ -1,26 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "tile_tests.hpp"
// Tile smoke cases: device paired with case_N descriptors, presumably
// declared in tile_tests.hpp — TODO confirm.
tile_test_params tile_test_cases[] = {
tile_test_params("CPU", case_1),
tile_test_params("CPU", case_2),
tile_test_params("CPU", case_3),
tile_test_params("CPU", case_4),
tile_test_params("CPU", case_5),
tile_test_params("CPU", case_6),
tile_test_params("CPU", case_7),
tile_test_params("CPU", case_8),
tile_test_params("CPU", case_9),
tile_test_params("CPU", case_10),
tile_test_params("CPU", case_11),
tile_test_params("CPU", case_12),
tile_test_params("CPU", case_13),
tile_test_params("CPU", case_14),
tile_test_params("CPU", case_15),
tile_test_params("CPU", case_16),
};
INSTANTIATE_TEST_CASE_P(smoke_CPU_TestsGeneralTile, TileTest, ::testing::ValuesIn(tile_test_cases));

View File

@ -1,23 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gather_tree_tests.hpp"
// GatherTree smoke cases. in_out_shape is [max_time, batch, beam_width];
// step_idx/parent_idx are flattened tensors of that shape, max_seq_len has
// one entry per batch, end_token is a scalar.
INSTANTIATE_TEST_CASE_P(
smoke_CPU_TestsGatherTree, GatherTreeTests,
::testing::Values(
// Params: in_out_shape, step_idx, parent_idx, max_seq_len, end_token, reference
gather_tree_test_params{ {3, 2, 3 }, {1, 2, 3, 2, 3, 4, 4, 5, 6, 5, 6, 7, 7, 8, 9, 8, 9, 10},
{0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 0, 2, 1, 2, 2, 1, 1},
{3, 3 }, {11}, {2, 2, 2, 2, 4, 4, 6, 5, 6, 7, 6, 6, 7, 8, 9, 8, 9, 10}, "CPU" },
gather_tree_test_params{ {4, 1, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1}, {0, 0, 0, 0, 1, 1, 2, 1, 2, -1, -1, -1},
{3}, {10}, {2, 2, 2, 6, 5, 6, 7, 8, 9, 10, 10, 10}, "CPU" },
gather_tree_test_params{ {4, 1, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10}, {0, 0, 0, 0, 1, 1, 2, 1, 2, 1, 1, 1},
{4}, {10}, {2, 2, 2, 5, 5, 5, 8, 8, 8, 10, 10, 10}, "CPU" },
gather_tree_test_params{ {5, 1, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 10, 3, 2, 10, 10}, {0, 0, 0, 0, 1, 1, 2, 1, 2, 1, 1, 1, 2, 0, 1},
{5}, {10}, {2, 2, 2, 5, 5, 5, 8, 8, 8, 3, 1, 10, 2, 10, 10}, "CPU" },
gather_tree_test_params{ {4, 2, 3}, {1, 2, 3, 2, 3, 4, 4, 5, 6, 5, 6, 7, 7, 8, 9, 8, 9, 10, 0, 0, 0, 11, 12, 0},
{0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 2, 1, 2, 2, 0, 1, -1, -1, -1, 0, 1, 0},
{3, 4}, {11}, {2, 2, 2, 2, 3, 2, 6, 5, 6, 7, 5, 7, 7, 8, 9, 8, 9, 8, 11, 11, 11, 11, 12, 0}, "CPU" }
));

View File

@ -1,179 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <ie_core.hpp>
#include "tests_common.hpp"
#include "single_layer_common.hpp"
#include "ir_gen_helper.hpp"
using namespace ::testing;
using namespace InferenceEngine;
using namespace single_layer_tests;
// Geometry shared by all activation-layer tests: input plane W x H x C plus
// the negative-slope coefficient used by the ReLU reference implementation.
struct activation_base_params {
struct {
size_t w;   // input width
size_t h;   // input height
size_t c;   // input channel count
} in;
float n_clope;  // ReLU negative slope ("n_clope" is presumably a typo for "n_slope"; kept for compatibility)
};
// One test case: geometry plus the device to run on and the activation kind
// ("exp", "not", "sin", "sinh", "cos", "cosh"; anything else means ReLU).
struct activation_test_params : activation_base_params {
std::string device_name;
std::string activationType;
activation_test_params(std::string name, activation_base_params params, std::string activationType) :
activation_base_params(params), device_name(name), activationType(activationType) {}
};
// Reference (CPU) implementation of the tested activation: applies the
// element-wise function selected by prm.activationType to src_data and
// writes the result to dst_data. Unknown type strings fall back to leaky
// ReLU with negative slope prm.n_clope.
template <typename data_t>
void ref_activation(const data_t *src_data, data_t *dst_data, activation_test_params prm)
{
    // The original nested (c, h, w) loops enumerate exactly the flat indices
    // [0, C*H*W) in order, so a single linear pass is equivalent.
    const size_t total = prm.in.c * prm.in.h * prm.in.w;
    const std::string &kind = prm.activationType;

    for (size_t i = 0; i < total; i++) {
        const data_t x = src_data[i];
        data_t y;
        if (kind == "exp") {
            y = exp(x);
        } else if (kind == "not") {
            y = !x;
        } else if (kind == "sin") {
            y = sin(x);
        } else if (kind == "sinh") {
            y = sinh(x);
        } else if (kind == "cos") {
            y = cos(x);
        } else if (kind == "cosh") {
            y = cosh(x);
        } else {
            // Default: leaky ReLU.
            y = x >= 0.0 ? x : x * prm.n_clope;
        }
        dst_data[i] = y;
    }
}
// Parameterized fixture: builds a one-layer IR with the requested activation,
// runs it on the target device, and compares against ref_activation.
class ActivationTest: public TestsCommon,
public WithParamInterface<activation_test_params> {
// IR snippet for the single activation layer; placeholders are substituted
// in getModel().
std::string layers_t = R"V0G0N(
<layer name="_ACTIVATION_TYPE_" id="1" type="_ACTIVATION_TYPE_" precision="FP32">
<input>
<port id="0">
<dim>_IN_</dim>
<dim>_IC_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
</input>
<output>
<port id="1">
<dim>_IN_</dim>
<dim>_IC_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
</output>
</layer>
)V0G0N";
std::string edges_t = R"V0G0N(
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
)V0G0N";
// Renders the IR XML for the given parameters: maps the lowercase activation
// key to the IR layer type (defaulting to ReLU) and fills in the dimensions.
std::string getModel(activation_test_params p) {
std::string model = layers_t;
if (p.activationType == "exp")
REPLACE_WITH_STR(model, "_ACTIVATION_TYPE_", "Exp");
else if (p.activationType == "not")
REPLACE_WITH_STR(model, "_ACTIVATION_TYPE_", "Not");
else if (p.activationType == "sin")
REPLACE_WITH_STR(model, "_ACTIVATION_TYPE_", "Sin");
else if (p.activationType == "sinh")
REPLACE_WITH_STR(model, "_ACTIVATION_TYPE_", "Sinh");
else if (p.activationType == "cos")
REPLACE_WITH_STR(model, "_ACTIVATION_TYPE_", "Cos");
else if (p.activationType == "cosh")
REPLACE_WITH_STR(model, "_ACTIVATION_TYPE_", "Cosh");
else
REPLACE_WITH_STR(model, "_ACTIVATION_TYPE_", "ReLU"); // Default value
REPLACE_WITH_NUM(model, "_IN_", 1);
REPLACE_WITH_NUM(model, "_IW_", p.in.w);
REPLACE_WITH_NUM(model, "_IH_", p.in.h);
REPLACE_WITH_NUM(model, "_IC_", p.in.c);
model = IRTemplateGenerator::getIRTemplate(p.activationType + "_Only", {1lu, p.in.c, p.in.h, p.in.w}, "FP32", model, edges_t);
return model;
}
protected:
// The whole check runs in SetUp (legacy pattern); the TEST_P body is empty.
virtual void SetUp() {
try {
activation_test_params p = ::testing::WithParamInterface<activation_test_params>::GetParam();
std::string model = getModel(p);
// Build and load the network on the requested device.
Core ie;
CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
InputsDataMap in_info_map = net.getInputsInfo();
OutputsDataMap out_info_map = net.getOutputsInfo();
ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
InferRequest inferRequest = executable_network.CreateInferRequest();
SizeVector dims_src = {1,
p.in.c,
p.in.h,
p.in.w};
// Fill the input with generated data.
Blob::Ptr inputBlob = inferRequest.GetBlob(in_info_map.begin()->first);
float* src = inputBlob->buffer().as<float*>();
fill_data(src, inputBlob->size());
SizeVector dims_dst = dims_src;
Blob::Ptr outputBlob = inferRequest.GetBlob(out_info_map.begin()->first);
// Compute the reference on the same input, then infer and compare.
TBlob<float> dst_ref({ Precision::FP32, dims_dst, Layout::NCHW });
dst_ref.allocate();
inferRequest.Infer();
ref_activation<float>(src, dst_ref.data(), p);
const float* res = outputBlob->buffer().as<float*>();
const float* ref = dst_ref.data();
compare(res, ref, outputBlob->size());
} catch (const InferenceEngine::details::InferenceEngineException &e) {
FAIL() << e.what();
}
}
};
#define case_1 activation_base_params({{228, 228, 3}, 0.0})
TEST_P(ActivationTest, TestsActivationFunctions) {}
// Builds a readable gtest case name: <device>_w<W>_h<H>_c<C>_<activation>.
std::string getTestCaseName(testing::TestParamInfo<activation_test_params> obj) {
    const activation_test_params &p = obj.param;
    std::string name = p.device_name;
    name += "_w" + std::to_string(p.in.w);
    name += "_h" + std::to_string(p.in.h);
    name += "_c" + std::to_string(p.in.c);
    name += "_" + p.activationType;
    return name;
}

View File

@ -1,201 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <ie_core.hpp>
#include <cmath>
#include "tests_common.hpp"
#include "single_layer_common.hpp"
using namespace ::testing;
using namespace InferenceEngine;
using namespace std;
// Product of dims[start_ind, end_ind), truncated to int.
// An empty range yields 1.
static inline int count(std::vector<size_t> dims, size_t start_ind, size_t end_ind) {
    size_t product = 1;
    while (start_ind < end_ind) {
        product *= dims[start_ind];
        ++start_ind;
    }
    return static_cast<int>(product);
}
// Product of dims[start_ind, dims.size()) — i.e. the tail of the shape.
static inline int count(std::vector<size_t> dims, size_t start_ind = 0) {
    return count(dims, start_ind, dims.size());
}
// One ArgMax/ArgMin test case.
struct argMaxMinTF_test_params {
std::string device_name;           // target plugin ("CPU")
std::string layer_type;            // "ArgMax" or "ArgMin"
InferenceEngine::SizeVector in_dim;  // input tensor shape
std::vector<float> in;             // flattened input data
int has_axis;                      // nonzero: reduce along `axis`; zero: reduce all dims but 0
int out_max_val;                   // nonzero: emit values (or index/value pairs when no axis)
size_t top_k;                      // number of extreme elements to report per slice
int axis;                          // reduction axis; negative values count from the back
InferenceEngine::SizeVector ref_dim; // expected output shape
std::vector<float> ref;            // expected output data
};
// Reference ArgMax/ArgMin: for every slice along the reduction axis, selects
// the top_k extreme values and writes indices and/or values, mirroring the
// Caffe-style ArgMax layer semantics.
//  - p.has_axis: reduce along p.axis (negative axes count from the back);
//    otherwise reduce over everything except dim 0.
//  - p.out_max_val: emit values per axis, or index/value pairs when no axis.
// The selection is a partial bubble pass with a strict comparison, which keeps
// ties in original-index order; do not replace it with std::partial_sort
// (not stable for ties).
// Fix vs. original: p.top_k (size_t) and src_vector.size() were mixed into
// signed loops/comparisons; explicit casts remove the implicit conversions
// without changing behavior.
static void ref_argmax(float *src_data, float* dst_data, argMaxMinTF_test_params p) {
    int dim, axis_dist;
    if (p.has_axis) {
        // Wrap negative axes, numpy-style.
        int axis_ = (p.axis < 0) ? p.axis + static_cast<int>(p.in_dim.size()) : p.axis;
        dim = static_cast<int>(p.in_dim[axis_]);
        axis_dist = count(p.in_dim, axis_) / dim;
    } else {
        dim = count(p.in_dim, 1);
        axis_dist = 1;
    }
    int num = count(p.in_dim) / dim;
    const int top_k = static_cast<int>(p.top_k);
    std::vector<std::pair<float, int> > src_vector(dim);
    for (int i = 0; i < num; ++i) {
        // Gather the slice as (value, original index) pairs.
        for (int j = 0; j < dim; ++j) {
            src_vector[j] = std::make_pair(
                src_data[(i / axis_dist * dim + j) * axis_dist + i % axis_dist], j);
        }
        // Partial bubble sort: after pass j, positions [0, j] hold the j+1
        // most extreme elements; only the first top_k positions are read below.
        if (p.layer_type == "ArgMax") {
            for (int j = 0; j < top_k; j++) {
                for (int k = static_cast<int>(src_vector.size()) - 1; k > j; k--) {
                    if (src_vector[k].first > src_vector[k - 1].first) {
                        std::pair<float, int> tmp = src_vector[k];
                        src_vector[k] = src_vector[k - 1];
                        src_vector[k - 1] = tmp;
                    }
                }
            }
        } else {
            for (int j = 0; j < top_k; j++) {
                for (int k = static_cast<int>(src_vector.size()) - 1; k > j; k--) {
                    if (src_vector[k].first < src_vector[k - 1].first) {
                        std::pair<float, int> tmp = src_vector[k];
                        src_vector[k] = src_vector[k - 1];
                        src_vector[k - 1] = tmp;
                    }
                }
            }
        }
        // Scatter the top_k winners into the output layout.
        for (int j = 0; j < top_k; ++j) {
            if (p.out_max_val) {
                if (p.has_axis) {
                    // Produces max_val per axis
                    dst_data[(i / axis_dist * top_k + j) * axis_dist + i % axis_dist] = src_vector[j].first;
                } else {
                    // Produces max_ind and max_val
                    dst_data[2 * i * top_k + j] = src_vector[j].second;
                    dst_data[2 * i * top_k + top_k + j] = src_vector[j].first;
                }
            } else {
                // Produces max_ind per axis
                dst_data[(i / axis_dist * top_k + j) * axis_dist + i % axis_dist] = src_vector[j].second;
            }
        }
    }
}
// Parameterized fixture: generates a two-layer IR (Input -> ArgMax/ArgMin),
// runs it on the target device, and compares against ref_argmax.
class ArgMaxMinTFTests : public TestsCommon, public WithParamInterface<argMaxMinTF_test_params> {
// IR template; _IDIM_/_OUT_/_AXIS_ and the scalar attributes are substituted
// in getModel().
std::string model_t = R"V0G0N(
<net Name="ArgMin_net" version="2" precision="FP32" batch="1">
<layers>
<layer name="input" type="Input" precision="FP32" id="1">
<output>
<port id="1">
_IDIM_
</port>
</output>
</layer>
<layer name="ArgMinTest" id="2" type="_LAYER_TYPE_" precision="FP32">
<data top_k="_TOP_K_" out_max_val="_OUT_MAX_VAL_" _AXIS_/>
<input>
<port id="1">
_IDIM_
</port>
</input>
<output>
<port id="2">
_OUT_
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
</edges>
</net>
)V0G0N";
// Renders the IR XML: expands input/output <dim> lists from the shapes and
// fills in the layer type and attributes (axis only when has_axis is set).
std::string getModel(argMaxMinTF_test_params p) {
std::string model = model_t;
std::string inDim;
std::string out;
for (auto& dim : p.in_dim) {
inDim += "<dim>";
inDim += std::to_string(dim) + "</dim>\n";
}
for (auto& dst : p.ref_dim) {
out += "<dim>";
out += std::to_string(dst) + "</dim>\n";
}
REPLACE_WITH_STR(model, "_LAYER_TYPE_", p.layer_type);
REPLACE_WITH_STR(model, "_IDIM_", inDim);
REPLACE_WITH_NUM(model, "_TOP_K_", p.top_k);
REPLACE_WITH_NUM(model, "_OUT_MAX_VAL_", p.out_max_val);
std::string axis;
if (p.has_axis)
axis += "axis=\"" + std::to_string(p.axis) + "\"";
REPLACE_WITH_STR(model, "_AXIS_", axis);
REPLACE_WITH_STR(model, "_OUT_", out);
return model;
}
protected:
virtual void TearDown() {
}
// The whole check runs in SetUp (legacy pattern); the TEST_P body is empty.
virtual void SetUp() {
try {
TestsCommon::SetUp();
argMaxMinTF_test_params p = ::testing::WithParamInterface<argMaxMinTF_test_params>::GetParam();
std::string model = getModel(p);
// Build and load the network on the requested device.
Core ie;
CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
InputsDataMap in_info_map = net.getInputsInfo();
OutputsDataMap out_info_map = net.getOutputsInfo();
ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
InferRequest inferRequest = executable_network.CreateInferRequest();
// Copy the case's input data into the input blob.
Blob::Ptr inputBlob = inferRequest.GetBlob(in_info_map.begin()->first);
float* inputData = inputBlob->buffer().as<float*>();
memcpy(inputData, &p.in[0], sizeof(float)*p.in.size());
// Compute the reference, then infer and compare.
TBlob<float> dst_ref(out_info_map.begin()->second->getTensorDesc());
dst_ref.allocate();
ref_argmax(inputData, dst_ref.data(), p);
inferRequest.Infer();
Blob::Ptr outputBlob = inferRequest.GetBlob(out_info_map.begin()->first);
// Check results
compare(outputBlob->buffer().as<float*>(), dst_ref.buffer().as<float*>(), outputBlob->size());
} catch (const InferenceEngine::details::InferenceEngineException &e) {
FAIL() << e.what();
}
}
};
TEST_P(ArgMaxMinTFTests, TestsArgMaxMin) {}

View File

@ -1,589 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <ie_core.hpp>
#include <legacy/details/ie_cnn_network_iterator.hpp>
#include "tests_common.hpp"
#include "single_layer_common.hpp"
#include <single_layer_common.hpp>
#include <string>
#include "conv_ref.hpp"
#include "common_test_utils/common_layers_params.hpp"
using namespace ::testing;
using namespace InferenceEngine;
using std::vector;
// Geometry of one convolution test case. Spatial vectors are indexed via the
// X_AXIS/Y_AXIS/Z_AXIS constants; an out_dims entry of 0 means "derive it from
// input/kernel/stride/padding" (see calculateOutDim in the fixture).
struct conv_base_params {
    vector<size_t> in_dims;     // input shape: {N, C, H, W} or {N, C, D, H, W}
    vector<size_t> kernel;      // kernel sizes per spatial axis
    vector<size_t> strides;     // strides per spatial axis
    vector<size_t> pads_begin;  // leading paddings per spatial axis
    vector<size_t> pads_end;    // trailing paddings per spatial axis
    vector<size_t> dilations;   // dilations per spatial axis
    size_t out_c;               // number of output channels
    size_t grp_c;               // number of groups
    vector<size_t> out_dims;    // expected spatial output dims (0 = computed)
};
// Binds a target device to a convolution geometry; this is the gtest parameter
// type for all convolution fixtures below.
struct conv_test_params : conv_base_params {
    std::string device_name;

    std::string getDeviceName() const {
        return device_name;
    }
    conv_test_params(std::string name, conv_base_params params) :
            conv_base_params(params), device_name(name) {}
};
// Base fixture: builds a one-layer Convolution IR (4D or 5D, weights either
// inline or as Const-layer inputs), runs it on the parameterized device, and
// compares the result against ref_conv_common computed on the host.
class ConvolutionOnlyTest : public TestsCommon,
                            public WithParamInterface<conv_test_params> {
    // IR template: 4D input, weights/biases referenced as inline blobs.
    std::string model_t_4D = R"V0G0N(
<net name="Convolution_Only" version="3" precision="FP32" batch="1">
<layers>
<layer name="in1" type="Input" precision="FP32" id="0">
<output>
<port id="0">
<dim>_IN_</dim>
<dim>_IC_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
</output>
</layer>
<layer name="conv1" id="1" type="Convolution" precision="FP32">
<convolution strides="_KS_"
pads_begin="_PB_" pads_end="_PE_"
kernel="_K_"
dilations="_DL_"
output="_OC_" group="_GC_"/>
<weights offset="0" size="_S1_" />
<biases offset="_S1_" size="_S2_" />
<input>
<port id="1">
<dim>_IN_</dim>
<dim>_IC_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
</input>
<output>
<port id="2">
<dim>_IN_</dim>
<dim>_OC_</dim>
<dim>_OH_</dim>
<dim>_OW_</dim>
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
</edges>
</net>
)V0G0N";

    // IR template: 4D input, weights/biases fed through Const layers.
    std::string model_t_4D_blobs_as_inputs = R"V0G0N(
<net name="Convolution_Only" version="3" precision="FP32" batch="1">
<layers>
<layer name="in1" type="Input" precision="FP32" id="0">
<output>
<port id="0">
<dim>_IN_</dim>
<dim>_IC_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
</output>
</layer>
<layer name="wei" type="Const" precision="FP32" id="1">
<output>
<port id="0">
<dim>_OC_</dim>
<dim>_ICG_</dim>
<dim>_KH_</dim>
<dim>_KW_</dim>
</port>
</output>
<blobs>
<custom offset="0" size="_S1_"/>
</blobs>
</layer>
<layer name="bias" type="Const" precision="FP32" id="2">
<output>
<port id="0">
<dim>_OC_</dim>
</port>
</output>
<blobs>
<custom offset="_S1_" size="_S2_"/>
</blobs>
</layer>
<layer name="conv1" id="3" type="Convolution" precision="FP32">
<convolution strides="_KS_"
pads_begin="_PB_" pads_end="_PE_"
kernel="_K_"
dilations="_DL_"
output="_OC_" group="_GC_"/>
<input>
<port id="1">
<dim>_IN_</dim>
<dim>_IC_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
<port id="2">
<dim>_OC_</dim>
<dim>_ICG_</dim>
<dim>_KH_</dim>
<dim>_KW_</dim>
</port>
<port id="3">
<dim>_OC_</dim>
</port>
</input>
<output>
<port id="4">
<dim>_IN_</dim>
<dim>_OC_</dim>
<dim>_OH_</dim>
<dim>_OW_</dim>
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="3" to-port="1"/>
<edge from-layer="1" from-port="0" to-layer="3" to-port="2"/>
<edge from-layer="2" from-port="0" to-layer="3" to-port="3"/>
</edges>
</net>
)V0G0N";

    // IR template: 5D input, weights/biases referenced as inline blobs.
    std::string model_t_5D = R"V0G0N(
<net name="Convolution_Only" version="3" precision="FP32" batch="1">
<layers>
<layer name="in1" type="Input" precision="FP32" id="0">
<output>
<port id="0">
<dim>_IN_</dim>
<dim>_IC_</dim>
<dim>_ID_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
</output>
</layer>
<layer name="conv1" id="1" type="Convolution" precision="FP32">
<convolution strides="_KS_"
pads_begin="_PB_" pads_end="_PE_"
kernel="_K_"
dilations="_DL_"
output="_OC_" group="_GC_"/>
<weights offset="0" size="_S1_" />
<biases offset="_S1_" size="_S2_" />
<input>
<port id="1">
<dim>_IN_</dim>
<dim>_IC_</dim>
<dim>_ID_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
</input>
<output>
<port id="2">
<dim>_IN_</dim>
<dim>_OC_</dim>
<dim>_OD_</dim>
<dim>_OH_</dim>
<dim>_OW_</dim>
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
</edges>
</net>
)V0G0N";

    // IR template: 5D input, weights/biases fed through Const layers.
    std::string model_t_5D_blobs_as_inputs = R"V0G0N(
<net name="Convolution_Only" version="3" precision="FP32" batch="1">
<layers>
<layer name="in1" type="Input" precision="FP32" id="0">
<output>
<port id="0">
<dim>_IN_</dim>
<dim>_IC_</dim>
<dim>_ID_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
</output>
</layer>
<layer name="wei" type="Const" precision="FP32" id="1">
<output>
<port id="0">
<dim>_OC_</dim>
<dim>_ICG_</dim>
<dim>_KD_</dim>
<dim>_KH_</dim>
<dim>_KW_</dim>
</port>
</output>
<blobs>
<custom offset="0" size="_S1_"/>
</blobs>
</layer>
<layer name="bias" type="Const" precision="FP32" id="2">
<output>
<port id="0">
<dim>_OC_</dim>
</port>
</output>
<blobs>
<custom offset="_S1_" size="_S2_"/>
</blobs>
</layer>
<layer name="conv1" id="3" type="Convolution" precision="FP32">
<convolution strides="_KS_"
pads_begin="_PB_" pads_end="_PE_"
kernel="_K_"
dilations="_DL_"
output="_OC_" group="_GC_"/>
<input>
<port id="1">
<dim>_IN_</dim>
<dim>_IC_</dim>
<dim>_ID_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
<port id="2">
<dim>_OC_</dim>
<dim>_ICG_</dim>
<dim>_KD_</dim>
<dim>_KH_</dim>
<dim>_KW_</dim>
</port>
<port id="3">
<dim>_OC_</dim>
</port>
</input>
<output>
<port id="4">
<dim>_IN_</dim>
<dim>_OC_</dim>
<dim>_OD_</dim>
<dim>_OH_</dim>
<dim>_OW_</dim>
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="3" to-port="1"/>
<edge from-layer="1" from-port="0" to-layer="3" to-port="2"/>
<edge from-layer="2" from-port="0" to-layer="3" to-port="3"/>
</edges>
</net>
)V0G0N";

protected:
    // Overridden by ConvolutionBlobsAsInputsTest to select the Const-input IR.
    virtual bool blobsAsInputs() { return false; }

    // Standard convolution output size (dilation not taken into account here).
    size_t calculateOutDim(size_t in_dim, size_t kernel, size_t stride, size_t pad_begin) {
        return (in_dim + 2lu * pad_begin - kernel) / stride + 1lu;
    }

    // Allocates the input blob and two output blobs (device result and host
    // reference) with shapes derived from the test parameters.
    void createBlobs(const conv_test_params &p, TBlob<float>::Ptr &src, TBlob<float>::Ptr &dst, TBlob<float>::Ptr &dst_ref) {
        auto in_size = p.in_dims.size();
        auto out_size = p.out_dims.size();
        // dims_dst is built in reverse (W, H, C, N) and flipped on creation.
        SizeVector dims_dst = {
            p.out_dims[out_size - 1] == 0 ?
                calculateOutDim(p.in_dims[in_size - 1], p.kernel[X_AXIS], p.strides[X_AXIS], p.pads_begin[X_AXIS]) : p.out_dims[out_size - 1],
            p.out_dims[out_size - 2] == 0 ?
                calculateOutDim(p.in_dims[in_size - 2], p.kernel[Y_AXIS], p.strides[Y_AXIS], p.pads_begin[Y_AXIS]) : p.out_dims[out_size - 2],
            p.out_c,
            1lu};
        SizeVector dims_src;
        for (int i = in_size; i > 0; i--) {
            dims_src.push_back(p.in_dims[i - 1]);
        }
        Layout layout = NCHW;
        if (in_size == 5) {
            layout = NCDHW;
            // Insert the depth dimension (position 2 of the reversed vector).
            dims_dst.insert(dims_dst.begin() + 2, p.out_dims.size() > 2 ?
                (p.out_dims[out_size - 3] == 0 ?
                    calculateOutDim(p.in_dims[in_size - 3], p.kernel[Z_AXIS], p.strides[Z_AXIS], p.pads_begin[Z_AXIS]) : p.out_dims[out_size - 3]) : 1lu);
        }
        src = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), layout));
        src->allocate();
        dst = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), layout));
        dst->allocate();
        dst_ref = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), layout));
        dst_ref->allocate();
    }

    // Allocates a single U8 blob holding the weights followed by the biases
    // (both FP32) and fills it with generated data.
    TBlob<uint8_t>::Ptr fillWeights(const conv_test_params &p) {
        auto KZ = p.kernel.size() > Z_AXIS ? p.kernel[Z_AXIS] : 1lu;
        TBlob<uint8_t> *weights_ptr = new TBlob<uint8_t>(TensorDesc(Precision::U8,
            {(p.kernel[X_AXIS] * p.kernel[Y_AXIS] * KZ * p.out_c * p.in_dims[1] / p.grp_c + p.out_c)
            * sizeof(float)}, C));
        weights_ptr->allocate();
        fill_data((float *) weights_ptr->buffer(), weights_ptr->size() / sizeof(float));
        return TBlob<uint8_t>::Ptr(weights_ptr);
    }

    // Computes the host reference convolution into dst_ref using the same
    // weights/biases blob that the network will consume.
    void calculateRef(const TBlob<uint8_t>::Ptr &weights, const conv_test_params &p, const TBlob<float>::Ptr &src,
                      TBlob<float>::Ptr &dst_ref) {
        const float *weights_data = (const float *) weights->buffer();
        size_t bias_size = p.out_c;
        size_t weights_size = weights->size() / sizeof(float) - bias_size;
        // Biases are stored directly after the weights in the shared blob.
        const float *bias_data = weights_data + weights_size;
        CommonTestUtils::conv_common_params params;
        for (int i = 0; i < p.kernel.size(); i++)
            params.kernel.insert(i, p.kernel[i]);
        for (int i = 0; i < p.strides.size(); i++)
            params.stride.insert(i, p.strides[i]);
        for (int i = 0; i < p.pads_begin.size(); i++)
            params.pads_begin.insert(i, p.pads_begin[i]);
        for (int i = 0; i < p.dilations.size(); i++)
            params.dilation.insert(i, p.dilations[i]);
        params.group = p.grp_c;
        params.out_c = p.out_c;
        ref_conv_common<float>({ src }, *dst_ref.get(), weights_data, weights_size, bias_data, bias_size, params);
    }

    // Parses the generated IR together with the weights blob.
    CNNNetwork getNetwork(const TBlob<uint8_t>::Ptr &weights, const conv_test_params &p) {
        Core ie;
        return ie.ReadNetwork(getModel(p), weights);
    }

    // Loads the network on the target device and runs one inference with the
    // given input/output blobs bound.
    virtual void infer(CNNNetwork &network, const conv_test_params &p, TBlob<float>::Ptr &src, TBlob<float>::Ptr &dst) {
        Core ie;
        ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.getDeviceName());
        InferRequest inferRequest = exeNetwork.CreateInferRequest();
        OutputsDataMap outInfo;
        outInfo = network.getOutputsInfo();
        inferRequest.SetBlob(network.getInputsInfo().begin()->first, src);
        inferRequest.SetBlob(outInfo.begin()->first, dst);
        inferRequest.Infer();
    }

    // End-to-end test body: device result must match the host reference.
    void SetUp() override {
        try {
            conv_test_params p = ::testing::WithParamInterface<conv_test_params>::GetParam();
            TBlob<float>::Ptr src, dst, dst_ref;
            createBlobs(p, src, dst, dst_ref);
            fill_data(src->data(), src->size());
            auto weights = fillWeights(p);
            calculateRef(weights, p, src, dst_ref);
            CNNNetwork network = getNetwork(weights, p);
            infer(network, p, src, dst);
            compare(*dst, *dst_ref);
        } catch (const InferenceEngine::details::InferenceEngineException &e) {
            FAIL() << e.what();
        }
    }

    // Substitutes the test parameters into the appropriate IR template.
    virtual std::string getModel(conv_test_params p) {
        std::string model;
        auto in_dims_size = p.in_dims.size();
        if (in_dims_size == 4) {
            if (blobsAsInputs())
                model = model_t_4D_blobs_as_inputs;
            else
                model = model_t_4D;
        } else if (in_dims_size == 5) {
            if (blobsAsInputs())
                model = model_t_5D_blobs_as_inputs;
            else
                model = model_t_5D;
        }
        auto out_dims_size = p.out_dims.size();
        size_t KD = p.kernel.size() > Z_AXIS ? p.kernel[Z_AXIS] : 1lu;
        size_t KH = p.kernel[Y_AXIS];
        size_t KW = p.kernel[X_AXIS];
        size_t SD = p.strides.size() > Z_AXIS ? p.strides[Z_AXIS] : 1lu;
        size_t SH = p.strides[Y_AXIS];
        size_t SW = p.strides[X_AXIS];
        size_t ID = p.in_dims.size() > 4 ? p.in_dims[in_dims_size - 3] : 1lu;
        size_t IH = p.in_dims[in_dims_size - 2];
        size_t IW = p.in_dims[in_dims_size - 1];
        size_t OD = p.out_dims.size() > 2 ? p.out_dims[out_dims_size - 3] : 1lu;
        size_t OH = p.out_dims[out_dims_size - 2];
        size_t OW = p.out_dims[out_dims_size - 1];
        // NOTE(review): PD falls back to 1 (not 0) when no Z padding is
        // provided; it is only consumed when out_dims has a depth entry, but
        // confirm a zero default was not intended.
        size_t PD = p.pads_begin.size() > Z_AXIS ? p.pads_begin[Z_AXIS] : 1lu;
        size_t PH = p.pads_begin[Y_AXIS];
        size_t PW = p.pads_begin[X_AXIS];
        REPLACE_WITH_NUM(model, "_IW_", IW);
        REPLACE_WITH_NUM(model, "_IH_", IH);
        REPLACE_WITH_NUM(model, "_ID_", ID);
        REPLACE_WITH_NUM(model, "_IC_", p.in_dims[1]);
        REPLACE_WITH_NUM(model, "_ICG_", p.in_dims[1] / p.grp_c);
        REPLACE_WITH_NUM(model, "_IN_", p.in_dims[0]);
        REPLACE_WITH_NUM(model, "_KD_", KD);
        REPLACE_WITH_NUM(model, "_KH_", KH);
        REPLACE_WITH_NUM(model, "_KW_", KW);
        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_K_", p.kernel);
        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_KS_", p.strides);
        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PB_", p.pads_begin);
        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PE_", p.pads_end);
        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_DL_", p.dilations);
        REPLACE_WITH_NUM(model, "_GC_", p.grp_c);
        REPLACE_WITH_NUM(model, "_OC_", p.out_c);
        REPLACE_WITH_NUM(model, "_OD_", out_dims_size > 2 ? (OD == 0 ? calculateOutDim(ID, KD, SD, PD) : OD) : 1lu);
        REPLACE_WITH_NUM(model, "_OH_", OH == 0 ? calculateOutDim(IH, KH, SH, PH) : OH);
        REPLACE_WITH_NUM(model, "_OW_", OW == 0 ? calculateOutDim(IW, KW, SW, PW) : OW);
        // Byte sizes of the weights and biases sections of the shared blob.
        size_t w_data_size = (KW * KH * KD * p.out_c * p.in_dims[1] / p.grp_c) * sizeof(float);
        size_t b_data_size = p.out_c * sizeof(float);
        REPLACE_WITH_NUM(model, "_S1_", w_data_size);
        REPLACE_WITH_NUM(model, "_S2_", b_data_size);
        return model;
    }
};
// Variant of ConvolutionOnlyTest that rewrites the IR to auto_pad="same_upper"
// with zeroed pads_begin, then checks the device result against the host
// reference (which was computed with the paddings from the test parameters —
// see the NOTE at the case_si_1 definition).
class ConvolutionReshapeTest : public ConvolutionOnlyTest {
protected:
    void SetUp() override {
        try {
            conv_test_params p = ::testing::WithParamInterface<conv_test_params>::GetParam();
            TBlob<float>::Ptr src, dst, dst_ref;
            createBlobs(p, src, dst, dst_ref);
            fill_data(src->data(), src->size());
            auto weights = fillWeights(p);
            calculateRef(weights, p, src, dst_ref);
            CNNNetwork network = getNetwork(weights, p);
            // NOTE(review): the reference was already computed above, so this
            // call only verifies a Convolution layer exists and copies the
            // resolved paddings into a local `p` that is not used afterwards —
            // confirm this ordering is intended.
            updatePaddings(network, p);
            infer(network, p, src, dst);
            compare(*dst, *dst_ref);
        } catch (const InferenceEngine::details::InferenceEngineException &e) {
            FAIL() << e.what();
        }
    }

    // Copies the begin paddings resolved by auto_pad shape inference from the
    // network's Convolution layer back into the test parameters.
    void updatePaddings(const CNNNetwork &network, conv_test_params& p) {
        details::CNNNetworkIterator i(network), end;
        auto found = std::find_if(i, end, [](const CNNLayer::Ptr& layer) {
            return layer->type == "Convolution";
        });
        ASSERT_NE(found, end);
        auto convLayer = std::dynamic_pointer_cast<ConvolutionLayer>(*found);
        auto allPad = getPaddings(*convLayer.get());
        p.pads_begin[X_AXIS] = allPad.begin[X_AXIS];
        p.pads_begin[Y_AXIS] = allPad.begin[Y_AXIS];
        if (p.pads_begin.size() > Z_AXIS)
            p.pads_begin[Z_AXIS] = allPad.begin[Z_AXIS];
    }

    // Same as the base infer(): load, bind blobs, run one inference.
    void infer(CNNNetwork &network, const conv_test_params &p, TBlob<float>::Ptr &src, TBlob<float>::Ptr &dst) override {
        Core ie;
        ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.getDeviceName());
        InferRequest inferRequest = exeNetwork.CreateInferRequest();
        OutputsDataMap outInfo;
        outInfo = network.getOutputsInfo();
        inferRequest.SetBlob(network.getInputsInfo().begin()->first, src);
        inferRequest.SetBlob(outInfo.begin()->first, dst);
        inferRequest.Infer();
    }

    // Base IR with auto_pad="same_upper" injected and the generated
    // pads_begin attribute replaced by an all-zero one.
    std::string getModel(conv_test_params p) override {
        std::string model = ConvolutionOnlyTest::getModel(p);
        REPLACE_WITH_STR(model, "convolution", "convolution auto_pad=\"same_upper\"");
        // Reconstruct the exact pads_begin="a,b(,c)" attribute the base model
        // emitted (values in reverse order, comma separated).
        std::string pads_pattern = "pads_begin=\"";
        for (int i = p.pads_begin.size(); i > 0; i--) {
            pads_pattern += std::to_string(p.pads_begin[i - 1]) + ",";
        }
        // Bug fix: the previous code did `auto end = pads_pattern.end()--;`
        // followed by `*end = '\"';` — the post-decrement acts on a temporary,
        // so `end` stays equal to end() and the write dereferences the
        // past-the-end iterator (undefined behavior). Overwrite the trailing
        // ',' in place instead.
        pads_pattern.back() = '\"';
        std::string pads = "pads_begin=\"0,0\"";
        if (p.pads_begin.size() == 3) {
            pads = "pads_begin=\"0,0,0\"";
        }
        REPLACE_WITH_NUM_VECTOR(model, pads_pattern, pads);
        return model;
    }
};
// Same pipeline as ConvolutionOnlyTest, but the IR passes weights and biases
// as Const-layer inputs instead of inline <weights>/<biases> references.
class ConvolutionBlobsAsInputsTest : public ConvolutionOnlyTest {
protected:
    bool blobsAsInputs() override { return true; }
};
// Convolution geometries used by the instantiations below. Field order:
// {in_dims, kernel, strides, pads_begin, pads_end, dilations, out_c, group,
// out_dims}; out_dims entries of 0 are derived from the other parameters.
#define case_1 conv_base_params({{1lu, 9lu, 16lu, 32lu}, {1lu, 1lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}})
#define case_2 conv_base_params({{1lu, 9lu, 32lu, 16lu}, {2lu, 4lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}})
#define case_3 conv_base_params({{1lu, 9lu, 32lu, 16lu}, {2lu, 4lu}, {2lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}})
#define case_4 conv_base_params({{1lu, 3lu, 40lu, 40lu}, {3lu, 3lu}, {1lu, 2lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 20lu, 1lu, {0lu, 0lu}})
#define case_5 conv_base_params({{1lu, 9lu, 16lu, 32lu}, {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}})
#define case_6 conv_base_params({{1lu, 3lu, 224lu, 224lu}, {7lu, 7lu}, {2lu, 2lu}, {2lu, 2lu}, {0lu, 0lu}, {1lu, 1lu}, 64lu, 1lu, {112lu, 112lu}})
#define case_7 conv_base_params({{1lu, 16lu, 40lu, 40lu}, {3lu, 3lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 16lu, 16lu, {0lu, 0lu}})
#define case_8 conv_base_params({{1lu, 32lu, 16lu, 32lu}, {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {1lu, 1lu}, 32lu, 32lu, {0lu, 0lu}})
#define case_9 conv_base_params({{1lu, 16lu, 40lu, 40lu}, {3lu, 3lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {9lu, 9lu}, 16lu, 16lu, {0lu, 0lu}})
#define case_10 conv_base_params({{1lu, 32lu, 16lu, 32lu}, {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {9lu, 9lu}, 32lu, 32lu, {0lu, 0lu}})
#define case_11 conv_base_params({{1lu, 4lu, 16lu, 32lu}, {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {9lu, 9lu}, 4lu, 4lu, {0lu, 0lu}})
#define case_12 conv_base_params({{1lu, 3lu, 224lu, 224lu}, {10lu, 10lu}, {1lu, 1lu}, {4lu, 4lu}, {0lu, 0lu}, {1lu, 1lu}, 4lu, 1lu, {224lu, 224lu}})
#define case_13 conv_base_params({{1lu, 3lu, 16lu, 32lu, 32lu}, {1lu, 1lu, 1lu}, {1lu, 1lu, 1lu}, {0lu, 0lu, 0lu}, {0lu, 0lu, 0lu}, {1lu, 1lu, 1lu}, 17lu, 1lu, {0lu, 0lu, 0lu}})
#define case_14 conv_base_params({{1lu, 3lu, 16lu, 32lu, 32lu}, {3lu, 3lu, 3lu}, {2lu, 2lu, 1lu}, {0lu, 0lu, 0lu}, {0lu, 0lu, 0lu}, {1lu, 1lu, 1lu}, 64lu, 1lu, {0lu, 0lu, 0lu}})
// NOTE: always auto_pad = same_upper. IR with zero_pads, pad from params is used for ref_conv after reshape
#define case_si_1 conv_base_params({{1lu, 144lu, 75lu, 75lu}, {3lu, 3lu}, {2lu, 2lu}, {1lu, 1lu}, {0lu, 0lu}, {1lu, 1lu}, 144lu, 144lu, {1lu, 1lu}})
// Plain convolution against the host reference.
TEST_P(ConvolutionOnlyTest, TestsConvolution) {
}

// Convolution with auto_pad="same_upper" resolved via shape inference.
TEST_P(ConvolutionReshapeTest, TestsReshapeConvolution) {
}

// Convolution with weights/biases supplied through Const-layer inputs.
TEST_P(ConvolutionBlobsAsInputsTest, TestsConvolutionBlobsAsInputs) {
}
// Produces a readable test name encoding device, input geometry, kernel,
// strides, dilations and group count; depth-related pieces appear only for
// 3D (5-rank) cases.
std::string getTestCaseName(testing::TestParamInfo<conv_test_params> obj) {
    const auto& prm = obj.param;
    const auto rank = prm.in_dims.size();
    std::string name = prm.device_name;
    name += "_w" + std::to_string(prm.in_dims[rank - 1]);
    name += "_h" + std::to_string(prm.in_dims[rank - 2]);
    if (prm.in_dims.size() > 4)
        name += "_d" + std::to_string(prm.in_dims[rank - 3]);
    name += "_c" + std::to_string(prm.in_dims[1]);
    name += "_kw" + std::to_string(prm.kernel[X_AXIS]);
    name += "_kh" + std::to_string(prm.kernel[Y_AXIS]);
    if (prm.kernel.size() > Z_AXIS)
        name += "_kd" + std::to_string(prm.kernel[Z_AXIS]);
    name += "_sw" + std::to_string(prm.strides[X_AXIS]);
    name += "_sh" + std::to_string(prm.strides[Y_AXIS]);
    if (prm.strides.size() > Z_AXIS)
        name += "_sd" + std::to_string(prm.strides[Z_AXIS]);
    name += "_dilw" + std::to_string(prm.dilations[X_AXIS]);
    name += "_dilh" + std::to_string(prm.dilations[Y_AXIS]);
    if (prm.dilations.size() > Z_AXIS)
        name += "_dild" + std::to_string(prm.dilations[Z_AXIS]);
    name += "_grpc" + std::to_string(prm.grp_c);
    return name;
}

View File

@ -1,165 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <cmath>
#include "tests_common.hpp"
#include "single_layer_common.hpp"
#include <ie_core.hpp>
using namespace ::testing;
using namespace InferenceEngine;
using namespace std;
// Parameters for a single DepthToSpace test case.
struct depth_to_space_test_params {
    std::string device_name;             // target plugin
    std::string inPrecision;             // precision substituted into the IR input layer
    InferenceEngine::SizeVector in_dim;  // input shape {N, C, H, W}
    size_t block_size;                   // DepthToSpace block size
    InferenceEngine::SizeVector ref_dim; // output shape {N, C/bs^2, H*bs, W*bs}
};
// Reference DepthToSpace: rearranges blocks of input channels into spatial
// positions of the (single) output blob. Shapes are taken from prm.in_dim /
// prm.ref_dim, both laid out as {N, C, H, W}.
template<typename data_t>
void ref_depthToSpace(const std::vector<Blob::Ptr> &srcs, std::vector<Blob::Ptr> &dsts, depth_to_space_test_params& prm) {
    assert(dsts.size() == 1);
    data_t *dst_data = dsts[0]->buffer().as<data_t*>();
    const data_t *src_data = srcs[0]->buffer().as<data_t*>();
    const size_t in_c = prm.in_dim[1];
    const size_t in_h = prm.in_dim[2];
    const size_t in_w = prm.in_dim[3];
    const size_t out_n = prm.ref_dim[0];
    const size_t out_c = prm.ref_dim[1];
    const size_t out_h = prm.ref_dim[2];
    const size_t out_w = prm.ref_dim[3];
    const size_t bs = prm.block_size;
    for (size_t b = 0; b < out_n; ++b) {
        // Per-batch offsets hoisted out of the spatial loops.
        const size_t src_batch_off = b * in_c * in_h * in_w;
        const size_t dst_batch_off = b * out_c * out_h * out_w;
        for (size_t oy = 0; oy < out_h; ++oy) {
            const size_t iy = oy / bs;
            const size_t ry = oy % bs;
            for (size_t ox = 0; ox < out_w; ++ox) {
                const size_t ix = ox / bs;
                const size_t rx = ox % bs;
                // Channel block selected by the sub-pixel position (ry, rx).
                const size_t chan_off = (ry * bs + rx) * out_c;
                for (size_t oc = 0; oc < out_c; ++oc) {
                    const size_t ic = oc + chan_off;
                    const size_t src_idx = src_batch_off + (ic * in_h * in_w) + (iy * in_w) + ix;
                    const size_t dst_idx = dst_batch_off + (oc * out_h * out_w) + (oy * out_w) + ox;
                    dst_data[dst_idx] = src_data[src_idx];
                }
            }
        }
    }
}
// Runs a single-layer DepthToSpace IR on the parameterized device and checks
// the result against ref_depthToSpace computed on the host.
class DepthToSpaceTests : public TestsCommon, public WithParamInterface<depth_to_space_test_params> {
    // IR template; _IPRS_, _IDIM_, _BS_ and _OUT_ are substituted in getModel.
    std::string model_t = R"V0G0N(
<net Name="Depth2space_net" version="2" precision="FP32" batch="1">
<layers>
<layer name="Input0" type="Input" precision="_IPRS_" id="1">
<output>
<port id="1">
_IDIM_
</port>
</output>
</layer>
<layer name="DepthToSpace" id="3" type="DepthToSpace" precision="FP32">
<data block_size="_BS_"/>
<input>
<port id="1">
_IDIM_
</port>
</input>
<output>
<port id="3">
_OUT_
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="1" from-port="1" to-layer="3" to-port="1"/>
</edges>
</net>
)V0G0N";

    // Substitutes the test parameters into the IR template.
    std::string getModel(depth_to_space_test_params p) {
        std::string model = model_t;
        std::string inIdx;  // (unused)
        std::string inDict;
        std::string out;
        for (auto& dct : p.in_dim) {
            inDict += "<dim>";
            inDict += std::to_string(dct) + "</dim>\n";
        }
        for (auto& dst : p.ref_dim) {
            out += "<dim>";
            out += std::to_string(dst) + "</dim>\n";
        }
        REPLACE_WITH_STR(model, "_IPRS_", p.inPrecision);
        REPLACE_WITH_STR(model, "_IDIM_", inDict);
        REPLACE_WITH_NUM(model, "_BS_", p.block_size);
        REPLACE_WITH_STR(model, "_OUT_", out);
        return model;
    }

protected:
    // Nothing to tear down; IE objects are scoped to SetUp.
    virtual void TearDown() {
    }

    // End-to-end test body: build IR, infer on device, compare with reference.
    virtual void SetUp() {
        try {
            depth_to_space_test_params p = ::testing::WithParamInterface<depth_to_space_test_params>::GetParam();
            std::string model = getModel(p);
            Core ie;
            CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
            InferRequest inferRequest = executable_network.CreateInferRequest();
            std::vector<Blob::Ptr> srcs_vec;
            std::vector<Blob::Ptr> dsts_vec;
            std::vector<Blob::Ptr> out_vec;
            // Allocate and fill every network input with deterministic data.
            InputsDataMap in_info_map = net.getInputsInfo();
            for (auto info : in_info_map) {
                Blob::Ptr blob = make_shared_blob<float>({Precision::FP32, info.second->getTensorDesc().getDims(), NCHW});
                blob->allocate();
                fill_data_dbgval(blob->buffer().as<float*>(), blob->size());
                inferRequest.SetBlob(info.first, blob);
                srcs_vec.push_back(blob);
            }
            // For each output: one blob bound to the request, one for the reference.
            OutputsDataMap out_info_map = net.getOutputsInfo();
            for (auto info : out_info_map) {
                Blob::Ptr blob = make_shared_blob<float>({Precision::FP32, info.second->getTensorDesc().getDims(), NCHW});
                blob->allocate();
                inferRequest.SetBlob(info.first, blob);
                out_vec.push_back(blob);
                Blob::Ptr blob_ref = make_shared_blob<float>({Precision::FP32, info.second->getTensorDesc().getDims(), NCHW});
                blob_ref->allocate();
                dsts_vec.push_back(blob_ref);
            }
            ref_depthToSpace<float>(srcs_vec, dsts_vec, p);
            inferRequest.Infer();
            compare(*out_vec[0], *dsts_vec[0]);
        } catch (const InferenceEngine::details::InferenceEngineException &e) {
            FAIL() << e.what();
        }
    }
};
TEST_P(DepthToSpaceTests, TestsDepthToSpace) {}

View File

@ -1,308 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include <cmath>

#include <gtest/gtest.h>
#include <ie_core.hpp>

#include "tests_common.hpp"
#include "single_layer_common.hpp"
#include "common_test_utils/common_layers_params.hpp"
#include "common_test_utils/data_utils.hpp"
using namespace ::testing;
using namespace InferenceEngine;
// Parameters for a single Eltwise test case.
struct eltwise_test_params {
    std::string device_name;  // target plugin
    // Common shape of every input (batch is fixed to 1 in the IR template).
    struct {
        size_t w;
        size_t h;
        size_t c;
    } in;
    // Element-wise operation encoded into the IR's "operation" attribute.
    enum opType {
        Sum = 0, Prod = 1, Max = 2, Sub = 3, Min = 4, Div = 5, Squared_diff = 6, Equal = 7, Not_equal = 8,
        Less = 9, Less_equal = 10, Greater = 11, Greater_equal = 12, Logical_AND = 13, Logical_OR = 14, Logical_XOR = 15,
        Floor_mod = 16, Pow = 17
    };
    opType op;
    size_t inputsNum;  // number of Input layers feeding the Eltwise layer
};
// Reference Eltwise: folds all inputs into dsts[0] element-wise. The first
// source seeds the accumulator; every further source is combined with it
// according to prm.op (for most ops as: acc = acc OP src).
template<typename data_t>
void ref_eltwise(const std::vector<Blob::Ptr> &srcs, std::vector<Blob::Ptr> &dsts, eltwise_test_params prm) {
    assert(dsts.size() == 1);
    data_t *dst_data = dsts[0]->buffer().as<data_t*>();
    const data_t *src_data = srcs[0]->buffer().as<data_t*>();
    // Seed the accumulator with the first input.
    for (size_t i = 0; i < srcs[0]->size(); i++) {
        dst_data[i] = src_data[i];
    }
    for (size_t n = 1; n < srcs.size(); n++) {
        src_data = srcs[n]->buffer().as<data_t*>();
        for (size_t i = 0; i < srcs[n]->size(); i++) {
            switch (prm.op) {
                case eltwise_test_params::Sum:
                    dst_data[i] += src_data[i];
                    break;
                case eltwise_test_params::Prod:
                    dst_data[i] *= src_data[i];
                    break;
                case eltwise_test_params::Max:
                    dst_data[i] = std::max<data_t>(dst_data[i], src_data[i]);
                    break;
                case eltwise_test_params::Sub:
                    dst_data[i] -= src_data[i];
                    break;
                case eltwise_test_params::Min:
                    dst_data[i] = std::min<data_t>(dst_data[i], src_data[i]);
                    break;
                case eltwise_test_params::Div:
                    dst_data[i] /= src_data[i];
                    break;
                case eltwise_test_params::Squared_diff: {
                    data_t tmp = (dst_data[i] - src_data[i]);
                    dst_data[i] = tmp * tmp;
                    break;
                }
                case eltwise_test_params::Equal:
                    dst_data[i] = dst_data[i] == src_data[i];
                    break;
                case eltwise_test_params::Not_equal:
                    dst_data[i] = dst_data[i] != src_data[i];
                    break;
                case eltwise_test_params::Less:
                    dst_data[i] = dst_data[i] < src_data[i];
                    break;
                case eltwise_test_params::Less_equal:
                    dst_data[i] = dst_data[i] <= src_data[i];
                    break;
                case eltwise_test_params::Greater:
                    dst_data[i] = dst_data[i] > src_data[i];
                    break;
                case eltwise_test_params::Greater_equal:
                    dst_data[i] = dst_data[i] >= src_data[i];
                    break;
                case eltwise_test_params::Logical_AND:
                    dst_data[i] = dst_data[i] && src_data[i];
                    break;
                case eltwise_test_params::Logical_OR:
                    dst_data[i] = dst_data[i] || src_data[i];
                    break;
                case eltwise_test_params::Logical_XOR:
                    dst_data[i] = !dst_data[i] != !src_data[i];
                    break;
                case eltwise_test_params::Floor_mod: {
                    const data_t src1 = src_data[i];
                    const data_t src2 = dst_data[i];
                    // Bug fix: floor_mod(a, b) = a - floor(a / b) * b. The
                    // previous expression `src1 - src1 / src2 * src2` lacked
                    // std::floor, so for floating-point data_t it collapsed to
                    // ~0 regardless of the operands.
                    // NOTE(review): operand order here is (src, acc), inverted
                    // relative to the other binary ops — confirm against the
                    // plugin semantics.
                    dst_data[i] = src1 - std::floor(src1 / src2) * src2;
                    break;
                }
                case eltwise_test_params::Pow: {
                    // NOTE(review): base/exponent order (src ^ acc) is also
                    // inverted relative to the acc-OP-src pattern — confirm.
                    dst_data[i] = std::pow(src_data[i], dst_data[i]);
                    break;
                }
            }
        }
    }
}
// Builds an Eltwise IR with a parameterized number of inputs, runs it on the
// target device and compares against the ref_eltwise host reference.
class EltwiseOnlyTest : public TestsCommon,
                        public WithParamInterface<eltwise_test_params> {
    // IR skeleton; input layers, eltwise input ports and edges are generated
    // in getModel according to p.inputsNum.
    std::string model_t = R"V0G0N(
<Net Name="Eltwise_Only" version="2" precision="FP32" batch="1">
<layers>
_INPUT_LAYERS_
<layer name="eltwise" id="0" type="Eltwise" precision="FP32">
<elementwise_data operation="_OP_"/>
<input>
_ELTWISE_INPUT_PORTS_
</input>
<output>
<port id="0">
<dim>_IN_</dim>
<dim>_IC_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
</output>
</layer>
</layers>
<edges>
_EDGES_
</edges>
</Net>
)V0G0N";

    // Expands the IR skeleton for the requested op and input count.
    std::string getModel(eltwise_test_params p) {
        std::string model = model_t;
        // Map the enum to the IR attribute value (falls back to "sum").
        std::string op = p.op == eltwise_test_params::Sum ? "sum" :
                         p.op == eltwise_test_params::Prod ? "mul" :
                         p.op == eltwise_test_params::Max ? "max" :
                         p.op == eltwise_test_params::Sub ? "sub" :
                         p.op == eltwise_test_params::Min ? "min" :
                         p.op == eltwise_test_params::Div ? "div" :
                         p.op == eltwise_test_params::Squared_diff ? "squared_diff" :
                         p.op == eltwise_test_params::Equal ? "equal" :
                         p.op == eltwise_test_params::Not_equal ? "not_equal" :
                         p.op == eltwise_test_params::Less ? "less" :
                         p.op == eltwise_test_params::Less_equal ? "less_equal" :
                         p.op == eltwise_test_params::Greater ? "greater" :
                         p.op == eltwise_test_params::Greater_equal ? "greater_equal" :
                         p.op == eltwise_test_params::Logical_AND ? "logical_and" :
                         p.op == eltwise_test_params::Logical_OR ? "logical_or" :
                         p.op == eltwise_test_params::Logical_XOR ? "logical_xor" :
                         p.op == eltwise_test_params::Floor_mod ? "floor_mod" :
                         p.op == eltwise_test_params::Pow ? "pow" :
                         "sum" /* default */;
        // Generating inputs layers (ids 1..inputsNum).
        auto generateInput = [](size_t inputId) -> std::string {
            std::string inputLayer = R"V0G0N(
<layer name="data_ID_" type="Input" precision="FP32" id="_ID_">
<output>
<port id="_ID_">
<dim>_IN_</dim>
<dim>_IC_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
</output>
</layer>)V0G0N";
            REPLACE_WITH_NUM(inputLayer, "_ID_", inputId);
            return inputLayer;
        };
        std::string tmp;
        for (size_t i = 1; i < p.inputsNum + 1; ++i) {
            tmp += generateInput(i);
        }
        REPLACE_WITH_STR(model, "_INPUT_LAYERS_", tmp);
        // Generating Eltwise inputs (port ids inputsNum+1..2*inputsNum).
        tmp.clear();
        auto generateEltwiseInputPort = [](size_t inputId) -> std::string {
            std::string inputPort = R"V0G0N(
<port id="_ID_">
<dim>_IN_</dim>
<dim>_IC_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>)V0G0N";
            REPLACE_WITH_NUM(inputPort, "_ID_", inputId);
            return inputPort;
        };
        for (size_t i = p.inputsNum + 1; i < (2 * p.inputsNum) + 1; ++i) {
            tmp += generateEltwiseInputPort(i);
        }
        REPLACE_WITH_STR(model, "_ELTWISE_INPUT_PORTS_", tmp);
        // Generating Edges (input layer i -> eltwise port inputsNum + i).
        tmp.clear();
        auto generateEdge = [](size_t inputLayerId, size_t eltwiseInputPortId) -> std::string {
            std::string edge = R"V0G0N(
<edge from-layer="_INPUT_LAYER_ID_" from-port="_INPUT_LAYER_ID_" to-layer="0" to-port="_ELTWISE_INPUT_PORT_ID_"/>)V0G0N";
            REPLACE_WITH_NUM(edge, "_INPUT_LAYER_ID_", inputLayerId);
            REPLACE_WITH_NUM(edge, "_ELTWISE_INPUT_PORT_ID_", eltwiseInputPortId);
            return edge;
        };
        for (size_t i = 1; i < p.inputsNum + 1; ++i) {
            tmp += generateEdge(i, p.inputsNum + i);
        }
        REPLACE_WITH_STR(model, "_EDGES_", tmp);
        REPLACE_WITH_NUM(model, "_IN_", 1);
        REPLACE_WITH_NUM(model, "_IC_", p.in.c);
        REPLACE_WITH_NUM(model, "_IH_", p.in.h);
        REPLACE_WITH_NUM(model, "_IW_", p.in.w);
        REPLACE_WITH_STR(model, "_OP_", op);
        return model;
    }

protected:
    // End-to-end test body: fill inputs, run device inference, compare with
    // the host reference.
    virtual void SetUp() {
        try {
            eltwise_test_params p = ::testing::WithParamInterface<eltwise_test_params>::GetParam();
            std::string model = getModel(p);
            Core ie;
            CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
            InferRequest inferRequest = executable_network.CreateInferRequest();
            std::vector<Blob::Ptr> srcs_vec;
            InputsDataMap in_info_map = net.getInputsInfo();
            for (auto info : in_info_map) {
                Blob::Ptr inputBlob = inferRequest.GetBlob(info.first);
                float* inputData = inputBlob->buffer().as<float*>();  // (unused)
                // Pow uses a constant 2 fill to keep the result well-defined.
                if (p.op != eltwise_test_params::Pow)
                    CommonTestUtils::fill_data_sine(inputBlob->buffer().as<float*>(), inputBlob->size(), 100, 10, 10);
                else
                    CommonTestUtils::fill_data_const(inputBlob, 2);
                srcs_vec.push_back(inputBlob);
            }
            BlobMap dsts_map;
            std::vector<Blob::Ptr> dsts_vec;
            OutputsDataMap out_info_map = net.getOutputsInfo();
            for (auto info : out_info_map) {
                Blob::Ptr outputBlob = inferRequest.GetBlob(info.first);
                dsts_map[info.first] = outputBlob;
                Blob::Ptr blob_ref = make_shared_blob<float>({Precision::FP32, info.second->getTensorDesc().getDims(), Layout::NCHW});
                blob_ref->allocate();
                dsts_vec.push_back(blob_ref);
            }
            ref_eltwise<float>(srcs_vec, dsts_vec, p);
            inferRequest.Infer();
            compare(*dsts_map.begin()->second, *dsts_vec[0]);
        } catch (const InferenceEngine::details::InferenceEngineException &e) {
            FAIL() << e.what();
        }
    }
};
// Parameter sets are supplied by INSTANTIATE_TEST_CASE_P elsewhere in the file.
TEST_P(EltwiseOnlyTest, TestsEltwise) {}
/*** TBD ***/

View File

@ -1,212 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <ie_core.hpp>
#include <cmath>
#include "tests_common.hpp"
#include "single_layer_common.hpp"
#include "ie_memcpy.h"
using namespace ::testing;
using namespace InferenceEngine;
using namespace std;
// Parameters for a single Gather test case.
struct gatherTF_test_params {
    std::string device_name;     // target plugin
    std::string inIdxPrecision;  // indices precision: "I32", "FP32", "I8" or "I16"
    SizeVector in_dim;           // indices shape
    std::vector<float> in;       // indices values (cast to inIdxPrecision)
    SizeVector dct_dim;          // dictionary (data) shape
    std::vector<float> dct;      // dictionary values
    int axis;                    // gather axis
    SizeVector ref_dim;          // expected output shape
    std::vector<float> ref;      // expected output values
};
class GatherTFTests : public TestsCommon, public WithParamInterface<gatherTF_test_params> {
    // IR template for a single Gather layer; _IDICT_, _IIDX_, _IIDXP_, _AX_
    // and _OUT_ are substituted in getModel.
    std::string model_t = R"V0G0N(
<net Name="Gather_net" version="2" precision="FP32" batch="1">
<layers>
<layer name="InputDictionary" type="Input" precision="FP32" id="1">
<output>
<port id="1">
_IDICT_
</port>
</output>
</layer>
<layer name="InputText" type="Input" precision="_IIDXP_" id="2">
<output>
<port id="2">
_IIDX_
</port>
</output>
</layer>
<layer name="gather" id="3" type="Gather" precision="FP32">
<data axis="_AX_"/>
<input>
<port id="1">
_IDICT_
</port>
<port id="2">
_IIDX_
</port>
</input>
<output>
<port id="3">
_OUT_
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="1" from-port="1" to-layer="3" to-port="1"/>
<edge from-layer="2" from-port="2" to-layer="3" to-port="2"/>
</edges>
</net>
)V0G0N";
std::string getModel(gatherTF_test_params p) {
std::string model = model_t;
std::string inIdx;
std::string inDict;
std::string out;
for (auto& idx : p.in_dim) {
inIdx += "<dim>";
inIdx += std::to_string(idx) + "</dim>\n";
}
for (auto& dct : p.dct_dim) {
inDict += "<dim>";
inDict += std::to_string(dct) + "</dim>\n";
}
for (auto& dst : p.ref_dim) {
out += "<dim>";
out += std::to_string(dst) + "</dim>\n";
}
REPLACE_WITH_STR(model, "_IIDXP_", p.inIdxPrecision);
REPLACE_WITH_STR(model, "_IIDX_", inIdx);
REPLACE_WITH_STR(model, "_IDICT_", inDict);
REPLACE_WITH_NUM(model, "_AX_", p.axis);
REPLACE_WITH_STR(model, "_OUT_", out);
return model;
}
protected:
    // Nothing to tear down; all IE objects used in SetUp are RAII-managed.
    virtual void TearDown() {
    }
virtual void SetUp() {
try {
TestsCommon::SetUp();
gatherTF_test_params p = ::testing::WithParamInterface<gatherTF_test_params>::GetParam();
std::string model = getModel(p);
Core ie;
CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
InferRequest inferRequest = executable_network.CreateInferRequest();
// Input Indexes
Blob::Ptr srcIdx;
if (p.inIdxPrecision == "I32") {
srcIdx = make_shared_blob<int32_t>({Precision::I32, p.in_dim,
TensorDesc::getLayoutByDims(
p.in_dim)});
srcIdx->allocate();
auto *srcIdxPtr = dynamic_cast<TBlob<int32_t> *>(srcIdx.get());
if (srcIdxPtr == nullptr)
FAIL() << "Cannot cast blob to TBlob<int32_t>.";
int32_t *srcIdxP = (int32_t*)srcIdx->buffer();
for (int i=0; i<p.in.size(); i++)
srcIdxP[i] = static_cast<int32_t>(p.in[i]);
} else if (p.inIdxPrecision == "FP32") {
srcIdx = make_shared_blob<float>({Precision::FP32, p.in_dim,
TensorDesc::getLayoutByDims(
p.in_dim)});
srcIdx->allocate();
auto *srcIdxPtr = dynamic_cast<TBlob<float> *>(srcIdx.get());
if (srcIdxPtr == nullptr)
FAIL() << "Cannot cast blob to TBlob<float>.";
ie_memcpy(static_cast<float *>(srcIdx->buffer()), srcIdx->byteSize(), &p.in[0], sizeof(float) * p.in.size());
} else if (p.inIdxPrecision == "I8") {
srcIdx = make_shared_blob<int8_t>({Precision::I8, p.in_dim,
TensorDesc::getLayoutByDims(
p.in_dim)});
srcIdx->allocate();
auto *srcIdxPtr = dynamic_cast<TBlob<int8_t> *>(srcIdx.get());
if (srcIdxPtr == nullptr)
FAIL() << "Cannot cast blob to TBlob<float>.";
int8_t *srcIdxP = (int8_t*)srcIdx->buffer();
for (int i=0; i<p.in.size(); i++)
srcIdxP[i] = static_cast<int8_t>(p.in[i]);
} else if (p.inIdxPrecision == "I16") {
srcIdx = make_shared_blob<int16_t>({Precision::I16, p.in_dim,
TensorDesc::getLayoutByDims(
p.in_dim)});
srcIdx->allocate();
auto *srcIdxPtr = dynamic_cast<TBlob<int16_t> *>(srcIdx.get());
if (srcIdxPtr == nullptr)
FAIL() << "Cannot cast blob to TBlob<int16_t>.";
int16_t *srcIdxP = (int16_t*)srcIdx->buffer();
for (int i=0; i<p.in.size(); i++)
srcIdxP[i] = static_cast<int16_t>(p.in[i]);
}
// Input Dictionary
Blob::Ptr srcDict = make_shared_blob<float>({ Precision::FP32,
p.dct_dim, TensorDesc::getLayoutByDims(p.dct_dim) });
srcDict->allocate();
ie_memcpy(srcDict->buffer(), srcDict->byteSize(), &p.dct[0], sizeof(float)*p.dct.size());
auto * srcDictPtr = dynamic_cast<TBlob<float>*>(srcDict.get());
if (srcDictPtr == nullptr)
FAIL() << "Cannot cast blob to TBlob<float>.";
// Output Data
OutputsDataMap out = net.getOutputsInfo();
std::pair<std::string, DataPtr> item = *out.begin();
TBlob<float>::Ptr output;
output = make_shared_blob<float>(item.second->getTensorDesc());
output->allocate();
inferRequest.SetBlob(item.first, output);
// Infer
inferRequest.SetBlob("InputDictionary", srcDict);
inferRequest.SetBlob("InputText", srcIdx);
inferRequest.Infer();
// Check results
if (memcmp((*output).data(), &p.ref[0], p.ref.size() * sizeof(float)) != 0)
FAIL() << "Wrong result with compare TF reference!";
} catch (const details::InferenceEngineException &e) {
FAIL() << e.what();
}
}
};
TEST_P(GatherTFTests, TestsGather) {}

// Test data vectors
// Index tensors (stored as float; converted to the parametrized precision at setup).
std::vector<float> in0 = { 0.f, 1.f, 1.f, 0.f };
std::vector<float> in1 = { 0.f, 1.f, 2.f, 1.f };
// Dictionaries to gather from.
std::vector<float> dict = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f };
std::vector<float> dict2D = { 1.f, 2.f, 3.f, 4.f}; // 2x2
// Expected outputs, named ref_<indices>_a<axis>_d<dictionary shape>.
std::vector<float> ref_in0_a0_d223 = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f }; // 2x2x2x3
std::vector<float> ref_in0_a2_d232 = { 1.f, 2.f, 2.f, 1.f, 3.f, 4.f, 4.f, 3.f, 5.f, 6.f, 6.f, 5.f, 7.f, 8.f, 8.f, 7.f, 9.f, 10.f, 10.f, 9.f, 11.f, 12.f, 12.f, 11.f }; // 2x3x2x2
std::vector<float> ref_in1_a0_d322 = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 5.f, 6.f, 7.f, 8.f }; // 2x2x2x2
std::vector<float> ref_in1_a1_d232 = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 3.f, 4.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 9.f, 10.f }; // 2x2x2x2
std::vector<float> ref_in1_a2_d223 = { 1.f, 2.f, 3.f, 2.f, 4.f, 5.f, 6.f, 5.f, 7.f, 8.f, 9.f, 8.f, 10.f, 11.f, 12.f, 11.f }; // 2x2x2x2
std::vector<float> ref_in0_a0_d22 = { 1.f, 2.f, 3.f, 4.f, 3.f, 4.f, 1.f, 2.f }; // 2x2x2

View File

@ -1,266 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <ie_core.hpp>
#include "tests_common.hpp"
#include "single_layer_common.hpp"
#include "ir_gen_helper.hpp"
#include <utility>
#include <string>
#include <memory>
#include <vector>
using namespace InferenceEngine;
using namespace ::testing;
// Parameters for a single GatherTree test case.
struct gather_tree_test_params {
    SizeVector in_out_shape;            // [max_time, batch_size, beam_width] for step/parent/output
    std::vector<int32_t> step_idx;      // token ids produced at every time step
    std::vector<int32_t> parent_idx;    // parent beam indices used for backtracking
    std::vector<int32_t> max_seq_len;   // per-batch sequence lengths, size = batch_size
    std::vector<int32_t> end_token;     // single end-of-sequence token value
    std::vector<int32_t> reference;     // expected output; empty skips the reference check
    std::string device_name;            // target plugin/device to run on
};
// Scalar reference implementation of the GatherTree operation, used to
// validate plugin output. For every (batch, beam) pair it walks the beam
// backwards through parent_idx gathering step_idx values, then replaces
// everything after the first end_token with end_token.
template <typename data_t>
void ref_gather_tree(
        InferenceEngine::TBlob<data_t> &step_idx,
        InferenceEngine::TBlob<int32_t> &parent_idx,
        InferenceEngine::TBlob<int32_t> &max_seq_len,
        InferenceEngine::TBlob<data_t> &end_token,
        InferenceEngine::TBlob<data_t> &dst
) {
    const data_t *step_idxPtr = step_idx.data();
    const int32_t *parent_idxPtr = parent_idx.data();
    const int32_t *max_seq_lenPtr = max_seq_len.data();
    const data_t *end_tokenPtr = end_token.data();
    data_t *final_idxPtr = dst.data();

    // Shapes: step_idx / parent_idx / dst are [max_time, batch, beam],
    // max_seq_len is [batch], end_token is a one-element tensor.
    SizeVector step_idx_dims = step_idx.getTensorDesc().getDims();
    SizeVector parent_idx_dims = parent_idx.getTensorDesc().getDims();
    SizeVector max_seq_len_dims = max_seq_len.getTensorDesc().getDims();
    SizeVector final_idx_dims = dst.getTensorDesc().getDims();
    int32_t max_time = step_idx_dims[0];
    int32_t batch_size = step_idx_dims[1];
    int32_t beam_width = step_idx_dims[2];

    // All three [time, batch, beam] tensors must agree on every dimension.
    if (max_time != parent_idx_dims[0] || max_time != final_idx_dims[0] ||
            batch_size != parent_idx_dims[1] || batch_size != final_idx_dims[1] || batch_size != max_seq_len_dims[0] ||
            beam_width != parent_idx_dims[2] || beam_width != final_idx_dims[2]) {
        FAIL() << " Input/Output tensors dimensions mismatch";
        return;
    }

    // NOTE: 'time' is deliberately declared outside the inner loops — the
    // padding loop below leaves it positioned for the backtracking loop that
    // follows, which continues decrementing the same variable.
    for (int32_t time, batch = 0; batch < batch_size; batch++) {
        for (int32_t beam = 0; beam < beam_width; beam++) {
            int32_t max_sequence_in_beam = (std::min)(max_time, max_seq_lenPtr[batch]);
            if (max_sequence_in_beam <= 0)
                continue;

            // Pad positions past the sequence end with end_token.
            for (time = (max_time - 1); time >= max_sequence_in_beam; time--)
                final_idxPtr[(time * batch_size + batch) * beam_width + beam] = (*end_tokenPtr);

            // Backtrack from the last valid step to t=0 following parent_idx.
            for (int32_t parent = beam; time >= 0; time--) {
                if (parent < 0 || parent >= beam_width) {
                    FAIL() << " Wrong parent index";
                    return;
                }

                int32_t idx = (time * batch_size + batch) * beam_width;
                final_idxPtr[idx + beam] = step_idxPtr[idx + parent];
                parent = parent_idxPtr[idx + parent];
            }

            // Forward pass: once end_token is seen, everything after it
            // becomes end_token as well.
            bool finished = false;
            data_t *final = &final_idxPtr[batch * beam_width + beam];

            for (time = 0; time < max_sequence_in_beam; time++, final += (batch_size * beam_width)) {
                if (finished)
                    (*final) = (*end_tokenPtr);
                else if ((*final) == (*end_tokenPtr))
                    finished = true;
            }
        }
    }
}
// Parameterized test for the GatherTree layer: builds a four-input IR,
// computes a scalar reference via ref_gather_tree() and compares it
// element-wise against the plugin output.
//
// NOTE(review): the IR template below reuses id="2" for both the
// "parent_idx" input layer and the "output" GatherTree layer; the edges rely
// on how the IR reader resolves that duplicate id — confirm before reusing
// this template elsewhere.
class GatherTreeTests : public TestsCommon, public WithParamInterface<gather_tree_test_params> {
    // IR template; _IN_OUT_ and _IN2_ are substituted by getModel().
    std::string model_t = R"V0G0N(
<net Name="GatherTree_net" version="2" precision="FP32" batch="1">
<layers>
<layer name="step_idx" type="Input" precision="I32" id="1">
<output>
<port id="1">
_IN_OUT_
</port>
</output>
</layer>
<layer name="parent_idx" type="Input" precision="I32" id="2">
<output>
<port id="2">
_IN_OUT_
</port>
</output>
</layer>
<layer name="max_seq_len" type="Input" precision="I32" id="3">
<output>
<port id="3">
<dim>_IN2_</dim>
</port>
</output>
</layer>
<layer name="end_token" type="Input" precision="I32" id="4">
<output>
<port id="4">
<dim>1</dim>
</port>
</output>
</layer>
<layer name="output" id="2" type="GatherTree" precision="I32">
<data/>
<input>
<port id="1">
_IN_OUT_
</port>
<port id="2">
_IN_OUT_
</port>
<port id="3">
<dim>_IN2_</dim>
</port>
<port id="4">
<dim>1</dim>
</port>
</input>
<output>
<port id="5">
_IN_OUT_
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
<edge from-layer="2" from-port="2" to-layer="2" to-port="2"/>
<edge from-layer="3" from-port="3" to-layer="2" to-port="3"/>
<edge from-layer="4" from-port="4" to-layer="2" to-port="4"/>
</edges>
</net>
)V0G0N";

    // Renders the IR template; _IN2_ (max_seq_len size) is the batch
    // dimension, i.e. in_out_shape[1].
    std::string getModel(gather_tree_test_params p) {
        std::string model = model_t;
        std::string in_out_shape;

        for (auto& dct : p.in_out_shape) {
            in_out_shape += "<dim>";
            in_out_shape += std::to_string(dct) + "</dim>\n";
        }

        REPLACE_WITH_STR(model, "_IN_OUT_", in_out_shape);
        REPLACE_WITH_NUM(model, "_IN2_", p.in_out_shape[1]);
        return model;
    }

protected:
    virtual void TearDown() {
    }

    virtual void SetUp() {
        try {
            TestsCommon::SetUp();
            gather_tree_test_params p = ::testing::WithParamInterface<gather_tree_test_params>::GetParam();
            std::string model = getModel(p);

            Core ie;
            CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr());
            ExecutableNetwork executableNetwork = ie.LoadNetwork(network, p.device_name);
            InferRequest inferRequest = executableNetwork.CreateInferRequest();

            // Output Data
            InferenceEngine::OutputsDataMap out;
            out = network.getOutputsInfo();
            std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();

            InferenceEngine::TBlob<int32_t>::Ptr output;
            output = InferenceEngine::make_shared_blob<int32_t>(item.second->getTensorDesc());
            output->allocate();

            // Output Reference (filled by ref_gather_tree below)
            InferenceEngine::TBlob<int32_t> dst_ref(item.second->getTensorDesc());
            dst_ref.allocate();

            // Input Data
            // step_idx
            InferenceEngine::Blob::Ptr step_idx;
            step_idx = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, p.in_out_shape,
                InferenceEngine::TensorDesc::getLayoutByDims(p.in_out_shape) });
            step_idx->allocate();
            memcpy(step_idx->buffer(), &p.step_idx[0], sizeof(int32_t)*p.step_idx.size());
            auto * step_idxPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(step_idx.get());
            if (step_idxPtr == nullptr)
                FAIL() << "Cannot cast blob to TBlob<int32_t>.";

            // parent_idx
            InferenceEngine::Blob::Ptr parent_idx;
            parent_idx = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, p.in_out_shape,
                InferenceEngine::TensorDesc::getLayoutByDims(p.in_out_shape) });
            parent_idx->allocate();
            memcpy(parent_idx->buffer(), &p.parent_idx[0], sizeof(int32_t)*p.parent_idx.size());
            auto * parent_idxPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(parent_idx.get());
            if (parent_idxPtr == nullptr)
                FAIL() << "Cannot cast blob to TBlob<int32_t>.";

            // max_seq_len: one entry per batch, i.e. size in_out_shape[1]
            InferenceEngine::Blob::Ptr max_seq_len;
            InferenceEngine::SizeVector max_seq_len_dim(1, p.in_out_shape[1]);
            max_seq_len = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, max_seq_len_dim,
                InferenceEngine::TensorDesc::getLayoutByDims(max_seq_len_dim) });
            max_seq_len->allocate();
            memcpy(max_seq_len->buffer(), &p.max_seq_len[0], sizeof(int32_t)*p.max_seq_len.size());
            auto * max_seq_lenPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(max_seq_len.get());
            if (max_seq_lenPtr == nullptr)
                FAIL() << "Cannot cast blob to TBlob<int32_t>.";

            // end_token: single scalar value
            InferenceEngine::Blob::Ptr end_token;
            InferenceEngine::SizeVector end_token_dim(1, 1);
            end_token = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, end_token_dim,
                InferenceEngine::TensorDesc::getLayoutByDims(end_token_dim) });
            end_token->allocate();
            memcpy(static_cast<int32_t*>(end_token->buffer()), &p.end_token[0], sizeof(int32_t));
            auto * seq_lengthsIdxPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(end_token.get());
            if (seq_lengthsIdxPtr == nullptr)
                FAIL() << "Cannot cast blob to TBlob<int32_t>.";

            // Reference: also cross-checked against the expected vector when provided
            ref_gather_tree(*step_idxPtr, *parent_idxPtr, *max_seq_lenPtr, *seq_lengthsIdxPtr, dst_ref);
            if (p.reference.size())
                if (memcmp(dst_ref.data(), &p.reference[0], p.reference.size() * sizeof(int32_t)) != 0)
                    FAIL() << "Wrong result with compare reference vector!";

            // Infer
            inferRequest.SetBlob("step_idx", step_idx);
            inferRequest.SetBlob("parent_idx", parent_idx);
            inferRequest.SetBlob("max_seq_len", max_seq_len);
            inferRequest.SetBlob("end_token", end_token);
            inferRequest.SetBlob(network.getOutputsInfo().begin()->first, output);
            inferRequest.Infer();

            // Element-wise comparison of plugin output vs. reference
            ASSERT_EQ(dst_ref.size(), output->size());
            for (int i = dst_ref.size()-1; i >= 0; i--)
                ASSERT_EQ(dst_ref.data()[i], output->data()[i]);
        } catch (const InferenceEngine::details::InferenceEngineException &e) {
            FAIL() << e.what();
        }
    }
};
// Test body lives entirely in SetUp(); the macro just instantiates the case.
TEST_P(GatherTreeTests, TestsGatherTree) {}

View File

@ -1,182 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <ie_core.hpp>
#include <cmath>
#include "tests_common.hpp"
#include "single_layer_common.hpp"
using namespace ::testing;
using namespace InferenceEngine;
using namespace std;
// Parameters for a single Pad test case.
struct padTF_test_params {
    std::string device;         // target plugin/device to run on
    SizeVector in_size;         // input tensor shape
    std::vector<float> in;      // input tensor data
    SizeVector pads_begin;      // per-dimension padding before the data
    SizeVector pads_end;        // per-dimension padding after the data
    std::string pad_mode;       // "constant", "edge", "reflect" or "symmetric"
    float pad_value;            // fill value used by "constant" mode
    SizeVector ref_size;        // expected output shape
    std::vector<float> ref;     // expected output data
};
// Parameterized test for the Pad layer: renders a one-layer IR from the test
// parameters, runs inference on the target device and compares the result
// against the reference tensor with compare().
class PadTFTests : public TestsCommon, public WithParamInterface<padTF_test_params> {
    // IR template; placeholders are substituted by getModel().
    std::string model_t = R"V0G0N(
<net Name="Pad_net" version="2" precision="FP32" batch="1">
<layers>
<layer name="input" type="Input" precision="FP32" id="1">
<output>
<port id="1">
_IN_
</port>
</output>
</layer>
<layer name="output" id="2" type="Pad" precision="FP32">
<data pads_begin="_P_BEGIN_" pads_end="_P_END_" pad_mode="_P_MODE_" pad_value="_P_VAL_"/>
<input>
<port id="2">
_IN_
</port>
</input>
<output>
<port id="3">
_OUT_
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="1" from-port="1" to-layer="2" to-port="2"/>
</edges>
</net>
)V0G0N";

    // Renders the IR template: shapes become <dim> lists, pads become
    // comma-separated attribute strings.
    std::string getModel(padTF_test_params p) {
        std::string model = model_t;
        std::string in_size;
        std::string pads_begin;
        std::string pads_end;
        std::string ref_size;

        for (auto& src : p.in_size) {
            in_size += "<dim>";
            in_size += std::to_string(src) + "</dim>\n";
        }

        for (auto& pb : p.pads_begin)
            pads_begin += std::to_string(pb) + ",";
        // Drop the trailing comma; guarded so empty pads are not UB
        // (std::string::pop_back on an empty string is undefined behavior).
        if (!pads_begin.empty())
            pads_begin.pop_back();

        for (auto& pe : p.pads_end)
            pads_end += std::to_string(pe) + ",";
        if (!pads_end.empty())
            pads_end.pop_back();

        for (auto& dst : p.ref_size) {
            ref_size += "<dim>";
            ref_size += std::to_string(dst) + "</dim>\n";
        }

        REPLACE_WITH_STR(model, "_IN_", in_size);
        REPLACE_WITH_STR(model, "_P_BEGIN_", pads_begin);
        REPLACE_WITH_STR(model, "_P_END_", pads_end);
        REPLACE_WITH_STR(model, "_P_MODE_", p.pad_mode);
        REPLACE_WITH_NUM(model, "_P_VAL_", p.pad_value);
        REPLACE_WITH_STR(model, "_OUT_", ref_size);
        return model;
    }

protected:
    virtual void TearDown() {
    }

    virtual void SetUp() {
        try {
            TestsCommon::SetUp();
            padTF_test_params p = ::testing::WithParamInterface<padTF_test_params>::GetParam();
            std::string model = getModel(p);

            Core ie;
            CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device);
            InferRequest inferRequest = executable_network.CreateInferRequest();

            // Input blob, filled from the test parameters
            InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.in_size, InferenceEngine::TensorDesc::getLayoutByDims(p.in_size) });
            src->allocate();
            float* psrc = src->buffer().as<float*>();
            std::copy(p.in.begin(), p.in.end(), psrc);
            inferRequest.SetBlob("input", src);

            // Output blob
            InferenceEngine::Blob::Ptr dst = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.ref_size, InferenceEngine::TensorDesc::getLayoutByDims(p.ref_size) });
            dst->allocate();
            inferRequest.SetBlob("output", dst);

            // Infer
            inferRequest.Infer();

            // Check results against the reference tensor
            TBlob<float> dst_ref({ Precision::FP32, p.ref_size, TensorDesc::getLayoutByDims(p.ref_size) });
            dst_ref.allocate();
            float* pdst_ref = dst_ref.buffer().as<float*>();
            std::copy(p.ref.begin(), p.ref.end(), pdst_ref);
            compare(*dst, dst_ref);
        } catch (const InferenceEngine::details::InferenceEngineException &e) {
            FAIL() << e.what();
        }
    }
};
// Test body lives entirely in SetUp(); the macro just instantiates the case.
TEST_P(PadTFTests, TestsPad) {}
// Builds a readable test-case name of the form "<device>_<pad_mode>".
std::string getTestName(testing::TestParamInfo<padTF_test_params> obj) {
    const auto& params = obj.param;
    return params.device + "_" + params.pad_mode;
}
// Examples of the standalone Pad operation input / output:
// 3x4 input padded with pads_begin={2,2} / pads_end={1,3} to a 6x9 output.
std::vector<float> in =
        {1.f, 2.f, 3.f, 4.f,
         5.f, 6.f, 7.f, 8.f,
         9.f,10.f,11.f,12.f}; // 3x4

// pad_mode="constant" with pad_value=0
std::vector<float> ref_constant =
        {0.f,0.f,0.f, 0.f, 0.f, 0.f,0.f,0.f,0.f,
         0.f,0.f,0.f, 0.f, 0.f, 0.f,0.f,0.f,0.f,
         0.f,0.f,1.f, 2.f, 3.f, 4.f,0.f,0.f,0.f,
         0.f,0.f,5.f, 6.f, 7.f, 8.f,0.f,0.f,0.f,
         0.f,0.f,9.f,10.f,11.f,12.f,0.f,0.f,0.f,
         0.f,0.f,0.f, 0.f, 0.f, 0.f,0.f,0.f,0.f}; // 6x9

// pad_mode="edge": border values are replicated outward
std::vector<float> ref_edge =
        {1.f,1.f,1.f, 2.f, 3.f, 4.f, 4.f, 4.f, 4.f,
         1.f,1.f,1.f, 2.f, 3.f, 4.f, 4.f, 4.f, 4.f,
         1.f,1.f,1.f, 2.f, 3.f, 4.f, 4.f, 4.f, 4.f,
         5.f,5.f,5.f, 6.f, 7.f, 8.f, 8.f, 8.f, 8.f,
         9.f,9.f,9.f,10.f,11.f,12.f,12.f,12.f,12.f,
         9.f,9.f,9.f,10.f,11.f,12.f,12.f,12.f,12.f}; // 6x9

// pad_mode="reflect": mirror without repeating the border element
std::vector<float> ref_reflect =
        {11.f,10.f,9.f,10.f,11.f,12.f,11.f,10.f,9.f,
         7.f, 6.f,5.f, 6.f, 7.f, 8.f, 7.f, 6.f,5.f,
         3.f, 2.f,1.f, 2.f, 3.f, 4.f, 3.f, 2.f,1.f,
         7.f, 6.f,5.f, 6.f, 7.f, 8.f, 7.f, 6.f,5.f,
         11.f,10.f,9.f,10.f,11.f,12.f,11.f,10.f,9.f,
         7.f, 6.f,5.f, 6.f, 7.f, 8.f, 7.f, 6.f,5.f}; // 6x9

// pad_mode="symmetric": mirror including the border element
std::vector<float> ref_symmetric =
        {6.f,5.f,5.f, 6.f, 7.f, 8.f, 8.f, 7.f, 6.f,
         2.f,1.f,1.f, 2.f, 3.f, 4.f, 4.f, 3.f, 2.f,
         2.f,1.f,1.f, 2.f, 3.f, 4.f, 4.f, 3.f, 2.f,
         6.f,5.f,5.f, 6.f, 7.f, 8.f, 8.f, 7.f, 6.f,
         10.f,9.f,9.f,10.f,11.f,12.f,12.f,11.f,10.f,
         10.f,9.f,9.f,10.f,11.f,12.f,12.f,11.f,10.f}; // 6x9

// Helpers instantiating one parameterized case per device; __VA_ARGS__ fills
// the remaining padTF_test_params fields.
#define PLUGING_CASE(_device, _test, __num, ...) \
    INSTANTIATE_TEST_CASE_P(smoke_##_device##_run##__num, _test, ::testing::Values(padTF_test_params{#_device, __VA_ARGS__}) );

#define PLUGING_CASE_WITH_SUFFIX(_device, _suffix, _test, __num, ...) \
    INSTANTIATE_TEST_CASE_P(_device##_run##_suffix##__num, _test, ::testing::Values(padTF_test_params{#_device, __VA_ARGS__}) );

View File

@ -1,298 +0,0 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <ie_core.hpp>
#include <cmath>
#include <string>
#include "tests_common.hpp"
#include "single_layer_common.hpp"
using namespace ::testing;
using namespace InferenceEngine;
using namespace std;
// Parameters for a single Select test case.
struct select_params
{
    std::string device_name;        // target plugin/device to run on
    SizeVector input1_tensor;       // "then" input shape
    SizeVector input2_tensor;       // "else" input shape
    SizeVector mask_tensor;         // boolean condition shape
    std::string auto_broadcast;     // "none" or "numpy" broadcast rule
    bool fail_expected;             // true when network load/infer should throw
};
// Parameterized test for the Select layer: renders a three-input opset1 IR,
// fills mask/then/else blobs, infers and checks every output element against
// a broadcast-aware CPU computation in check_output().
class SelectTests : public TestsCommon, public WithParamInterface<select_params> {
    // IR template; getModel() substitutes the placeholders IN PLACE, i.e. it
    // mutates this member (safe only because each test gets a fresh fixture).
    std::string model_base = R"V0G0N(
<net name="Select_net" version="7">
<layers>
<layer name="cond" type="Input" id="0" version="opset1">
<data element_type="boolean" shape="_MASK_SHAPE_"/>
<output>
<port id="0" precision="BOOL">_MASK_DIMS_</port>
</output>
</layer>
<layer name="input1" type="Input" id="1" version="opset1">
<data element_type="f32" shape="_INPUT1_SHAPE_"/>
<output>
<port id="0" precision="FP32">_INPUT1_DIMS_</port>
</output>
</layer>
<layer name="input2" type="Input" id="2" version="opset1">
<data element_type="f32" shape="_INPUT2_SHAPE_"/>
<output>
<port id="0" precision="FP32">_INPUT2_DIMS_</port>
</output>
</layer>
<layer name="select" id="3" type="Select" version="opset1">
<data auto_broadcast="_AUTO_BROADCAST_"/>
<input>
<port id="0">_MASK_DIMS_</port>
<port id="1">_INPUT1_DIMS_</port>
<port id="2">_INPUT2_DIMS_</port>
</input>
<output>
<port id="3" precision="FP32">_OUTPUT_DIMS_</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="3" to-port="0"/>
<edge from-layer="1" from-port="0" to-layer="3" to-port="1"/>
<edge from-layer="2" from-port="0" to-layer="3" to-port="2"/>
</edges>
</net>
)V0G0N";

    // Numpy-style broadcast of the three input shapes: each is left-padded
    // with 1s to at least rank 4, then the per-dimension maximum is taken.
    SizeVector get_output_tensor(const SizeVector& cond_dims, const SizeVector& input1_dims, const SizeVector& input2_dims)
    {
        auto max_in_size = std::max({cond_dims.size(), input1_dims.size(), input2_dims.size()});
        auto out_size = std::max(max_in_size, (size_t)4);

        SizeVector cond_dims_extended = cond_dims;
        SizeVector in1_dims_extended = input1_dims;
        SizeVector in2_dims_extended = input2_dims;

        cond_dims_extended.insert(cond_dims_extended.begin(), out_size - cond_dims_extended.size(), 1);
        in1_dims_extended.insert(in1_dims_extended.begin(), out_size - in1_dims_extended.size(), 1);
        in2_dims_extended.insert(in2_dims_extended.begin(), out_size - in2_dims_extended.size(), 1);

        SizeVector output_tensor(out_size, 1);
        for (size_t i = 0; i < output_tensor.size(); i++) {
            output_tensor[i] = std::max({ cond_dims_extended[i], in1_dims_extended[i], in2_dims_extended[i] });
        }

        return output_tensor;
    }

    // Builds the IR by replacing placeholders in model_base.
    // NOTE(review): the template contains no _OUTPUT_SHAPE_ token, so that
    // particular REPLACE_WITH_STR below is a no-op — presumably a leftover.
    std::string getModel(select_params p) {
        std::string mask_shape_str = "";
        std::string mask_dims_str = "";
        for (size_t i=0; i<p.mask_tensor.size(); i++) {
            mask_shape_str += std::to_string(p.mask_tensor[i]);
            mask_dims_str += "\n                    ";
            mask_dims_str += "<dim>" + std::to_string(p.mask_tensor[i]) + "</dim>";
            if (i < p.mask_tensor.size() - 1) {
                mask_shape_str += ",";
            } else {
                mask_dims_str += "\n                ";
            }
        }

        std::string input1_shape_str = "";
        std::string input1_dims_str = "";
        for (size_t i=0; i<p.input1_tensor.size(); i++) {
            input1_shape_str += std::to_string(p.input1_tensor[i]);
            input1_dims_str += "\n                    ";
            input1_dims_str += "<dim>" + std::to_string(p.input1_tensor[i]) + "</dim>";
            if (i < p.input1_tensor.size() - 1) {
                input1_shape_str += ",";
            } else {
                input1_dims_str += "\n                ";
            }
        }

        std::string input2_shape_str = "";
        std::string input2_dims_str = "";
        for (size_t i=0; i<p.input2_tensor.size(); i++) {
            input2_shape_str += std::to_string(p.input2_tensor[i]);
            input2_dims_str += "\n                    ";
            input2_dims_str += "<dim>" + std::to_string(p.input2_tensor[i]) + "</dim>";
            if (i < p.input2_tensor.size() - 1) {
                input2_shape_str += ",";
            } else {
                input2_dims_str += "\n                ";
            }
        }

        SizeVector output_tensor = get_output_tensor(p.mask_tensor, p.input1_tensor, p.input2_tensor);

        std::string output_shape_str = "";
        std::string output_dims_str = "";
        for (size_t i=0; i<output_tensor.size(); i++) {
            output_shape_str += std::to_string(output_tensor[i]);
            output_dims_str += "\n                    ";
            output_dims_str += "<dim>" + std::to_string(output_tensor[i]) + "</dim>";
            if (i < output_tensor.size() - 1) {
                output_shape_str += ",";
            } else {
                output_dims_str += "\n                ";
            }
        }

        REPLACE_WITH_STR(model_base, "_MASK_SHAPE_", mask_shape_str);
        REPLACE_WITH_STR(model_base, "_MASK_DIMS_", mask_dims_str);

        REPLACE_WITH_STR(model_base, "_INPUT1_SHAPE_", input1_shape_str);
        REPLACE_WITH_STR(model_base, "_INPUT1_DIMS_", input1_dims_str);

        REPLACE_WITH_STR(model_base, "_INPUT2_SHAPE_", input2_shape_str);
        REPLACE_WITH_STR(model_base, "_INPUT2_DIMS_", input2_dims_str);

        REPLACE_WITH_STR(model_base, "_OUTPUT_SHAPE_", output_shape_str);
        REPLACE_WITH_STR(model_base, "_OUTPUT_DIMS_", output_dims_str);

        REPLACE_WITH_STR(model_base, "_AUTO_BROADCAST_", p.auto_broadcast);

        return model_base;
    }

    // Linear offset of element (b, f, h, w) in a tensor of up to 4 dims,
    // clamping any coordinate that exceeds the tensor's extent to 0 —
    // this implements numpy-style broadcast reads from a smaller tensor.
    size_t get_index_bfhw(SizeVector tensor, size_t b, size_t f, size_t h, size_t w)
    {
        if ((tensor.size() < 4) || (b >= tensor[tensor.size() - 4])) b = 0;
        if ((tensor.size() < 3) || (f >= tensor[tensor.size() - 3])) f = 0;
        if ((tensor.size() < 2) || (h >= tensor[tensor.size() - 2])) h = 0;
        if ((tensor.size() < 1) || (w >= tensor[tensor.size() - 1])) w = 0;

        size_t res = 0;

        size_t b_multiplier = 1;
        if (tensor.size() >= 3) {
            b_multiplier = std::accumulate(std::end(tensor) - 3, std::end(tensor), 1, std::multiplies<size_t>());
        } else {
            b_multiplier = std::accumulate(std::begin(tensor), std::end(tensor), 1, std::multiplies<size_t>());
        }
        res += b * b_multiplier;

        size_t f_multiplier = 1;
        if (tensor.size() >= 2) {
            f_multiplier = std::accumulate(std::end(tensor) - 2, std::end(tensor), 1, std::multiplies<size_t>());
        } else {
            f_multiplier = std::accumulate(std::begin(tensor), std::end(tensor), 1, std::multiplies<size_t>());
        }
        res += f * f_multiplier;

        size_t h_multiplier = 1;
        if (tensor.size() >= 1) {
            h_multiplier = std::accumulate(std::end(tensor) - 1, std::end(tensor), 1, std::multiplies<size_t>());
        }
        res += h * h_multiplier;

        res += w;
        return res;
    }

    // Element-wise check: output[i] must equal input1[i] where mask is
    // non-zero and input2[i] elsewhere, with broadcast handled by
    // get_index_bfhw().
    void check_output(const float* input1, const float* input2, const uint8_t* mask, const float* output, select_params p) {
        SizeVector output_tensor = get_output_tensor(p.mask_tensor, p.input1_tensor, p.input2_tensor);

        size_t b_max = (output_tensor.size() > 0) ? output_tensor[0] : 1;
        size_t f_max = (output_tensor.size() > 1) ? output_tensor[1] : 1;
        size_t h_max = (output_tensor.size() > 2) ? output_tensor[2] : 1;
        size_t w_max = (output_tensor.size() > 3) ? output_tensor[3] : 1;

        for (size_t b = 0; b < b_max; b++) {
            for (size_t f = 0; f < f_max; f++) {
                for (size_t h = 0; h < h_max; h++) {
                    for (size_t w = 0; w < w_max; w++) {
                        if (mask[get_index_bfhw(p.mask_tensor, b, f, h, w)] == 0)
                        {
                            EXPECT_EQ(output[get_index_bfhw(output_tensor, b, f, h, w)],
                                      input2[get_index_bfhw(p.input2_tensor, b, f, h, w)]);
                        }
                        else
                        {
                            EXPECT_EQ(output[get_index_bfhw(output_tensor, b, f, h, w)],
                                      input1[get_index_bfhw(p.input1_tensor, b, f, h, w)]);
                        }
                    }
                }
            }
        }
    }

protected:
    virtual void TearDown() {
    }

    // NOTE(review): when fail_expected is true the catch clause merely
    // swallows the exception; if NO exception is thrown the test still
    // passes — an expected failure is never asserted to actually occur.
    virtual void SetUp() {
        bool fail_expected = false;
        try {
            select_params p = ::testing::WithParamInterface<select_params>::GetParam();
            fail_expected = p.fail_expected;

            Core ie;
            CNNNetwork net = ie.ReadNetwork(getModel(p), Blob::Ptr());
            InputsDataMap in_info_map = net.getInputsInfo();
            OutputsDataMap out_info_map = net.getOutputsInfo();

            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
            InferRequest infer_request = executable_network.CreateInferRequest();

            uint8_t* mask;
            float* input1_ptr, *input2_ptr;
            auto input_iterator = in_info_map.begin();
            size_t input1_buffer_size = std::accumulate(std::begin(p.input1_tensor), std::end(p.input1_tensor), 1, std::multiplies<size_t>());
            size_t input2_buffer_size = std::accumulate(std::begin(p.input2_tensor), std::end(p.input2_tensor), 1, std::multiplies<size_t>());

            // Creating mask buffer
            // If true, take value from first buffer, if false, take from second
            Blob::Ptr maskBlob = infer_request.GetBlob(input_iterator->first);
            mask = maskBlob->buffer().as<uint8_t*>();
            for (size_t id = 0; id < maskBlob->size(); id++) {
                mask[id] = (id % 2);
            }
            input_iterator++;

            // Inputs random generator
            Blob::Ptr input1Blob = infer_request.GetBlob(input_iterator->first);
            input_iterator++;
            Blob::Ptr input2Blob = infer_request.GetBlob(input_iterator->first);
            input1_ptr = input1Blob->buffer().as<float*>();
            input2_ptr = input2Blob->buffer().as<float*>();
            for (int index = 0; index < input1_buffer_size; index++) {
                input1_ptr[index] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
            }
            for (int index = 0; index < input2_buffer_size; index++) {
                input2_ptr[index] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
            }

            // Output allocation
            SizeVector output_tensor = get_output_tensor(p.mask_tensor, p.input1_tensor, p.input2_tensor);
            Blob::Ptr outputBlob = infer_request.GetBlob(out_info_map.begin()->first);
            TBlob<float> dst_ref({ Precision::FP32, output_tensor, Layout::NCHW });
            dst_ref.allocate();

            infer_request.Infer();

            // Output buffer
            outputBlob = infer_request.GetBlob(out_info_map.begin()->first);
            const float* output_ptr = outputBlob->buffer().as<float*>();

            check_output(input1_ptr, input2_ptr, mask, output_ptr, p);
        }
        catch (const InferenceEngine::details::InferenceEngineException & e) {
            if (!fail_expected) {
                FAIL() << e.what();
            }
        }
    }
};

View File

@ -1,244 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <ie_core.hpp>
#include "tests_common.hpp"
#include "single_layer_common.hpp"
using namespace ::testing;
using namespace InferenceEngine;
// Geometry and axis for one softmax configuration.
struct softmax_base_params {
    struct {
        size_t w;   // width
        size_t h;   // height
        size_t c;   // channels
        size_t n;   // batch
    } in;
    int axis;       // normalization axis in NCHW order: 0=N, 1=C, 2=H, 3=W
};
// Full test parameters: base geometry plus device and IR variant selector.
struct softmax_test_params : softmax_base_params {
    std::string device_name;    // target plugin/device to run on
    std::string model;          // "2D" selects the 2D IR template, anything else the 4D one

    softmax_test_params(std::string name, softmax_base_params params, std::string model = "4D") :
            softmax_base_params(params), device_name(name), model(model) {}
};
// Verifies a softmax forward result: for every position of the non-axis
// dimensions, the values summed along the normalization axis must be ~1.
// Layout is NCHW with off(n,c,h,w) = n*W*H*C + c*W*H + h*W + w, which is
// exactly the stride table built below.
template <typename data_t>
void check_softmax_fwd(const data_t *src_data, softmax_test_params prm)
{
    // Dimension sizes and linear strides in NCHW order.
    const size_t dims[4]    = { prm.in.n, prm.in.c, prm.in.h, prm.in.w };
    const size_t strides[4] = { prm.in.c * prm.in.h * prm.in.w,
                                prm.in.h * prm.in.w,
                                prm.in.w,
                                1 };

    // Axes outside [0, 3] are ignored, matching the original per-axis checks.
    if (prm.axis < 0 || prm.axis > 3)
        return;

    // Collect the three non-axis dimensions, preserving NCHW order so the
    // sweep visits positions in the same order as dedicated per-axis loops.
    size_t outer_dim[3];
    size_t outer_stride[3];
    int k = 0;
    for (int d = 0; d < 4; ++d) {
        if (d == prm.axis)
            continue;
        outer_dim[k] = dims[d];
        outer_stride[k] = strides[d];
        ++k;
    }

    for (size_t i0 = 0; i0 < outer_dim[0]; ++i0) {
        for (size_t i1 = 0; i1 < outer_dim[1]; ++i1) {
            for (size_t i2 = 0; i2 < outer_dim[2]; ++i2) {
                const size_t base = i0 * outer_stride[0]
                                  + i1 * outer_stride[1]
                                  + i2 * outer_stride[2];
                double sum = 0.0;
                for (size_t a = 0; a < dims[prm.axis]; ++a)
                    sum += src_data[base + a * strides[prm.axis]];
                ASSERT_NEAR(sum, 1.0f, 0.001);
            }
        }
    }
}
// Parameterized test for a standalone Softmax layer: renders a 4D or 2D IR,
// fills the input with random data, infers and validates the output with
// check_softmax_fwd() (per-axis sums must equal 1).
class SoftmaxOnlyTest: public TestsCommon,
                       public WithParamInterface<softmax_test_params> {

    // 4D IR template; _IB_/_IC_/_IH_/_IW_ are substituted by getModel().
    std::string model_t = R"V0G0N(
<Net Name="SoftmaxOnly" version="2" precision="FP32" batch="_IB_">
<layers>
<layer name="input_1" type="input" id="0" precision="FP32">
<output>
<port id="0">
<dim>_IB_</dim>
<dim>_IC_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
</output>
</layer>
<layer name="softmax" id="1" type="Softmax" precision="FP32">
<input>
<port id="0">
<dim>_IB_</dim>
<dim>_IC_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
</input>
<output>
<port id="1">
<dim>_IB_</dim>
<dim>_IC_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="0" />
</edges>
</Net>
)V0G0N";

    // 2D (batch x channels) IR variant, selected with p.model == "2D".
    std::string model_2D = R"V0G0N(
<Net Name="SoftmaxOnly" version="2" precision="FP32" batch="_IB_">
<layers>
<layer name="input_1" type="input" id="0" precision="FP32">
<output>
<port id="0">
<dim>_IB_</dim>
<dim>_IC_</dim>
</port>
</output>
</layer>
<layer name="softmax" id="1" type="Softmax" precision="FP32">
<input>
<port id="0">
<dim>_IB_</dim>
<dim>_IC_</dim>
</port>
</input>
<output>
<port id="1">
<dim>_IB_</dim>
<dim>_IC_</dim>
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="0" />
</edges>
</Net>
)V0G0N";

    // Picks the template matching p.model and fills in the dimensions.
    std::string getModel(softmax_test_params p) {
        std::string model = p.model == "2D" ? model_2D : model_t;

        REPLACE_WITH_NUM(model, "_IB_", p.in.n);
        REPLACE_WITH_NUM(model, "_IW_", p.in.w);
        REPLACE_WITH_NUM(model, "_IH_", p.in.h);
        REPLACE_WITH_NUM(model, "_IC_", p.in.c);
        return model;
    }

protected:
    virtual void SetUp() {
        try {
            softmax_test_params p = ::testing::WithParamInterface<softmax_test_params>::GetParam();
            std::string model = getModel(p);
            bool is2D = p.model == "2D";

            Core ie;
            CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
            InputsDataMap in_info_map = net.getInputsInfo();
            OutputsDataMap out_info_map = net.getOutputsInfo();

            // Batch > 1 is applied through setBatchSize rather than the IR.
            if (p.in.n != 1) {
                net.setBatchSize(p.in.n);
            }

            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
            InferRequest inferRequest = executable_network.CreateInferRequest();

            // Fill each batch slice of the input with random data.
            auto src = inferRequest.GetBlob(in_info_map.begin()->first);
            auto src_data = src->buffer().as<float*>();
            for (int i=0; i != p.in.n; i++) {
                fill_data(src_data + p.in.w * p.in.h * p.in.c * i, src->size() / p.in.n);
            }

            inferRequest.Infer();
            auto dst = inferRequest.GetBlob(out_info_map.begin()->first);

            check_softmax_fwd(dst->buffer().as<float*>(), p);
        } catch (const InferenceEngine::details::InferenceEngineException &e) {
            FAIL() << e.what();
        }
    }
};
// Shorthand softmax configurations: {{W, H, C, N}, axis}.
#define case_1 softmax_base_params({{228, 228, 3, 1}, 1})
#define case_8 softmax_base_params({{228, 228, 3, 8}, 1})
#define case_8_nc softmax_base_params({{1, 1, 228*228*3, 8}, 1})

// Test body lives entirely in SetUp(); the macro just instantiates the case.
TEST_P(SoftmaxOnlyTest, TestsSoftmax) {}
// Composes the test-case name as "<device>_h<H>_w<W>_c<C>_b<N>".
std::string getTestCaseName(testing::TestParamInfo<softmax_test_params> obj) {
    const auto& in = obj.param.in;
    std::string name = obj.param.device_name;
    name += "_h" + std::to_string(in.h);
    name += "_w" + std::to_string(in.w);
    name += "_c" + std::to_string(in.c);
    name += "_b" + std::to_string(in.n);
    return name;
}

View File

@ -1,182 +0,0 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <cmath>
#include "tests_common.hpp"
#include "single_layer_common.hpp"
#include <ie_core.hpp>
using namespace ::testing;
using namespace InferenceEngine;
using namespace std;
// Parameters for a single SpaceToDepth test case.
struct space_to_depth_test_params {
    std::string device_name;              // target plugin/device
    std::string inPrecision;              // input precision name inserted into the IR
    InferenceEngine::SizeVector in_dim;   // input shape, indexed as (N, C, H, W) by the reference
    std::string depth_mode;               // "blocks_first" or "depth_first"
    size_t block_size;                    // spatial block size
    InferenceEngine::SizeVector ref_dim;  // expected output shape (N, C, H, W)
};
// Reference implementation of SpaceToDepth used to validate plugin output.
// Moves block_size x block_size spatial blocks of the single input into the
// channel dimension according to prm.depth_mode, writing into dsts[0].
template<typename data_t>
void ref_spaceToDepth(const std::vector<Blob::Ptr> &srcs, std::vector<Blob::Ptr> &dsts, space_to_depth_test_params& prm) {
    assert(dsts.size() == 1);

    data_t *out_ptr = dsts[0]->buffer().as<data_t*>();
    const data_t *in_ptr = srcs[0]->buffer().as<data_t*>();

    const size_t in_c = prm.in_dim[1];
    const size_t in_h = prm.in_dim[2];
    const size_t in_w = prm.in_dim[3];

    const size_t out_n = prm.ref_dim[0];
    const size_t out_c = prm.ref_dim[1];
    const size_t out_h = prm.ref_dim[2];
    const size_t out_w = prm.ref_dim[3];

    const bool blocks_first = (prm.depth_mode == "blocks_first");
    if (!blocks_first && prm.depth_mode != "depth_first")
        FAIL() << " Invalid mode for spaceToDepth: must be \"blocks_first\" or \"depth_first\" only";

    const size_t bs = prm.block_size;
    if (bs < 1)
        FAIL() << " Invalid block size number: must be greater than or equal to 1";

    if (in_h % bs != 0 || in_w % bs != 0)
        FAIL () << " Invalid sizes of spatials x, y: must be divisible by block size";

    for (size_t b = 0; b < out_n; ++b) {
        for (size_t oc = 0; oc < out_c; ++oc) {
            // Map the output channel to an input channel plus a block offset.
            size_t block_off, ic;
            if (blocks_first) {
                block_off = oc / in_c;
                ic = oc % in_c;
            } else {
                block_off = oc % (bs * bs);
                ic = oc / (bs * bs);
            }
            for (size_t oy = 0; oy < out_h; ++oy) {
                const size_t iy = (oy * bs) + (block_off / bs);
                for (size_t ox = 0; ox < out_w; ++ox) {
                    const size_t ix = (ox * bs) + (block_off % bs);
                    const size_t in_idx  = (b * in_c * in_h * in_w) + (ic * in_h * in_w) + (iy * in_w) + ix;
                    const size_t out_idx = (b * out_c * out_h * out_w) + (oc * out_h * out_w) + (oy * out_w) + ox;
                    out_ptr[out_idx] = in_ptr[in_idx];
                }
            }
        }
    }
}
// Functional test fixture for the SpaceToDepth layer: builds a one-layer IR,
// runs inference on the target device and compares the first output against
// the ref_spaceToDepth() reference implementation.
class SpaceToDepthTests : public TestsCommon, public WithParamInterface<space_to_depth_test_params> {
    // IR template; _IPRS_/_IDIM_/_BS_/_DM_/_OUT_ placeholders are filled in getModel().
    std::string model_t = R"V0G0N(
<net Name="Space2depth_net" version="2" precision="FP32" batch="1">
<layers>
<layer name="Input0" type="Input" precision="_IPRS_" id="1">
<output>
<port id="1">
_IDIM_
</port>
</output>
</layer>
<layer name="SpaceToDepth" id="3" type="SpaceToDepth" precision="FP32">
<data block_size="_BS_" depth_mode="_DM_"/>
<input>
<port id="1">
_IDIM_
</port>
</input>
<output>
<port id="3">
_OUT_
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="1" from-port="1" to-layer="3" to-port="1"/>
</edges>
</net>
)V0G0N";

    // Builds the IR XML from the test parameters.
    std::string getModel(space_to_depth_test_params p) {
        std::string model = model_t;
        std::string inIdx;  // NOTE(review): declared but never used
        std::string inDict;
        std::string out;

        for (auto& dct : p.in_dim) {
            inDict += "<dim>";
            inDict += std::to_string(dct) + "</dim>\n";
        }

        for (auto& dst : p.ref_dim) {
            out += "<dim>";
            out += std::to_string(dst) + "</dim>\n";
        }

        REPLACE_WITH_STR(model, "_IPRS_", p.inPrecision);
        REPLACE_WITH_STR(model, "_IDIM_", inDict);
        REPLACE_WITH_STR(model, "_DM_", p.depth_mode);
        REPLACE_WITH_NUM(model, "_BS_", p.block_size);
        REPLACE_WITH_STR(model, "_OUT_", out);

        return model;
    }

protected:
    virtual void TearDown() {
    }

    // Fills inputs with a debug-value pattern, infers, and compares the device
    // output with the reference computed by ref_spaceToDepth<float>().
    virtual void SetUp() {
        try {
            space_to_depth_test_params p = ::testing::WithParamInterface<space_to_depth_test_params>::GetParam();
            std::string model = getModel(p);

            Core ie;
            CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
            InferRequest inferRequest = executable_network.CreateInferRequest();

            std::vector<Blob::Ptr> srcs_vec;  // inputs bound to the request
            std::vector<Blob::Ptr> dsts_vec;  // reference outputs
            std::vector<Blob::Ptr> out_vec;   // device outputs bound to the request

            // Allocate and fill all network inputs (FP32/NCHW blobs).
            InputsDataMap in_info_map = net.getInputsInfo();
            for (auto info : in_info_map) {
                Blob::Ptr blob = make_shared_blob<float>({Precision::FP32, info.second->getTensorDesc().getDims(), NCHW});
                blob->allocate();
                fill_data_dbgval(blob->buffer().as<float*>(), blob->size());
                inferRequest.SetBlob(info.first, blob);
                srcs_vec.push_back(blob);
            }

            // For every output: one blob bound to the request, one for the reference.
            OutputsDataMap out_info_map = net.getOutputsInfo();
            for (auto info : out_info_map) {
                Blob::Ptr blob = make_shared_blob<float>({Precision::FP32, info.second->getTensorDesc().getDims(), NCHW});
                blob->allocate();
                inferRequest.SetBlob(info.first, blob);
                out_vec.push_back(blob);

                Blob::Ptr blob_ref = make_shared_blob<float>({Precision::FP32, info.second->getTensorDesc().getDims(), NCHW});
                blob_ref->allocate();
                dsts_vec.push_back(blob_ref);
            }

            ref_spaceToDepth<float>(srcs_vec, dsts_vec, p);

            inferRequest.Infer();

            compare(*out_vec[0], *dsts_vec[0]);
        } catch (const InferenceEngine::details::InferenceEngineException &e) {
            FAIL() << e.what();
        }
    }
};

// Body intentionally empty: all work happens in SpaceToDepthTests::SetUp().
TEST_P(SpaceToDepthTests, TestsSpaceToDepth) {}

View File

@ -1,131 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "tests_common.hpp"
#include "single_layer_common.hpp"
#include <ie_core.hpp>
#include "ir_gen_helper.hpp"
#include "common_test_utils/data_utils.hpp"
using namespace ::testing;
using namespace InferenceEngine;
using namespace single_layer_tests;
// Base parameters for a Tile test case.
struct tile_test_base_params {
    SizeVector shape_1;  // input shape
    SizeVector shape_2;  // expected output shape after tiling
    int axis;            // axis to tile along
    int tiles;           // repetition count along the axis
};
// Full parameter set for TileTest: the base case parameters plus the target device.
struct tile_test_params : public tile_test_base_params {
    std::string device_name;

    // The base subobject is always initialized before members, so it is listed
    // first here (the original order triggered -Wreorder warnings); the name is
    // moved instead of copied since the parameter is taken by value.
    tile_test_params(std::string name, tile_test_base_params params)
        : tile_test_base_params(params), device_name(std::move(name)) {}
};
// Functional test fixture for the Tile layer: builds a one-layer IR, infers
// on a constant-filled input and checks the tiled output against a
// constant-filled reference of the expected shape.
class TileTest: public TestsCommon, public WithParamInterface<tile_test_params> {
    // IR template; _SHAPE_1_/_SHAPE_2_/_AXIS_/_TILES_ are substituted in getModel().
    std::string model_t = R"V0G0N(
<net batch="1" name="tile_net" version="5">
<layers>
<layer id="0" name="data" precision="FP32" type="Input">
<output>
<port id="0">
_SHAPE_1_
</port>
</output>
</layer>
<layer id="1" name="tile" precision="FP32" type="Tile">
<data axis="_AXIS_" tiles="_TILES_" />
<input>
<port id="0">
_SHAPE_1_
</port>
</input>
<output>
<port id="1">
_SHAPE_2_
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
</edges>
</net>
)V0G0N";

    // Renders a shape as a sequence of <dim> XML elements.
    std::string shape_xml(const SizeVector shape) {
        std::string res;
        for (auto dim : shape)
            res += "<dim>" + std::to_string(dim) + "</dim>";
        return res;
    }

    // Builds the Tile IR from the current test parameters.
    std::string getModel() {
        auto p = ::testing::WithParamInterface<tile_test_params>::GetParam();

        auto shape_1_xml = shape_xml(p.shape_1);
        auto shape_2_xml = shape_xml(p.shape_2);

        std::string model = model_t;
        REPLACE_WITH_STR(model, "_SHAPE_1_", shape_1_xml);
        REPLACE_WITH_STR(model, "_SHAPE_2_", shape_2_xml);
        REPLACE_WITH_NUM(model, "_AXIS_", p.axis);
        REPLACE_WITH_NUM(model, "_TILES_", p.tiles);
        return model;
    }

protected:
    virtual void SetUp() {
        try {
            auto p = GetParam();
            std::string model = getModel();

            InferenceEngine::Core ie;
            auto network = ie.ReadNetwork(model, Blob::CPtr());
            auto exec = ie.LoadNetwork(network, p.device_name);
            auto req = exec.CreateInferRequest();

            auto in_blob = req.GetBlob("data");
            CommonTestUtils::fill_data_const(in_blob, 7);
            req.Infer();

            // Tiling a constant tensor yields the same constant, so the
            // reference is simply a constant-filled blob of the output shape.
            TensorDesc desc {Precision::FP32, p.shape_2, TensorDesc::getLayoutByDims(p.shape_2)};
            Blob::Ptr out_ref = make_shared_blob<float>(desc);
            out_ref->allocate();
            CommonTestUtils::fill_data_const(out_ref, 7);

            compare(*out_ref, *req.GetBlob("tile"));
        } catch (const InferenceEngine::details::InferenceEngineException &e) {
            FAIL() << e.what();
        }
    }
};
// Parameter presets: { input shape, expected output shape, axis, tiles },
// covering 1-D through 5-D tensors and every tileable axis.
#define case_1 tile_test_base_params{ {1}, {5}, 0, 5 }
#define case_2 tile_test_base_params{ {2}, {6}, 0, 3 }
#define case_3 tile_test_base_params{ {1, 3}, {5, 3}, 0, 5 }
#define case_4 tile_test_base_params{ {1, 3}, {1, 6}, 1, 2 }
#define case_5 tile_test_base_params{ {1, 2, 3}, {5, 2, 3}, 0, 5 }
#define case_6 tile_test_base_params{ {1, 2, 3}, {1, 4, 3}, 1, 2 }
#define case_7 tile_test_base_params{ {1, 2, 3}, {1, 2, 6}, 2, 2 }
#define case_8 tile_test_base_params{ {1, 2, 3, 4}, {5, 2, 3, 4}, 0, 5 }
#define case_9 tile_test_base_params{ {1, 2, 3, 4}, {1, 4, 3, 4}, 1, 2 }
#define case_10 tile_test_base_params{ {1, 2, 3, 4}, {1, 2, 6, 4}, 2, 2 }
#define case_11 tile_test_base_params{ {1, 2, 3, 4}, {1, 2, 3, 8}, 3, 2 }
#define case_12 tile_test_base_params{ {1, 2, 3, 4, 2}, {5, 2, 3, 4, 2}, 0, 5 }
#define case_13 tile_test_base_params{ {1, 2, 3, 4, 2}, {1, 4, 3, 4, 2}, 1, 2 }
#define case_14 tile_test_base_params{ {1, 2, 3, 4, 2}, {1, 2, 6, 4, 2}, 2, 2 }
#define case_15 tile_test_base_params{ {1, 2, 3, 4, 2}, {1, 2, 3, 8, 2}, 3, 2 }
#define case_16 tile_test_base_params{ {1, 2, 3, 4, 2}, {1, 2, 3, 4, 4}, 4, 2 }

// Body intentionally empty: all work happens in TileTest::SetUp().
TEST_P(TileTest, TestsGeneralTile) {}

View File

@ -1,299 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <ie_core.hpp>
#include <cmath>
#include "tests_common.hpp"
#include "single_layer_common.hpp"
using namespace ::testing;
using namespace InferenceEngine;
using namespace std;
// Parameters for a single TopK test case.
struct topk_test_params {
    std::string device_name;    // target plugin/device
    SizeVector in_shape;        // input tensor shape
    int axis;                   // axis to select top-k along (may be negative)
    std::vector<size_t> src_k;  // k value(s); only src_k[0] is used by the reference
    std::string sort;           // "value" or index order otherwise
    std::string mode;           // "max" or "min"
    SizeVector out_shape;       // expected output shape
    Precision precision;        // data precision: FP32 or I32 (see getModel switch)
};
// Product of dims[start_ind .. end_ind); returns 1 for an empty range.
static inline int count(std::vector<size_t> dims, size_t start_ind, size_t end_ind) {
    size_t total = 1;
    while (start_ind < end_ind)
        total *= dims[start_ind++];
    return static_cast<int>(total);
}

// Convenience overload: product of all dims starting at start_ind.
static inline int count(std::vector<size_t> dims, size_t start_ind = 0) {
    return count(dims, start_ind, dims.size());
}
// Reference TopK: selects the top src_k[0] values (and their indices) of `src`
// along axis p.axis, in "max" or "min" mode; results are optionally sorted by
// value ("value") or reordered by original index otherwise. Values go to
// dst_data, indices to dst_indx.
template <typename T>
static void ref_topk(TBlob<T> &src, TBlob<T> &dst_data, TBlob<int> &dst_indx, topk_test_params p) {
    T* src_data = src.data();
    T* dst_val = dst_data.data();
    int* dst_idx = dst_indx.data();

    int dim, axis_dist;
    int src_k = static_cast<int>(p.src_k[0]);  // only the first k value is used

    SizeVector src_dims = src.getTensorDesc().getDims();;
    // Normalize a negative axis.
    int axis_ = p.axis;
    if (axis_ < 0)
        axis_ += src_dims.size();

    size_t axis = static_cast<size_t>(axis_);

    if (src_dims.size() < (1 + axis))
        FAIL() << " Incorrect input parameters dimensions and axis number!";

    bool mode_max;
    if (p.mode == "max")
        mode_max = true;
    else
        mode_max = false;

    bool sort_value;
    if (p.sort == "value")
        sort_value = true;
    else
        sort_value = false;

    // Find the last dimension of size > 1.
    int j;
    for (j = src_dims.size() - 1; j >= 0; j--) {
        if (src_dims[j] != 1) break;
    }
    if (static_cast<size_t>(j) == axis) {
        // Axis is the innermost meaningful dimension: elements are contiguous.
        dim = count(src_dims, static_cast<size_t>(j));
        axis_dist = 1;
    } else {
        int axis_ = (p.axis < 0) ? p.axis + static_cast<int>(src_dims.size()) : p.axis;
        dim = static_cast<int>(src_dims[axis_]);
        // Stride between consecutive elements along the axis.
        axis_dist = count(src_dims, axis_) / dim;
    }

    int num = count(src_dims) / dim;  // number of independent 1-D slices
    std::vector<std::pair<T, int> > src_vector(src_k);

    for (int i = 0; i < num; ++i) {
        // Seed the running top-k window with the first src_k slice elements,
        // keeping it sorted (descending for max, ascending for min).
        src_vector[0] = std::make_pair(src_data[(i / axis_dist * dim) * axis_dist + i % axis_dist], 0);
        for (j = 1; j < src_k; ++j) {
            src_vector[j] = std::make_pair(src_data[(i / axis_dist * dim + j) * axis_dist + i % axis_dist], j);
            if (mode_max) {
                if (src_vector[j].first > src_vector[j - 1].first)
                    std::sort(src_vector.begin(), src_vector.begin() + j + 1, std::greater<std::pair<T, int> >());
            } else {
                if (src_vector[j].first < src_vector[0].first)
                    std::sort(src_vector.begin(), src_vector.begin() + j + 1, std::less<std::pair<T, int> >());
            }
        }

        // Scan the remainder of the slice, displacing the current worst entry
        // whenever a better value is found.
        for (; j < dim; ++j) {
            T value = src_data[(i / axis_dist * dim + j) * axis_dist + i % axis_dist];
            if (mode_max) {
                if (value > src_vector[src_k - 1].first) {
                    src_vector[src_k - 1] = std::make_pair(value, j);
                    std::sort(src_vector.begin(), src_vector.end(), std::greater<std::pair<T, int> >());
                }
            } else {
                if (value < src_vector[0].first) {
                    src_vector[src_k - 1] = std::make_pair(value, j);
                    std::sort(src_vector.begin(), src_vector.end(), std::less<std::pair<T, int> >());
                }
            }
        }

        // sort != "value": reorder the selected entries by original index.
        // NOTE(review): the lambda takes pair<int,int> while the elements are
        // pair<T,int>; this compiles via pair's converting constructor but
        // builds temporaries (and truncates T to int) on every comparison --
        // harmless here since only .second is compared.
        if (!sort_value)
            std::sort(src_vector.begin(), src_vector.begin() + src_k, [&src_vector](const pair<int, int> &a, const pair<int, int> &b)
            { return (a.second < b.second); });

        for (int j = 0; j < src_k; ++j) {
            if (axis_dist != 1) {
                // Produces max_val per axis
                dst_val[(i / axis_dist * src_k + j) * axis_dist + i % axis_dist] = src_vector[j].first;
                dst_idx[(i / axis_dist * src_k + j) * axis_dist + i % axis_dist] = src_vector[j].second;
            } else {
                // Produces max_ind and max_val
                dst_val[i * src_k + j] = src_vector[j].first;
                dst_idx[i * src_k + j] = src_vector[j].second;
            }
        }
    }
}
// Functional test fixture for the TopK layer, templated on the input element
// type. Builds a two-input IR (data + constant k), infers on the device and
// compares both outputs (values and indices) against ref_topk().
template <typename src_data_t>
class TopKTests : public TestsCommon, public WithParamInterface<topk_test_params> {
    // IR template; placeholders are substituted in getModel(). The "src_k"
    // Const layer reads k from the weights blob attached in SetUp().
    std::string model_t = (std::string)R"V0G0N(
<net Name="TopK_net" version="2" precision="_SRC_DATA_T_" batch="1">
<layers>
<layer name="value" type="Input" precision="_SRC_DATA_T_" id="1">
<output>
<port id="1">
_IN_
</port>
</output>
</layer>
<layer name="src_k" type="Const" precision="I32" id="2">
<output>
<port id="2"/>
</output>
<blobs>
<custom offset="0" size="1"/>
</blobs>
</layer>
<layer name="output" id="3" type="TopK">
<data axis="_AXIS_" sort="_SORT_" mode="_MODE_"/>
<input>
<port id="1">
_IN_
</port>
<port id="2"/>
</input>
<output>
<port id="3" precision="_SRC_DATA_T_">
_OUT_
</port>
<port id="4" precision="I32">
_OUT_
</port>
</output>
</layer>
</layers>
<edges>
<edge from-layer="1" from-port="1" to-layer="3" to-port="1"/>
<edge from-layer="2" from-port="2" to-layer="3" to-port="2"/>
</edges>
</net>
)V0G0N";

    // Builds the IR XML: fills shapes, precision and TopK attributes.
    std::string getModel(topk_test_params p) {
        std::string model = model_t;
        std::string in_shape;
        std::string out_shape;

        for (size_t i = 0; i < p.out_shape.size(); i++) {
            out_shape += "<dim>";
            out_shape += std::to_string(p.out_shape[i]) + "</dim>\n";
        }
        REPLACE_WITH_STR(model, "_OUT_", out_shape);

        for (auto& dct : p.in_shape) {
            in_shape += "<dim>";
            in_shape += std::to_string(dct) + "</dim>\n";
        }

        switch (p.precision) {
            case Precision::FP32:
                REPLACE_WITH_STR(model, "_SRC_DATA_T_", "FP32"); break;
            case Precision::I32:
                REPLACE_WITH_STR(model, "_SRC_DATA_T_", "I32"); break;
            default:
                THROW_IE_EXCEPTION << "Unsupported test precision";
        }

        REPLACE_WITH_STR(model, "_IN_", in_shape);
        REPLACE_WITH_STR(model, "_SORT_", p.sort);
        REPLACE_WITH_STR(model, "_MODE_", p.mode);
        REPLACE_WITH_NUM(model, "_AXIS_", p.axis);

        return model;
    }

protected:
    virtual void TearDown() {
    }

    // Builds the network with k packed into the weights blob, infers on an
    // alternating positive/negative input and compares values and indices
    // element-wise against the reference.
    virtual void SetUp() {
        try {
            TestsCommon::SetUp();
            topk_test_params p = ::testing::WithParamInterface<topk_test_params>::GetParam();
            std::string model = getModel(p);

            // Weights blob backing the "src_k" Const layer (k as int32);
            // ownership transfers to the Ptr passed to ReadNetwork below.
            TBlob<uint8_t>* top_k = new TBlob<uint8_t>(
                { Precision::U8,{ p.src_k.size() * sizeof(int32_t) }, Layout::C });
            top_k->allocate();
            for (size_t i = 0; i < p.src_k.size(); i++) {
                ((int32_t *) top_k->buffer())[i] = p.src_k[i];
            }

            Core ie;
            CNNNetwork net = ie.ReadNetwork(model, TBlob<uint8_t>::Ptr(top_k));
            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
            InferRequest inferRequest = executable_network.CreateInferRequest();

            // Output Data: first output carries values, second carries indices.
            OutputsDataMap out;
            out = net.getOutputsInfo();
            BlobMap outputBlobs;

            auto it = out.begin();
            std::pair<std::string, DataPtr> item0 = *it;
            std::pair<std::string, DataPtr> item1 = *(++it);

            typename TBlob<src_data_t>::Ptr output0;
            output0 = make_shared_blob<src_data_t>(item0.second->getTensorDesc());
            output0->allocate();
            inferRequest.SetBlob(item0.first, output0);

            TBlob<int>::Ptr output1;
            output1 = make_shared_blob<int>(item1.second->getTensorDesc());
            output1->allocate();
            inferRequest.SetBlob(item1.first, output1);

            // Input Data: even positions get i, odd positions get -3*i.
            Blob::Ptr src;
            src = make_shared_blob<src_data_t>({ p.precision, p.in_shape, TensorDesc::getLayoutByDims(p.in_shape) });
            src->allocate();
            for (size_t i = 0; i < src->size(); i++) {
                src->buffer().as<src_data_t*>()[i] = i % 2 == 0 ? static_cast<src_data_t>(i) : static_cast<src_data_t>(-1.f * i - i * 2);
            }
            inferRequest.SetBlob("value", src);

            // Output Reference
            TBlob<src_data_t> dst_data_ref(item0.second->getTensorDesc());
            dst_data_ref.allocate();
            TBlob<int> dst_indx_ref(item1.second->getTensorDesc());
            dst_indx_ref.allocate();
            auto* srcPtr = dynamic_cast<TBlob<src_data_t>*>(src.get());
            if (srcPtr == nullptr)
                FAIL() << "Cannot cast blob to TBlob<src_data_t>.";
            ref_topk<src_data_t>(*srcPtr, dst_data_ref, dst_indx_ref, p);

            inferRequest.Infer();

            // Compare values element-wise against the reference.
            for (size_t i = 0; i < dst_data_ref.size(); i++) {
                if (dst_data_ref.buffer().template as<src_data_t*>()[i] != output0.get()->buffer().template as<src_data_t*>()[i]) {
                    FAIL() << "The difference between ref_val " << dst_data_ref.buffer().template as<src_data_t*>()[i] <<
                           " and res_val " << output0.get()->buffer().template as<src_data_t*>()[i] << " at " << i << " index";
                }
            }
            // Compare indices (both output tensors have the same element count).
            for (size_t i = 0; i < dst_data_ref.size(); i++) {
                if (dst_indx_ref.buffer().as<int*>()[i] != output1.get()->buffer().as<int*>()[i]) {
                    FAIL() << "The difference between ref_idx " << dst_indx_ref.buffer().as<int*>()[i] <<
                           " and res_idx " << output1.get()->buffer().as<int*>()[i] << " at " << i << " index";
                }
            }
        } catch (const details::InferenceEngineException &e) {
            FAIL() << e.what();
        }
    }
};
// Instantiations for the two precisions supported by getModel().
using topk_test_int32 = TopKTests<int32_t>;
using topk_test_fp32 = TopKTests<float>;

// Bodies intentionally empty: all work happens in TopKTests::SetUp().
TEST_P(topk_test_int32, TestsTopK_I32) {}
TEST_P(topk_test_fp32, TestsTopK_FP32) {}

View File

@ -1,244 +0,0 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <ie_core.hpp>
#include <cmath>
#include <string>
#include "tests_common.hpp"
#include "single_layer_common.hpp"
using namespace ::testing;
using namespace InferenceEngine;
using namespace std;
// Parameters for a single VariadicSplit test case.
struct variadic_split_params
{
    std::string device_name;              // target plugin/device
    int axis;                             // axis to split along
    std::vector<int> variadic_lenghts;    // [sic] split lengths along the axis
    SizeVector input_dims;                // input shape (B, F, H, W)
    std::vector<SizeVector> output_dims;  // expected per-output shapes
};
// Functional test fixture for the VariadicSplit layer (opset1, IR v10). The IR
// is generated with one Result layer per split output; after inference each
// device output is compared element-wise against the matching slice of the input.
class VariadicSplitTests : public TestsCommon, public WithParamInterface<variadic_split_params> {
    // IR template. NOTE(review): getModel() substitutes placeholders directly
    // into this member, so the model can only be generated once per fixture
    // instance -- fine here since SetUp() runs once per test.
    std::string model_base = R"V0G0N(
<net name="Activation" version="10">
<layers>
<layer id="0" name="in1" type="Parameter" version="opset1">
<data element_type="f32" shape="_IB_,_IC_,_IH_,_IW_"/>
<output>
<port id="0" precision="FP32">
<dim>_IB_</dim>
<dim>_IC_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
</output>
</layer>
<layer id="1" name="const1" type="Const" version="opset1">
<data offset="0" size="8"/>
<output>
<port id="0" precision="I64"/>
</output>
</layer>
<layer id="2" name="const2" type="Const" version="opset1">
<data offset="8" size="_VARIADIC_LENGHTS_BYTE_SIZE_"/>
<output>
<port id="0" precision="I64">
<dim>_VARIADIC_LENGHTS_SIZE_</dim>
</port>
</output>
</layer>
<layer id="3" name="split" type="VariadicSplit" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>_IB_</dim>
<dim>_IC_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
<port id="1" precision="I64"/>
<port id="2" precision="I64">
<dim>_VARIADIC_LENGHTS_SIZE_</dim>
</port>
</input>
<output>
_VARIADIC_OUTPUTS_
</output>
</layer>
_OUTPUT_LAYERS_
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="3" to-port="0"/>
<edge from-layer="1" from-port="0" to-layer="3" to-port="1"/>
<edge from-layer="2" from-port="0" to-layer="3" to-port="2"/>
_OUTPUT_PORTS_
</edges>
</net>
)V0G0N";

    // Builds the IR: generates one output port on the split layer, one Result
    // layer, and one connecting edge per requested output, then fills all
    // placeholders in model_base.
    std::string getModel(variadic_split_params p) {
        std::string variadic_outputs, output_layers, output_ports;

        size_t variadic_port_id = 3;
        for (auto& size_vector : p.output_dims) {
            variadic_outputs += "<port id=\"" + std::to_string(variadic_port_id) + "\" precision=\"FP32\">\n";
            variadic_outputs += "<dim>" + std::to_string(size_vector[0]) + "</dim>\n";
            variadic_outputs += "<dim>" + std::to_string(size_vector[1]) + "</dim>\n";
            variadic_outputs += "<dim>" + std::to_string(size_vector[2]) + "</dim>\n";
            variadic_outputs += "<dim>" + std::to_string(size_vector[3]) + "</dim>\n";
            variadic_outputs += "</port>\n";
            variadic_port_id++;
        }

        size_t layer_id = 4;
        size_t layer_name_id = 1;
        for (auto& size_vector : p.output_dims) {
            output_layers += "<layer name=\"output" + std::to_string(layer_name_id) + "\" type=\"Result\" id=\"" + std::to_string(layer_id) + "\" version=\"opset1\">\n";
            output_layers += "<input>\n";
            output_layers += "<port id=\"0\" precision=\"FP32\">\n";
            output_layers += "<dim>" + std::to_string(size_vector[0]) + "</dim>\n";
            output_layers += "<dim>" + std::to_string(size_vector[1]) + "</dim>\n";
            output_layers += "<dim>" + std::to_string(size_vector[2]) + "</dim>\n";
            output_layers += "<dim>" + std::to_string(size_vector[3]) + "</dim>\n";
            output_layers += "</port>\n";
            output_layers += "</input>\n";
            output_layers += "</layer>\n";
            layer_id++;
            layer_name_id++;
        }

        // Edge from split port id (3..N+2) to the matching Result layer (4..N+3).
        for (int id = 3; id < p.variadic_lenghts.size() + 3; id++) {
            output_ports += "<edge from-layer=\"3\" from-port=\"" + std::to_string(id) + "\" to-layer=\"" + std::to_string(id + 1) + "\" to-port=\"0\"/>\n";
        }

        REPLACE_WITH_STR(model_base, "_IB_", std::to_string(p.input_dims[0]));
        REPLACE_WITH_STR(model_base, "_IC_", std::to_string(p.input_dims[1]));
        REPLACE_WITH_STR(model_base, "_IH_", std::to_string(p.input_dims[2]));
        REPLACE_WITH_STR(model_base, "_IW_", std::to_string(p.input_dims[3]));

        REPLACE_WITH_STR(model_base, "_VARIADIC_LENGHTS_BYTE_SIZE_", std::to_string(p.variadic_lenghts.size() * sizeof(int64_t)));
        REPLACE_WITH_STR(model_base, "_VARIADIC_LENGHTS_SIZE_", std::to_string(p.variadic_lenghts.size()));
        REPLACE_WITH_STR(model_base, "_VARIADIC_OUTPUTS_", variadic_outputs);
        REPLACE_WITH_STR(model_base, "_OUTPUT_LAYERS_", output_layers);
        REPLACE_WITH_STR(model_base, "_OUTPUT_PORTS_", output_ports);

        return model_base;
    }

    // Linear offset of element (b, f, h, w) in a BFHW-shaped tensor.
    size_t get_index_bfhw(SizeVector tensor, size_t b, size_t f, size_t h, size_t w)
    {
        size_t res = 0;
        res += b * (tensor[1] * tensor[2] * tensor[3]);
        res += f * (tensor[2] * tensor[3]);
        res += h * (tensor[3]);
        res += w;
        return res;
    }

    // Walks each output tensor and asserts that every element equals the
    // corresponding element of the input; the input window is advanced by
    // variadic_lenghts[output_id] along the split axis after each output.
    void check_buffers_after_split(InferRequest& inf_req, InputsDataMap& inputs, OutputsDataMap& outputs, variadic_split_params vs_params){
        Blob::Ptr inputBlob = inf_req.GetBlob(inputs.begin()->first);
        float* src_ptr = inputBlob->buffer().as<float*>();

        size_t outputs_number = outputs.size();
        std::vector<const float*> output_ptrs(outputs_number);

        // Getting raw output pointers
        OutputsDataMap::iterator output_it = outputs.begin();
        for (size_t index = 0; index < outputs_number; ++index) {
            Blob::Ptr temp_blob = inf_req.GetBlob(output_it->first);
            output_ptrs[index] = temp_blob->buffer().as<float*>();
            output_it++;
        }

        // Getting number of elements inside buffer
        // NOTE(review): input_tensor_size and output_tensor_sizes are computed
        // but never read below.
        auto input_tensor = vs_params.input_dims;
        size_t input_tensor_size = input_tensor[0] * input_tensor[1] * input_tensor[2] * input_tensor[3];
        std::vector<size_t> output_tensor_sizes(outputs_number);
        for (size_t output_id = 0; output_id < outputs_number; ++output_id) {
            auto output_tensors = vs_params.output_dims;
            output_tensor_sizes[output_id] =
                output_tensors[output_id][0] * output_tensors[output_id][1] * output_tensors[output_id][2] * output_tensors[output_id][3];
        }

        // Comparing input and output buffers
        SizeVector input_it_tensor = { 0, 0, 0, 0 };  // origin of the current output's window in the input
        SizeVector output_tensor = { 0, 0, 0, 0 };    // current coordinate inside the output
        for (size_t output_id = 0; output_id < outputs_number; ++output_id) {
            // Tensor iteration
            for (size_t b = input_it_tensor[0]; b < input_it_tensor[0] + vs_params.output_dims[output_id][0]; b++) {
                for (size_t f = input_it_tensor[1]; f < input_it_tensor[1] + vs_params.output_dims[output_id][1]; f++) {
                    for (size_t h = input_it_tensor[2]; h < input_it_tensor[2] + vs_params.output_dims[output_id][2]; h++) {
                        for (size_t w = input_it_tensor[3]; w < input_it_tensor[3] + vs_params.output_dims[output_id][3]; w++) {
                            ASSERT_EQ(
                                src_ptr[get_index_bfhw(vs_params.input_dims, b, f, h, w)],
                                output_ptrs[output_id][get_index_bfhw(vs_params.output_dims[output_id], output_tensor[0], output_tensor[1], output_tensor[2], output_tensor[3])]
                            );
                            output_tensor[3]++;
                        }
                        output_tensor[3] = 0;
                        output_tensor[2]++;
                    }
                    output_tensor[2] = 0;
                    output_tensor[1]++;
                }
                output_tensor[1] = 0;
                output_tensor[0]++;
            }
            output_tensor = { 0, 0, 0, 0 };
            // Advance the input window along the split axis.
            input_it_tensor[vs_params.axis] += vs_params.variadic_lenghts[output_id];
        }
    }

protected:
    virtual void TearDown() {
    }

    // Packs axis + split lengths into the weights blob, builds and runs the
    // network, then validates every output slice against the input.
    virtual void SetUp() {
        try {
            variadic_split_params p = ::testing::WithParamInterface<variadic_split_params>::GetParam();

            // Fill weights data
            // Layout matches the Const layers in the IR: int64 axis at offset 0,
            // followed by the int64 split lengths at offset 8.
            auto fillBlob = [p](Blob::Ptr& weights) {
                auto* data = weights->buffer().as<int64_t*>();
                data[0] = p.axis;
                size_t id = 1;
                for (auto& variadic_lenght : p.variadic_lenghts)
                {
                    data[id] = variadic_lenght;
                    id++;
                }
            };

            // Allocate weights data for axis + variadic_lenghts vector
            Blob::Ptr weights;
            weights = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, { (1 + p.variadic_lenghts.size()) * sizeof(int64_t) }, Layout::C));
            weights->allocate();
            // NOTE(review): this fill is redundant -- fillBlob() below
            // overwrites the entire buffer.
            fill_data((float*)weights->buffer(), weights->size() / sizeof(float));
            fillBlob(weights);

            Core ie;
            CNNNetwork net = ie.ReadNetwork(getModel(p), weights);
            InputsDataMap in_info_map = net.getInputsInfo();
            OutputsDataMap out_info_map = net.getOutputsInfo();

            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name );
            InferRequest infer_request = executable_network.CreateInferRequest();

            // Generate input data
            Blob::Ptr inputBlob = infer_request.GetBlob(in_info_map.begin()->first);
            float* src_ptr = inputBlob->buffer().as<float*>();
            fill_data(src_ptr, inputBlob->size());

            infer_request.Infer();
            check_buffers_after_split(infer_request, in_info_map, out_info_map, p);
        }
        catch (const InferenceEngine::details::InferenceEngineException & e) {
            FAIL() << e.what();
        }
    }
};