diff --git a/inference-engine/src/gna_plugin/frontend/scale_factor_calc.hpp b/inference-engine/src/gna_plugin/frontend/scale_factor_calc.hpp
index 037dc357653..8ec4a690cab 100644
--- a/inference-engine/src/gna_plugin/frontend/scale_factor_calc.hpp
+++ b/inference-engine/src/gna_plugin/frontend/scale_factor_calc.hpp
@@ -309,7 +309,17 @@ class ScaleFactorPerLayer {
             case InferenceEngine::EltwiseLayer::Sub:
             case InferenceEngine::EltwiseLayer::Sum: {
                 // detect which input will be used as biases
-                if (LayerInfo(in0).has32BOutput()) {
+                auto findPrevFunctional = [](InferenceEngine::CNNLayerPtr layer) {
+                    auto prev = InferenceEngine::CNNNetPrevLayer(layer, 0);
+                    while (CNNNetHasPrevLayer(prev.get(), 0) && LayerInfo(prev).isNonFunctional()) {
+                        prev = InferenceEngine::CNNNetPrevLayer(prev, 0);
+                    }
+
+                    return prev;
+                };
+
+                if (LayerInfo(in0).has32BOutput() ||
+                    (LayerInfo(in0).isNonFunctional() && CNNNetHasPrevLayer(in0.get(), 0) && LayerInfo(findPrevFunctional(in0)).has32BOutput())) {
                     std::swap(in0, in1);
                     std::swap(quantParams0, quantParams1);
                 }
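Review note on the hunk above: for an Eltwise Sub/Sum, the GNA quantizer treats the 32-bit input as the biases input and swaps the operands so it lands in a known position. Previously the check looked only at the direct producer, so a non-functional layer (a reshape-like op such as Squeeze or Unsqueeze) sitting between an affine layer and the Eltwise hid the 32-bit output and the swap was skipped. The new findPrevFunctional lambda walks back through such layers first. A minimal self-contained sketch of that traversal idea, using a simplified stand-in type rather than the real InferenceEngine CNNLayer API:

    #include <memory>
    #include <string>

    // Simplified stand-in for a CNN layer -- illustration only, not the IE API.
    struct Layer {
        std::string type;             // e.g. "FullyConnected", "Reshape", "Squeeze"
        std::shared_ptr<Layer> prev;  // predecessor on input 0
        bool has32BOutput() const { return type == "FullyConnected"; }
        bool isNonFunctional() const { return type == "Reshape" || type == "Squeeze"; }
    };

    // Walk past shape-only layers to the first functional producer -- the same
    // idea as the findPrevFunctional lambda in the patch. The caller must first
    // check layer->prev != nullptr, mirroring the CNNNetHasPrevLayer guard.
    std::shared_ptr<Layer> findPrevFunctional(const std::shared_ptr<Layer>& layer) {
        auto prev = layer->prev;
        while (prev->prev && prev->isNonFunctional()) {
            prev = prev->prev;
        }
        return prev;
    }

The tests below build exactly this pattern (MatMul -> Unsqueeze -> Add -> Squeeze) to exercise the fix.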
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp
new file mode 100644
index 00000000000..2d90d3f8683
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp
@@ -0,0 +1,47 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "common_test_utils/test_constants.hpp"
+#include "subgraph_tests/matmul_squeeze_add.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+    InferenceEngine::Precision::FP32,
+    InferenceEngine::Precision::FP16
+};
+
+const std::vector<std::map<std::string, std::string>> configs = {
+    { }
+};
+
+std::vector<std::vector<size_t>> input_shapes = {
+    {1, 8},
+    {1, 42},
+    {1, 100},
+    {1, 128},
+    {1, 512}
+};
+
+std::vector<size_t> output_sizes = {
+    1000,
+    512,
+    128,
+    42,
+    16,
+    8
+};
+
+INSTANTIATE_TEST_CASE_P(MatmulSqueezeAdd, MatmulSqueezeAddTest,
+    ::testing::Combine(
+        ::testing::ValuesIn(netPrecisions),
+        ::testing::Values(CommonTestUtils::DEVICE_CPU),
+        ::testing::ValuesIn(configs),
+        ::testing::ValuesIn(input_shapes),
+        ::testing::ValuesIn(output_sizes)),
+    MatmulSqueezeAddTest::getTestCaseName);
+} // namespace
diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp
new file mode 100644
index 00000000000..ed95ba38486
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp
@@ -0,0 +1,50 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "common_test_utils/test_constants.hpp"
+#include "subgraph_tests/matmul_squeeze_add.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+    InferenceEngine::Precision::FP32,
+    InferenceEngine::Precision::FP16
+};
+
+const std::vector<std::map<std::string, std::string>> configs = {
+    {
+        {"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
+        {"GNA_SCALE_FACTOR_0", "81.9175"}
+    }
+};
+
+std::vector<std::vector<size_t>> input_shapes = {
+    {1, 8},
+    {1, 42},
+    {1, 100},
+    {1, 128},
+    {1, 512}
+};
+
+std::vector<size_t> output_sizes = {
+    1000,
+    512,
+    128,
+    42,
+    16,
+    8
+};
+
+INSTANTIATE_TEST_CASE_P(MatmulSqueezeAdd, MatmulSqueezeAddTest,
+    ::testing::Combine(
+        ::testing::ValuesIn(netPrecisions),
+        ::testing::Values(CommonTestUtils::DEVICE_GNA),
+        ::testing::ValuesIn(configs),
+        ::testing::ValuesIn(input_shapes),
+        ::testing::ValuesIn(output_sizes)),
+    MatmulSqueezeAddTest::getTestCaseName);
+} // namespace
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp
new file mode 100644
index 00000000000..5494c69993d
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp
@@ -0,0 +1,47 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "common_test_utils/test_constants.hpp"
+#include "subgraph_tests/matmul_squeeze_add.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+    InferenceEngine::Precision::FP32,
+    InferenceEngine::Precision::FP16
+};
+
+const std::vector<std::map<std::string, std::string>> configs = {
+    { }
+};
+
+std::vector<std::vector<size_t>> input_shapes = {
+    {1, 8},
+    {1, 42},
+    {1, 100},
+    {1, 128},
+    {1, 512}
+};
+
+std::vector<size_t> output_sizes = {
+    1000,
+    512,
+    128,
+    42,
+    16,
+    8
+};
+
+INSTANTIATE_TEST_CASE_P(MatmulSqueezeAdd, MatmulSqueezeAddTest,
+    ::testing::Combine(
+        ::testing::ValuesIn(netPrecisions),
+        ::testing::Values(CommonTestUtils::DEVICE_GPU),
+        ::testing::ValuesIn(configs),
+        ::testing::ValuesIn(input_shapes),
+        ::testing::ValuesIn(output_sizes)),
+    MatmulSqueezeAddTest::getTestCaseName);
+} // namespace
diff --git a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/matmul_squeeze_add.hpp b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/matmul_squeeze_add.hpp
new file mode 100644
index 00000000000..ca2bbaf4812
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/matmul_squeeze_add.hpp
@@ -0,0 +1,35 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include <map>
+
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "ngraph_functions/builders.hpp"
+#include "ngraph_functions/utils/ngraph_helpers.hpp"
+
+typedef std::tuple<
+    InferenceEngine::Precision,          // Network Precision
+    std::string,                         // Target Device
+    std::map<std::string, std::string>,  // Configuration
+    std::vector<size_t>,                 // Input Shapes
+    size_t                               // Output Size
+> matmulSqueezeAddParams;
+
+namespace LayerTestsDefinitions {
+
+class MatmulSqueezeAddTest : public testing::WithParamInterface<matmulSqueezeAddParams>,
+                             public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(testing::TestParamInfo<matmulSqueezeAddParams> obj);
+
+protected:
+    void SetUp() override;
+};
+
+} // namespace LayerTestsDefinitions
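Note on the instantiations above: all three device files reuse the shared MatmulSqueezeAddTest class declared in this header and differ only in the device constant and the plugin configuration. Only the GNA run carries a config, since GNA_SW_EXACT executes quantized and needs an explicit input scale factor. Each instantiation expands to the full Cartesian product of the parameter lists: 2 precisions x 1 config x 5 input shapes x 6 output sizes = 60 test cases per device.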
diff --git a/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/matmul_squeeze_add.cpp b/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/matmul_squeeze_add.cpp
new file mode 100644
index 00000000000..9ef0b93e774
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/matmul_squeeze_add.cpp
@@ -0,0 +1,91 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include <memory>
+#include <random>
+#include <chrono>
+
+#include "common_test_utils/common_utils.hpp"
+#include "functional_test_utils/blob_utils.hpp"
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "functional_test_utils/plugin_cache.hpp"
+#include "ngraph_functions/pass/convert_prc.hpp"
+
+#include "subgraph_tests/matmul_squeeze_add.hpp"
+
+#include "ngraph_functions/builders.hpp"
+
+namespace LayerTestsDefinitions {
+
+std::string MatmulSqueezeAddTest::getTestCaseName(testing::TestParamInfo<matmulSqueezeAddParams> obj) {
+    InferenceEngine::Precision netPrecision;
+    std::vector<size_t> inputShape;
+    std::size_t outputSize;
+    std::string targetDevice;
+    std::map<std::string, std::string> configuration;
+    std::tie(netPrecision, targetDevice, configuration, inputShape, outputSize) = obj.param;
+
+    std::ostringstream result;
+    result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_";
+    result << "OS=" << outputSize << "_";
+    result << "netPRC=" << netPrecision.name() << "_";
+    result << "targetDevice=" << targetDevice;
+    for (auto const& configItem : configuration) {
+        result << "_configItem=" << configItem.first << "_" << configItem.second;
+    }
+    return result.str();
+}
+
+void MatmulSqueezeAddTest::SetUp() {
+    auto generateFloatNumbers = [](float startFrom, float upTo, std::size_t vec_len) {
+        std::vector<float> res;
+
+        std::mt19937 gen(
+            static_cast<unsigned long>(std::chrono::high_resolution_clock::now().time_since_epoch().count()));
+
+        std::uniform_real_distribution<float> dist(startFrom, upTo);
+
+        for (int i = 0; i < vec_len; i++)
+            res.emplace_back(static_cast<float>(dist(gen)));
+
+        return res;
+    };
+
+    InferenceEngine::Precision netPrecision;
+    std::map<std::string, std::string> tempConfig;
+    std::vector<size_t> inputShape;
+    size_t outputSize;
+    std::tie(netPrecision, targetDevice, tempConfig, inputShape, outputSize) = this->GetParam();
+    configuration.insert(tempConfig.begin(), tempConfig.end());
+
+    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+
+    auto params = ngraph::builder::makeParams(ngPrc, { inputShape });
+
+    auto constant_0 = ngraph::builder::makeConstant(ngPrc, { outputSize, inputShape[1] },
+        generateFloatNumbers(0, 1, outputSize * inputShape[1]), false);
+    auto matmul_0 = std::make_shared<ngraph::op::MatMul>(params[0], constant_0, false, true);
+
+    auto constant_1 = std::make_shared<ngraph::op::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 1 }, std::vector<size_t>{0});
+    auto unsqueeze_0 = std::make_shared<ngraph::op::Unsqueeze>(matmul_0, constant_1);
+
+    auto constant_2 = ngraph::builder::makeConstant(ngPrc, { 1, inputShape[0], outputSize },
+        generateFloatNumbers(0, 1, inputShape[0] * outputSize), false);
+    auto add_0 = std::make_shared<ngraph::op::v1::Add>(unsqueeze_0, constant_2);
+
+    auto constant_3 = std::make_shared<ngraph::op::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 1 }, std::vector<size_t>{0});
+    auto squeeze_0 = std::make_shared<ngraph::op::Squeeze>(add_0, constant_3);
+
+    ngraph::ResultVector results {std::make_shared<ngraph::op::Result>(squeeze_0)};
+    function = std::make_shared<ngraph::Function>(results, params, "MatmulSqueezeAddTest");
+}
+
+TEST_P(MatmulSqueezeAddTest, CompareWithRefImpl) {
+    Run();
+};
+
+} // namespace LayerTestsDefinitions
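One caveat in the test source above: the generateFloatNumbers lambda seeds std::mt19937 from the wall clock, so the weight and bias constants differ on every run and a failing parameter combination cannot be reproduced bit-for-bit. A deterministic sketch of the same generator, with a fixed default seed that is an arbitrary illustrative value, not taken from the patch:

    #include <cstddef>
    #include <cstdint>
    #include <random>
    #include <vector>

    // Deterministic counterpart of the generateFloatNumbers lambda in SetUp();
    // the fixed default seed makes failures reproducible run-to-run.
    std::vector<float> generateFloatNumbers(float startFrom, float upTo, std::size_t vecLen,
                                            std::uint32_t seed = 42) {
        std::vector<float> res;
        res.reserve(vecLen);
        std::mt19937 gen(seed);  // fixed seed instead of high_resolution_clock
        std::uniform_real_distribution<float> dist(startFrom, upTo);
        for (std::size_t i = 0; i < vecLen; ++i) {
            res.push_back(dist(gen));
        }
        return res;
    }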