From 36f9d570233c3f3c983c2332f944065e08650ac1 Mon Sep 17 00:00:00 2001
From: Mikhail Ryzhov
Date: Tue, 10 Jan 2023 09:36:57 +0100
Subject: [PATCH] [GNA] Fixed transpose detection pattern (#14633)

* Fixed transpose detection pattern

* Added tests
---
 .../intel_gna/src/gna_graph_patterns.hpp      |   4 +-
 .../add_transpose_detection.cpp               | 129 ++++++++++++++++++
 2 files changed, 132 insertions(+), 1 deletion(-)
 create mode 100644 src/plugins/intel_gna/tests/functional/subgraph_tests/add_transpose_detection.cpp

diff --git a/src/plugins/intel_gna/src/gna_graph_patterns.hpp b/src/plugins/intel_gna/src/gna_graph_patterns.hpp
index f8c687a412b..8cf8981a876 100644
--- a/src/plugins/intel_gna/src/gna_graph_patterns.hpp
+++ b/src/plugins/intel_gna/src/gna_graph_patterns.hpp
@@ -283,7 +283,9 @@ inline std::vector<TranspositionInfo> FindTranspositionInfoFromPrevLayers(Infere
         if (LayerInfo(layer).isEltwise()) {
             auto input1 = InferenceEngine::CNNNetPrevLayer(layer, 0);
             auto input2 = InferenceEngine::CNNNetPrevLayer(layer, 1);
-            if (LayerInfo(input1).isConst()) return findTranspositionInfoRecursive(input2);
+            if (LayerInfo(input1).isConst() || LayerInfo(input1).isInput()) {
+                return findTranspositionInfoRecursive(input2);
+            }
             return findTranspositionInfoRecursive(input1);
         }

diff --git a/src/plugins/intel_gna/tests/functional/subgraph_tests/add_transpose_detection.cpp b/src/plugins/intel_gna/tests/functional/subgraph_tests/add_transpose_detection.cpp
new file mode 100644
index 00000000000..1b8281e728e
--- /dev/null
+++ b/src/plugins/intel_gna/tests/functional/subgraph_tests/add_transpose_detection.cpp
@@ -0,0 +1,129 @@
+// Copyright (C) 2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "shared_test_classes/base/layer_test_utils.hpp"
+#include "ngraph_functions/builders.hpp"
+#include "openvino/opsets/opset9.hpp"
+
+using namespace ov::opset9;
+
+typedef std::tuple<InferenceEngine::Precision,          // Network Precision
+                   std::string,                         // Target Device
+                   std::map<std::string, std::string>,  // Configuration
+                   std::vector<size_t>,                 // Input Shape
+                   ngraph::helpers::InputLayerType,     // Type of Eltwise input
+                   size_t>                              // Order of Eltwise input
+    InputConvAddParams;
+
+namespace LayerTestsDefinitions {
+
+class InputConvAddTransposing : public testing::WithParamInterface<InputConvAddParams>,
+                                public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(testing::TestParamInfo<InputConvAddParams> obj) {
+        InferenceEngine::Precision precision;
+        std::string targetDevice;
+        std::map<std::string, std::string> configuration;
+        std::vector<size_t> input_shape;
+        ngraph::helpers::InputLayerType input_eltwise_type;
+        size_t input_eltwise_order;
+
+        std::tie(precision, targetDevice, configuration, input_shape, input_eltwise_type, input_eltwise_order) = obj.param;
+
+        std::ostringstream result;
+        result << "netPRC=" << precision.name() << "_";
+        result << "targetDevice=" << targetDevice << "_";
+
+        for (auto const& configItem : configuration) {
+            result << "_configItem=" << configItem.first << "_" << configItem.second;
+        }
+        result << "_inputShape=" << CommonTestUtils::vec2str(input_shape);
+        result << "_input_eltwise_type=" << input_eltwise_type;
+        result << "_input_eltwise_order=" << input_eltwise_order;
+
+        return result.str();
+    }
+
+protected:
+    const std::vector<size_t> filter_size{1, 1};
+    const std::vector<size_t> strides{1, 1};
+    const std::vector<ptrdiff_t> pads_begin{0, 0};
+    const std::vector<ptrdiff_t> pads_end{0, 0};
+    const std::vector<size_t> dilations{1, 1};
+    const ov::op::PadType pad_type = ov::op::PadType::EXPLICIT;
+    const size_t out_channels_num = 8;
+    const size_t c_index_in_nchw = 1;
+
+    void SetUp() override {
+        InferenceEngine::Precision precision;
+        std::vector<size_t> input_shape;
+        ngraph::helpers::InputLayerType input_eltwise_type;
+        size_t input_eltwise_order;
+        std::tie(precision, targetDevice, configuration, input_shape, input_eltwise_type, input_eltwise_order) = this->GetParam();
+
+        auto ng_precision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(precision);
+
+        auto input = std::make_shared<Parameter>(ng_precision, ov::Shape{input_shape});
+
+        const auto weights_size = ov::shape_size(filter_size) * out_channels_num * input_shape[c_index_in_nchw];
+        auto weights_values = CommonTestUtils::generate_float_numbers(weights_size, -0.2f, 0.2f);
+
+        auto convolution = ngraph::builder::makeConvolution(input,
+                                                            ng_precision,
+                                                            filter_size,
+                                                            strides,
+                                                            pads_begin,
+                                                            pads_end,
+                                                            dilations,
+                                                            pad_type,
+                                                            out_channels_num,
+                                                            false,
+                                                            weights_values);
+        // Add the Convolution output to either a Constant or the network Parameter,
+        // with the operand order selected by input_eltwise_order.
+        std::shared_ptr<Add> add;
+        if (input_eltwise_type == ngraph::helpers::InputLayerType::CONSTANT) {
+            auto const_node = std::make_shared<Constant>(ng_precision, ov::Shape{input_shape});
+            add = (input_eltwise_order == 0) ? std::make_shared<Add>(const_node, convolution)
+                                             : std::make_shared<Add>(convolution, const_node);
+        } else if (input_eltwise_type == ngraph::helpers::InputLayerType::PARAMETER) {
+            add = (input_eltwise_order == 0) ? std::make_shared<Add>(input, convolution)
+                                             : std::make_shared<Add>(convolution, input);
+        }
+        auto res = std::make_shared<Result>(add);
+        function = std::make_shared<ov::Model>(ov::ResultVector{res}, ov::ParameterVector{input});
+    }
+};
+
+TEST_P(InputConvAddTransposing, CompareWithRefImpl) {
+    Run();
+};
+
+const std::vector<ngraph::helpers::InputLayerType> eltwise_input_types = {
+    ngraph::helpers::InputLayerType::CONSTANT,
+    ngraph::helpers::InputLayerType::PARAMETER
+};
+
+const std::vector<size_t> eltwise_input_order = {0, 1};
+
+const InferenceEngine::Precision net_precisions{InferenceEngine::Precision::FP32};
+
+const std::vector<std::map<std::string, std::string>> configs = {
+    {{"GNA_DEVICE_MODE", "GNA_SW_EXACT"}},
+    {{"GNA_DEVICE_MODE", "GNA_SW_FP32"}}
+};
+
+const std::vector<std::vector<size_t>> input_shapes {
+    {1, 8, 32, 16}
+};
+
+INSTANTIATE_TEST_SUITE_P(smoke_add_transpose_detection,
+                         InputConvAddTransposing,
+                         ::testing::Combine(::testing::Values(net_precisions),
+                                            ::testing::Values(CommonTestUtils::DEVICE_GNA),
+                                            ::testing::ValuesIn(configs),
+                                            ::testing::ValuesIn(input_shapes),
+                                            ::testing::ValuesIn(eltwise_input_types),
+                                            ::testing::ValuesIn(eltwise_input_order)),
+                         InputConvAddTransposing::getTestCaseName);
+
+}  // namespace LayerTestsDefinitions