From f6dca869b2e475eb9c6070684f7aecf3a24905a2 Mon Sep 17 00:00:00 2001 From: Wilson Seok Date: Sat, 26 Aug 2023 04:09:25 +0900 Subject: [PATCH] fix reduce preferred format selection and layout for partial shape (#19319) --- .../graph/graph_optimizer/reorder_inputs.cpp | 2 +- .../intel_gpu/src/graph/layout_optimizer.cpp | 10 +- ...smoke_test_reduce_deconvolution_concat.cpp | 137 ++++++++++++++++++ 3 files changed, 143 insertions(+), 6 deletions(-) create mode 100644 src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_reduce_deconvolution_concat.cpp diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/reorder_inputs.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/reorder_inputs.cpp index d235c6657dd..ab33b111ebb 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/reorder_inputs.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/reorder_inputs.cpp @@ -781,7 +781,7 @@ void reorder_inputs::run(program& p, layout_optimizer& lo, reorder_factory& rf) auto new_format = lo.get_preferred_format(deconv_node); if (new_format == format::b_fs_zyx_fsv16 || new_format == format::bs_fs_zyx_bsv16_fsv16) { auto reorder = rf.get_reorder(input.id(), input_layout, - layout{ input_layout.data_type, new_format, input_layout.get_tensor() }); + layout{ input_layout.get_partial_shape(), input_layout.data_type, new_format }); if (reorder.first) { p.add_intermediate(reorder.first, deconv_node, 0, !reorder.second); } diff --git a/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp b/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp index baf34548c3c..d8193844598 100644 --- a/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp +++ b/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp @@ -1735,11 +1735,11 @@ format layout_optimizer::get_preferred_format(program_node& node) { } } else if (node.is_type()) { auto& reduce_node = node.as(); - auto input_layout = reduce_node.input().get_output_layout(); - if (!use_onednn_impls && 
input_layout.is_dynamic()) { - if (input_layout.format.dimension() > 4) { - expected = format::get_default_format(input_layout.format.dimension()); - } else if (input_layout.format.dimension() == 4) { + auto output_layout = reduce_node.get_output_layout(); + if (!use_onednn_impls && output_layout.is_dynamic()) { + if (output_layout.format.dimension() > 4) { + expected = format::get_default_format(output_layout.format.dimension()); + } else if (output_layout.format.dimension() == 4) { expected = format::any; } } diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_reduce_deconvolution_concat.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_reduce_deconvolution_concat.cpp new file mode 100644 index 00000000000..176d2a2ebd1 --- /dev/null +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_reduce_deconvolution_concat.cpp @@ -0,0 +1,137 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include +#include +#include +#include +#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ngraph_functions/builders.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "shared_test_classes/single_layer/reduce_ops.hpp" +#include "shared_test_classes/single_layer/convolution_backprop_data.hpp" +#include +#include + +using namespace ngraph; +using namespace InferenceEngine; +using namespace ov::test; + +namespace GPULayerTestsDefinitions { + +typedef std::tuple< + std::vector, // input shapes + ElementType, // Network precision + TargetDevice, // Device name + std::map // Additional network configuration > reduceDeconvConcatDynamicGPUTestParamsSet; + +const std::vector netPrecisions = { + ElementType::f16, +}; + +// Reduce should have preferred format for output layout +class ReduceDeconvConcatDynamicGPUTest : public testing::WithParamInterface, + virtual public SubgraphBaseTest { +public: + static 
std::string getTestCaseName(const testing::TestParamInfo& obj) { + reduceDeconvConcatDynamicGPUTestParamsSet basicParamsSet = obj.param; + std::ostringstream result; + std::vector inputShapes; + ElementType netType; + TargetDevice targetDevice; + std::map additionalConfig; + + std::tie(inputShapes, netType, targetDevice, additionalConfig) = basicParamsSet; + result << "IS="; + for (const auto& shape : inputShapes) { + result << ov::test::utils::partialShape2str({shape.first}) << "_"; + for (const auto& actual_shape : shape.second) { + result << ov::test::utils::partialShape2str({actual_shape}) << "_"; + } + } + result << "NetType=" << netType << "_"; + result << "targetDevice=" << targetDevice; + return result.str(); + } + +protected: + void generate_inputs(const std::vector& targetInputStaticShapes) override { + inputs.clear(); + const auto& funcInputs = function->inputs(); + for (size_t i = 0; i < funcInputs.size(); ++i) { + const auto& funcInput = funcInputs[i]; + ov::Tensor tensor; + tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), + targetInputStaticShapes[i], + 80, + 0, + 8); + inputs.insert({funcInput.get_node_shared_ptr(), tensor}); + } + } + + void SetUp() override { + reduceDeconvConcatDynamicGPUTestParamsSet basicParamsSet = this->GetParam(); + std::vector inputShapes; + ElementType netType; + std::map additionalConfig; + std::tie(inputShapes, netType, targetDevice, additionalConfig) = basicParamsSet; + + init_input_shapes(inputShapes); + const auto inShapeDeconv = inputDynamicShapes[0]; + const auto inShapeEReduce = inputDynamicShapes[1]; + auto params = builder::makeDynamicParams(netType, {inShapeDeconv, inShapeEReduce}); + auto paramOuts = helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes(params)); + + auto deconvOp = ngraph::builder::makeConvolutionBackpropData(paramOuts[0], netType, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, + {0, 0, 0}, {1, 1, 1}, ov::op::PadType::EXPLICIT, 16); + 
deconvOp->set_friendly_name("deconv"); + + std::vector reduce_axes = {5}; + auto reduceAxesNode = std::dynamic_pointer_cast( + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape({1}), reduce_axes)); + auto reduceOp = ngraph::builder::makeReduce(paramOuts[1], reduceAxesNode, false, ngraph::helpers::ReductionType::Max); + reduceOp->set_friendly_name("reduce"); + + auto concatOp = ngraph::builder::makeConcat({deconvOp, reduceOp}, 1); + concatOp->set_friendly_name("concat"); + + std::vector transpose_order = {0, 1, 2, 4, 3}; + auto transposeOrderNode = std::dynamic_pointer_cast( + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape({5}), transpose_order)); + auto transposeOp = std::make_shared(concatOp, transposeOrderNode); + transposeOp->set_friendly_name("transpose"); + + ngraph::ResultVector results = {std::make_shared(transposeOp)}; + function = std::make_shared(results, params, "transpose_out"); + } +}; + + +TEST_P(ReduceDeconvConcatDynamicGPUTest, CompareWithRefs) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + run(); +} + +namespace { +std::map emptyAdditionalConfig; +const std::vector> dynInputShapes = { + { + // Input for Deconv + {{1, 32, 64, ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 32, 64, 64, 64}}}, + // Input for Reduce + {{1, 8, 128, ov::Dimension::dynamic(), ov::Dimension::dynamic(), 4}, {{1, 8, 128, 128, 128, 4}}} + } +}; + + +const auto testParams_smoke = ::testing::Combine(::testing::ValuesIn(dynInputShapes), + ::testing::ValuesIn(netPrecisions), // netprec + ::testing::Values(ov::test::utils::DEVICE_GPU), + ::testing::Values(emptyAdditionalConfig)); + +INSTANTIATE_TEST_SUITE_P(smoke_dynamic_reduce_deconv_concat, ReduceDeconvConcatDynamicGPUTest, + testParams_smoke, ReduceDeconvConcatDynamicGPUTest::getTestCaseName); +} // namespace +} // namespace GPULayerTestsDefinitions