From 2caca604ca409bf0fb73507a8759e2078b204c43 Mon Sep 17 00:00:00 2001 From: Egor Churaev Date: Tue, 11 Aug 2020 14:52:04 +0300 Subject: [PATCH] [IE CLDNN] Fix reshape for yxfb layout (#1632) In one of the networks there was the following pipeline: ``` FullyConnected -> Reshape -> FullyConnected ``` The output of Reshape wasn't in the same order as the input for this layer. I found that the problem was connected with the format of the layers. During optimization passes this pipeline was transformed to the following: ``` FullyConnected -> Reorder -> Reshape -> Reorder -> FullyConnected ``` Both `FullyConnected` layers work with the `yxfb` format. This is why the Reorder layer after the Reshape has an output layout with format `yxfb`, and `reshape_in_layout.format` returns the `yxfb` format. But in this case we have to convert Reshape to the `bfyx` format, because then we won't change the order of elements. I replaced `reshape_in_layout.format` (which returns `yxfb`) with an explicitly set `bfyx` format. JIRA: 35288 --- .../clDNN/src/graph_optimizer/handle_reshape.cpp | 8 +++++++- .../clDNN/tests/test_cases/reshape_gpu_test.cpp | 2 +- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/inference-engine/thirdparty/clDNN/src/graph_optimizer/handle_reshape.cpp b/inference-engine/thirdparty/clDNN/src/graph_optimizer/handle_reshape.cpp index 80adb1d1d12..cf0ecf6399e 100644 --- a/inference-engine/thirdparty/clDNN/src/graph_optimizer/handle_reshape.cpp +++ b/inference-engine/thirdparty/clDNN/src/graph_optimizer/handle_reshape.cpp @@ -125,10 +125,16 @@ void handle_reshape::run(program_impl& p) { for (const auto& reorder_node : reorder_node_to_split) { auto& reorder_reshape_node = reorder_reshape_nodes[reshape_reorder_id]; auto reshape_in_layout = reorder_node->get_output_layout(); + auto dims = cldnn::format::dimension(reshape_in_layout.format); + auto format = cldnn::format::bfyx; + if (dims == 5) + format = cldnn::format::bfzyx; + else if (dims == 6) + format = cldnn::format::bfwzyx; 
auto reshape_input = std::make_shared( "reorder:_reshape_input_" + reorder_node->id() + "_" + reorder_reshape_node->id(), input_node.id(), - reshape_in_layout.format, + format, reshape_in_layout.data_type); auto& reshape_input_node = p.get_or_create(reshape_input); p.add_intermediate(reshape_input_node, diff --git a/inference-engine/thirdparty/clDNN/tests/test_cases/reshape_gpu_test.cpp b/inference-engine/thirdparty/clDNN/tests/test_cases/reshape_gpu_test.cpp index df98f60bd64..a4bc8272e03 100644 --- a/inference-engine/thirdparty/clDNN/tests/test_cases/reshape_gpu_test.cpp +++ b/inference-engine/thirdparty/clDNN/tests/test_cases/reshape_gpu_test.cpp @@ -465,7 +465,7 @@ TEST(reshape_gpu_f32, multiple_users_with_reorder) { topology.add(activation("relu2", "reshape", activation_func::relu)); std::vector input_vec = {-1.f, 2.f, -3.f, 4.f}; - std::vector out1 = {0.f, 0.f, 2.f, 4.0f}; + std::vector out1 = {0.f, 2.f, 0.f, 4.0f}; std::vector out2 = {0.f, 2.f, 0.f, 4.0f}; set_values(input, input_vec);