From e76fc14ae11b6f7e933658b1a7b2172c43f560f6 Mon Sep 17 00:00:00 2001
From: Sergey Shlyapnikov
Date: Sat, 27 Nov 2021 15:08:06 +0300
Subject: [PATCH] [GPU] Use b_fs_yx_fsv16 format for OneDNN convolutions in
 case of FP32 output (#8808)

---
 .../thirdparty/clDNN/src/layout_optimizer.cpp | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp b/inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp
index 7961d8f4de3..59c87e3d28d 100644
--- a/inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp
+++ b/inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp
@@ -876,6 +876,18 @@ layout layout_optimizer::get_expected_layout(layout const& current_layout,
         }
 
         if (use_onednn_impls) {
+            std::function<bool(const program_node&)> has_any_convolutions_below;
+            has_any_convolutions_below = [&](const program_node& node) -> bool {
+                if (node.get_users().empty())
+                    return false;
+                for (auto& usr : node.get_users()) {
+                    if (usr->is_type<convolution>())
+                        return true;
+                    return has_any_convolutions_below(*usr);
+                }
+                return false;
+            };
+
             /* ***************************** OneDNN impls format selection part ****************************** */
             bool valid_grouped = !is_dw && prim->groups > 1 && (ofm_per_group % compute_block == 0 && ifm_per_group % compute_block == 0);
             if (i8_u8_input) {
@@ -883,7 +895,12 @@ layout layout_optimizer::get_expected_layout(layout const& current_layout,
                 if (input_layout.size.batch[0] % 16 == 0) {
                     expected_format = cldnn::format::bs_fs_yx_bsv32_fsv32;
                 } else {
-                    expected_format = cldnn::format::b_fs_yx_fsv32;
+                    if (data_type_traits::is_floating_point(output_layout.data_type) &&
+                        !has_any_convolutions_below(node)) {
+                        expected_format = cldnn::format::b_fs_yx_fsv16;
+                    } else {
+                        expected_format = cldnn::format::b_fs_yx_fsv32;
+                    }
                 }
             } else if ((_optimization_attributes.b_fs_yx_fsv16_network &&
                        convolution_b_fs_yx_fsv16_opt(input_layout, output_layout, weights_layout, prim)) && is_2d) {
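
Note: the snippet below is a minimal, self-contained sketch of the user-graph traversal this patch introduces. The Node struct, its type string, and the toy graph are hypothetical stand-ins for cldnn's program_node, is_type<convolution>() and get_users(); only the shape of the recursive std::function mirrors the patch.

// Illustrative sketch only; not the real clDNN API.
#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Node {
    std::string type;                // stand-in for is_type<convolution>()
    std::vector<const Node*> users;  // stand-in for get_users()
};

int main() {
    // Toy chain: conv1 -> eltwise -> conv2 -> reorder
    Node reorder{"reorder", {}};
    Node conv2{"convolution", {&reorder}};
    Node eltwise{"eltwise", {&conv2}};
    Node conv1{"convolution", {&eltwise}};

    // Same shape as the patch: a recursive std::function that walks the
    // node's users and reports whether a convolution appears downstream.
    std::function<bool(const Node&)> has_any_convolutions_below;
    has_any_convolutions_below = [&](const Node& node) -> bool {
        if (node.users.empty())
            return false;
        for (auto* usr : node.users) {
            if (usr->type == "convolution")
                return true;
            // As in the patch, the loop returns on the first user,
            // so only that user's subtree is inspected.
            return has_any_convolutions_below(*usr);
        }
        return false;
    };

    std::cout << std::boolalpha
              << has_any_convolutions_below(conv1) << '\n'   // true: conv2 is downstream
              << has_any_convolutions_below(conv2) << '\n';  // false: only a reorder follows
    return 0;
}

In the patch itself, b_fs_yx_fsv16 is chosen only when the convolution produces a floating-point output and no further convolutions consume it; intermediate convolutions keep the int8-oriented b_fs_yx_fsv32 layout.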