From f3c8f2bc49b34e016daf8805e1f2568f04a31c6a Mon Sep 17 00:00:00 2001
From: Sergey Shlyapnikov
Date: Thu, 30 Sep 2021 13:15:40 +0300
Subject: [PATCH] [GPU] Add use OneDNN impls attribute (#7716)

---
 .../thirdparty/clDNN/src/include/layout_optimizer.h        | 4 +++-
 inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp | 5 ++++-
 inference-engine/thirdparty/clDNN/src/program.cpp          | 6 ++++++
 3 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/inference-engine/thirdparty/clDNN/src/include/layout_optimizer.h b/inference-engine/thirdparty/clDNN/src/include/layout_optimizer.h
index 4681df52800..10e1bd0a215 100644
--- a/inference-engine/thirdparty/clDNN/src/include/layout_optimizer.h
+++ b/inference-engine/thirdparty/clDNN/src/include/layout_optimizer.h
@@ -90,7 +90,8 @@ public:
         b_fs_zyx_fsv32_network,
         b_fs_yx_fsv16_network,
         b_fs_zyx_fsv16_network,
-        bs_fs_yx_bsv16_fsv16_network
+        bs_fs_yx_bsv16_fsv16_network,
+        use_onednn_impls
     };
 
     struct optimization_attributes {
@@ -103,6 +104,7 @@ public:
         int32_t b_fs_yx_fsv16_network = 0;
         int32_t b_fs_zyx_fsv16_network = 0;
         int32_t bs_fs_yx_bsv16_fsv16_network = 0;
+        int32_t use_onednn_impls = 0;
     };
 
 private:
diff --git a/inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp b/inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp
index 0274cf730f0..b54638404de 100644
--- a/inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp
+++ b/inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp
@@ -843,7 +843,7 @@ impl_types layout_optimizer::get_preferred_impl_type(program_node& node) {
         const size_t kKeyValue = kBatchNum * std::min(kClassNum, static_cast<size_t>(8)) * kNStreams;
         preferred_impl = (kKeyValue > 64) ? impl_types::ocl : impl_types::cpu;
     } else if (node.is_type<convolution>()) {
-        if (!node.get_program().get_engine().get_device_info().supports_immad)
+        if (!_optimization_attributes.use_onednn_impls)
             return impl_types::ocl;
 
         std::vector<format> onednn_optimized_fmt = {
@@ -974,6 +974,9 @@ void layout_optimizer::set_optimization_attribute(optimization_attributes_type attribute, int32_t val) {
         case optimization_attributes_type::bs_fs_yx_bsv16_fsv16_network:
             _optimization_attributes.bs_fs_yx_bsv16_fsv16_network = val;
             break;
+        case optimization_attributes_type::use_onednn_impls:
+            _optimization_attributes.use_onednn_impls = val;
+            break;
         default:
             throw std::out_of_range("unsupported layout optimization attribute");
     }
diff --git a/inference-engine/thirdparty/clDNN/src/program.cpp b/inference-engine/thirdparty/clDNN/src/program.cpp
index c92f5a0995d..bc8d7ef4041 100644
--- a/inference-engine/thirdparty/clDNN/src/program.cpp
+++ b/inference-engine/thirdparty/clDNN/src/program.cpp
@@ -1382,4 +1382,10 @@ void program::set_layout_optimizer_attributes(layout_optimizer& lo) {
 
     if (should_use_bs_fs_yx_bsv16_fsv16)
         lo.set_optimization_attribute(layout_optimizer::optimization_attributes_type::bs_fs_yx_bsv16_fsv16_network, 1);
+
+#ifdef ENABLE_ONEDNN_FOR_GPU
+    auto& engine = get_engine();
+    if (engine.get_device_info().supports_immad && engine.configuration().queue_type == queue_types::in_order)
+        lo.set_optimization_attribute(layout_optimizer::optimization_attributes_type::use_onednn_impls, 1);
+#endif
 }
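
As a review aid, below is a minimal standalone sketch of the pattern this patch introduces: the program decides once, per device and engine configuration, whether oneDNN implementations may be used, caches that decision as a layout-optimizer attribute, and impl selection then consults the cached attribute instead of re-querying device info at every node. All names here (device_info, in_order_queue, the simplified layout_optimizer) are illustrative stand-ins rather than the actual clDNN API; in the real code the toggle is additionally compiled out unless ENABLE_ONEDNN_FOR_GPU is defined, and the queue type comes from engine.configuration().

    #include <cstdint>
    #include <iostream>
    #include <stdexcept>

    enum class impl_types { ocl, onednn };

    // Illustrative stand-in for the device/engine queries done in program.cpp.
    struct device_info {
        bool supports_immad = true;   // IMMAD/DPAS-capable hardware
        bool in_order_queue = true;   // oneDNN path requires an in-order queue
    };

    class layout_optimizer {
    public:
        enum class optimization_attributes_type { use_onednn_impls };

        void set_optimization_attribute(optimization_attributes_type attr, int32_t val) {
            switch (attr) {
            case optimization_attributes_type::use_onednn_impls:
                _attributes.use_onednn_impls = val;
                break;
            default:
                throw std::out_of_range("unsupported layout optimization attribute");
            }
        }

        // Mirrors the patched check in get_preferred_impl_type(): the cached
        // attribute, not a fresh device query, gates oneDNN kernel selection.
        impl_types get_preferred_impl_type() const {
            return _attributes.use_onednn_impls ? impl_types::onednn : impl_types::ocl;
        }

    private:
        struct optimization_attributes {
            int32_t use_onednn_impls = 0;   // off unless the program enables it
        } _attributes;
    };

    int main() {
        device_info info;
        layout_optimizer lo;

        // Done once, as in program::set_layout_optimizer_attributes():
        if (info.supports_immad && info.in_order_queue)
            lo.set_optimization_attribute(
                layout_optimizer::optimization_attributes_type::use_onednn_impls, 1);

        std::cout << (lo.get_preferred_impl_type() == impl_types::onednn ? "onednn" : "ocl")
                  << std::endl;
        return 0;
    }

One benefit of routing the decision through an attribute: get_preferred_impl_type() no longer walks node -> program -> engine -> device_info on every call, and further entry conditions (queue type, build flags) can be folded into the single program-level toggle without touching the per-node selection logic.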