diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp
index 679c7a5eaad..8f1d0e4c93f 100644
--- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp
+++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp
@@ -291,7 +291,7 @@ void prepare_primitive_fusing::fuse_activations(program &p) {
         }

         if (use_onednn_impls) {
-            if (input.is_type<reshape>())
+            if (input.is_type<reshape>() || input.is_type<concatenation>())
                 return;
 #ifdef ENABLE_ONEDNN_FOR_GPU
             // Activation should not be fused if it isn't supported in onednn
diff --git a/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp b/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp
index 027aa465d59..d60332818a8 100644
--- a/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp
+++ b/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp
@@ -1284,7 +1284,13 @@ impl_types layout_optimizer::get_forced_impl_type_by_config(program_node& node)
             return impl_types::ocl;
         else if (forced_impl_type == "reduce:onednn")
             return impl_types::onednn;
-    }
+    } else if (node.is_type<concatenation>()) {
+        if (forced_impl_type == "concat:ocl")
+            return impl_types::ocl;
+        else if (forced_impl_type == "concat:onednn")
+            return impl_types::onednn;
+    }
+
     // Forcing one layer
     size_t found_type = forced_impl_type.rfind(":");

diff --git a/src/plugins/intel_gpu/tests/fusions/concatenate_fusion_test.cpp b/src/plugins/intel_gpu/tests/fusions/concatenate_fusion_test.cpp
index 071f6169df4..9c00d05281c 100644
--- a/src/plugins/intel_gpu/tests/fusions/concatenate_fusion_test.cpp
+++ b/src/plugins/intel_gpu/tests/fusions/concatenate_fusion_test.cpp
@@ -136,7 +136,7 @@ TEST_P(concat_onednn_eltwise, along_f) {
 }

 INSTANTIATE_TEST_SUITE_P(fusings_gpu, concat_onednn_activation, ::testing::ValuesIn(std::vector<concat_test_params>{
-    concat_test_params{ CASE_CONCAT_F16_1, 3, 3, "" },
+    concat_test_params{ CASE_CONCAT_F16_1, 4, 4, "" },
 }));

 INSTANTIATE_TEST_SUITE_P(fusings_gpu, concat_onednn_eltwise, ::testing::ValuesIn(std::vector<concat_test_params>{
diff --git a/src/plugins/intel_gpu/tests/fusions/convolution_fusion_test.cpp b/src/plugins/intel_gpu/tests/fusions/convolution_fusion_test.cpp
index 510098412b3..d4ced95e7c3 100644
--- a/src/plugins/intel_gpu/tests/fusions/convolution_fusion_test.cpp
+++ b/src/plugins/intel_gpu/tests/fusions/convolution_fusion_test.cpp
@@ -1118,6 +1118,7 @@ INSTANTIATE_TEST_SUITE_P(fusings_gpu, conv_fp32_multi_eltwise_quantization, ::te
 class conv_fp32_multi_eltwise_concat : public ConvFusingTest {};
 TEST_P(conv_fp32_multi_eltwise_concat, basic) {
     auto p = GetParam();
+    data_types output_type = data_types::i8;
     create_topologies(
         input_layout("input", get_input_layout(p)),
         data("eltwise_data1", get_mem(get_output_layout(p))),
@@ -1129,15 +1130,15 @@ TEST_P(conv_fp32_multi_eltwise_concat, basic) {
         eltwise("eltwise2", "conv_prim", "eltwise_data2", eltwise_mode::sum),
         concatenation("concat",
                       { "eltwise1", "eltwise2" },
-                      1,
-                      data_types::i8,
+                      2,
+                      output_type,
                       padding{ { 0, 0, 0, 0 }, 0 }),
         reorder("reorder_bfyx", "concat", p.default_format, data_types::f32)
     );

     implementation_desc conv_impl = { format::b_fs_yx_fsv16, "" };
     bo_fused.set_option(build_option::force_implementations({ { "conv_prim", conv_impl } }));

-    tolerance = default_tolerance(p.default_type);
+    tolerance = default_tolerance(output_type);
     execute(p);
 }