diff --git a/src/plugins/intel_gpu/tests/fusions/activation_fusion_test.cpp b/src/plugins/intel_gpu/tests/fusions/activation_fusion_test.cpp
index 74014d1b690..52b3b89d74f 100644
--- a/src/plugins/intel_gpu/tests/fusions/activation_fusion_test.cpp
+++ b/src/plugins/intel_gpu/tests/fusions/activation_fusion_test.cpp
@@ -174,6 +174,9 @@ TEST_P(activation_eltwise_activation_quantize_u8, basic) {
         quantize("quant", "act2", "in_low", "in_high", "out_low", "out_high", 256, data_types::u8),
         reorder("reorder_bfyx", "quant", p.default_format, data_types::f32)
     );
+    // Activation won't be fused because onednn doesn't support softsign activation
+    if (engine.get_device_info().supports_immad)
+        p.expected_fused_primitives++;
 
     tolerance = 1.f;
     execute(p);
@@ -194,6 +197,9 @@ TEST_P(activation_eltwise_activation_quantize_u8, per_channel) {
         quantize("quant", "act2", "in_low", "in_high", "out_low", "out_high", 256, data_types::u8),
         reorder("reorder_bfyx", "quant", p.default_format, data_types::f32)
     );
+    // Activation won't be fused because onednn doesn't support softsign activation
+    if (engine.get_device_info().supports_immad)
+        p.expected_fused_primitives++;
 
     tolerance = 1.f;
     execute(p);
diff --git a/src/plugins/intel_gpu/tests/fusions/convolution_fusion_test.cpp b/src/plugins/intel_gpu/tests/fusions/convolution_fusion_test.cpp
index 01ba4ddd7d9..2ceee0a77f7 100644
--- a/src/plugins/intel_gpu/tests/fusions/convolution_fusion_test.cpp
+++ b/src/plugins/intel_gpu/tests/fusions/convolution_fusion_test.cpp
@@ -1843,6 +1843,9 @@ TEST_P(conv_int8_activation_eltwise_quantize, fsv16) {
         // TODO Add 5D int8 optimized convolution implementations
         return;
     }
+    // Activation won't be fused because onednn doesn't support negative activation
+    if (engine.get_device_info().supports_immad)
+        p.expected_fused_primitives += 2;
 
     tolerance = 1.f;
     execute(p);
@@ -1873,6 +1876,9 @@ TEST_P(conv_int8_activation_eltwise_quantize, fsv32) {
         // TODO Add 5D int8 optimized convolution implementations
         return;
     }
+    // Activation won't be fused because onednn doesn't support negative activation
+    if (engine.get_device_info().supports_immad)
+        p.expected_fused_primitives += 2;
 
     tolerance = 1.f;
     execute(p);
@@ -1914,6 +1920,9 @@ TEST_P(conv_int8_activation_eltwise, fsv16) {
         // TODO Add 5D int8 optimized convolution implementations
         return;
     }
+    // Activation won't be fused because onednn doesn't support negative activation
+    if (engine.get_device_info().supports_immad)
+        p.expected_fused_primitives += 2;
 
     tolerance = 1e-5f;
     execute(p);
@@ -1939,6 +1948,9 @@ TEST_P(conv_int8_activation_eltwise, fsv32) {
         // TODO Add 5D int8 optimized convolution implementations
         return;
     }
+    // Activation won't be fused because onednn doesn't support negative activation
+    if (engine.get_device_info().supports_immad)
+        p.expected_fused_primitives += 2;
 
     tolerance = 1e-5f;
     execute(p);
diff --git a/src/plugins/intel_gpu/tests/fusions/deconvolution_fusion_test.cpp b/src/plugins/intel_gpu/tests/fusions/deconvolution_fusion_test.cpp
index 6cb006aabe5..0e27911035d 100644
--- a/src/plugins/intel_gpu/tests/fusions/deconvolution_fusion_test.cpp
+++ b/src/plugins/intel_gpu/tests/fusions/deconvolution_fusion_test.cpp
@@ -546,9 +546,8 @@ TEST_P(deconv_scale_actv_quant_i8, basic) {
         quantize("quant", "actv", "in_lo", "in_hi", "out_lo", "out_hi", 255, data_types::i8),
         reorder("out", "quant", p.default_format, data_types::f32)
     );
-
-    //Activation won't be fused because onednn doesn't support softsign activation
-    if(engine.get_device_info().supports_immad)
+    // Activation won't be fused because onednn doesn't support softsign activation
+    if (engine.get_device_info().supports_immad)
         p.expected_fused_primitives++;
 
     tolerance = 1.f;
diff --git a/src/plugins/intel_gpu/tests/fusions/eltwise_fusion_test.cpp b/src/plugins/intel_gpu/tests/fusions/eltwise_fusion_test.cpp
index bd2628291d0..af8972c19de 100644
--- a/src/plugins/intel_gpu/tests/fusions/eltwise_fusion_test.cpp
+++ b/src/plugins/intel_gpu/tests/fusions/eltwise_fusion_test.cpp
@@ -187,6 +187,9 @@ TEST_P(eltwise_const_path, not_fuse_to_const_eltwise) {
         activation("activation", "add", activation_func::negative),
         reorder("out", "activation", p.default_format, data_types::f32)
     );
+    // Activation won't be fused because onednn doesn't support negative activation
+    if (engine.get_device_info().supports_immad)
+        p.expected_fused_primitives++;
 
     tolerance = 1e-5f;
     execute(p);
@@ -213,6 +216,9 @@ TEST_P(eltwise_fp32_fsv16, add) {
         activation("activation", "add", activation_func::negative),
         reorder("out", "activation", p.default_format, data_types::f32)
     );
+    // Activation won't be fused because onednn doesn't support negative activation
+    if (engine.get_device_info().supports_immad)
+        p.expected_fused_primitives++;
 
     implementation_desc eltw_impl = { format::b_fs_yx_fsv16, "eltwise_b_fs_yx_fsv16" };
     bo_fused.set_option(build_option::force_implementations({ { "eltwise", eltw_impl } }));
@@ -232,6 +238,9 @@ TEST_P(eltwise_fp32_fsv16, add_per_element) {
         activation("activation", "add", activation_func::negative),
         reorder("out", "activation", p.default_format, data_types::f32)
     );
+    // Activation won't be fused because onednn doesn't support negative activation
+    if (engine.get_device_info().supports_immad)
+        p.expected_fused_primitives++;
 
     implementation_desc eltw_impl = { format::b_fs_yx_fsv16, "eltwise_b_fs_yx_fsv16" };
     bo_fused.set_option(build_option::force_implementations({ { "eltwise", eltw_impl } }));
@@ -257,6 +266,9 @@ TEST_P(eltwise_fp32_fsv32, add) {
         activation("activation", "add", activation_func::negative),
         reorder("out", "activation", p.default_format, data_types::f32)
     );
+    // Activation won't be fused because onednn doesn't support negative activation
+    if (engine.get_device_info().supports_immad)
+        p.expected_fused_primitives++;
 
     implementation_desc eltw_impl = { format::fs_b_yx_fsv32, "eltwise_fs_b_yx_fsv32" };
     bo_fused.set_option(build_option::force_implementations({ { "eltwise", eltw_impl } }));
@@ -276,6 +288,9 @@ TEST_P(eltwise_fp32_fsv32, add_per_element) {
         activation("activation", "add", activation_func::negative),
         reorder("out", "activation", p.default_format, data_types::f32)
     );
+    // Activation won't be fused because onednn doesn't support negative activation
+    if (engine.get_device_info().supports_immad)
+        p.expected_fused_primitives++;
 
     implementation_desc eltw_impl = { format::fs_b_yx_fsv32, "eltwise_fs_b_yx_fsv32" };
     bo_fused.set_option(build_option::force_implementations({ { "eltwise", eltw_impl } }));
@@ -302,6 +317,9 @@ TEST_P(eltwise_fp32_fsv4, add) {
         activation("activation", "add", activation_func::negative),
         reorder("out", "activation", p.default_format, data_types::f32)
     );
+    // Activation won't be fused because onednn doesn't support negative activation
+    if (engine.get_device_info().supports_immad)
+        p.expected_fused_primitives++;
 
     implementation_desc eltw_impl = { format::b_fs_yx_fsv4, "eltwise_b_fs_yx_fsv4" };
     bo_fused.set_option(build_option::force_implementations({ { "eltwise", eltw_impl } }));
@@ -321,6 +339,9 @@ TEST_P(eltwise_fp32_fsv4, add_per_element) {
         activation("activation", "add", activation_func::negative),
         reorder("out", "activation", p.default_format, data_types::f32)
     );
+    // Activation won't be fused because onednn doesn't support negative activation
+    if (engine.get_device_info().supports_immad)
+        p.expected_fused_primitives++;
 
     implementation_desc eltw_impl = { format::b_fs_yx_fsv4, "eltwise_b_fs_yx_fsv4" };
     bo_fused.set_option(build_option::force_implementations({ { "eltwise", eltw_impl } }));
@@ -449,6 +470,9 @@ TEST_P(eltwise_fp16_byxf, add) {
         activation("activation", "add", activation_func::negative),
         reorder("out", "activation", p.default_format, data_types::f32)
     );
+    // Activation won't be fused because onednn doesn't support negative activation
+    if (engine.get_device_info().supports_immad)
+        p.expected_fused_primitives++;
 
     implementation_desc eltw_impl = { format::byxf, "generic_eltwise_ref" };
     bo_fused.set_option(build_option::force_implementations({ { "eltwise", eltw_impl } }));
diff --git a/src/plugins/intel_gpu/tests/fusions/gemm_fusion_test.cpp b/src/plugins/intel_gpu/tests/fusions/gemm_fusion_test.cpp
index 2f5f9c787d6..c6c2b3bd2d4 100644
--- a/src/plugins/intel_gpu/tests/fusions/gemm_fusion_test.cpp
+++ b/src/plugins/intel_gpu/tests/fusions/gemm_fusion_test.cpp
@@ -339,6 +339,9 @@ TEST_P(gemm_2in_act_scale_eltwise, basic) {
         eltwise("sum", { "activation", "eltwise_data" }, eltwise_mode::sum, data_types::f32),
         reorder("reorder_bfyx", "sum", p.default_format, data_types::f32)
     );
+    // Activation won't be fused because onednn doesn't support negative activation
+    if (engine.get_device_info().supports_immad)
+        p.expected_fused_primitives += 2;
 
     tolerance = 1e-4f;
     execute(p);
@@ -357,6 +360,9 @@ TEST_P(gemm_2in_act_scale_eltwise, broadcast_eltwise) {
         eltwise("sum", { "activation", "eltwise_data" }, eltwise_mode::sum, data_types::f32),
         reorder("reorder_bfyx", "sum", p.default_format, data_types::f32)
     );
+    // Activation won't be fused because onednn doesn't support negative activation
+    if (engine.get_device_info().supports_immad)
+        p.expected_fused_primitives += 2;
 
     tolerance = 1e-4f;
     execute(p);
diff --git a/src/plugins/intel_gpu/tests/fusions/lrn_fusion_test.cpp b/src/plugins/intel_gpu/tests/fusions/lrn_fusion_test.cpp
index 8af69779e51..2927e1a7124 100644
--- a/src/plugins/intel_gpu/tests/fusions/lrn_fusion_test.cpp
+++ b/src/plugins/intel_gpu/tests/fusions/lrn_fusion_test.cpp
@@ -120,6 +120,9 @@ TEST_P(lrn_fp32_quantize_u8_eltwise_activation, basic) {
         activation("activation", "eltwise", activation_func::floor),
         reorder("reorder", "activation", p.default_format, data_types::f32)
     );
+    // Activation won't be fused because onednn doesn't support floor activation
+    if (engine.get_device_info().supports_immad)
+        p.expected_fused_primitives++;
 
     tolerance = 1.0f;
     execute(p);
diff --git a/src/plugins/intel_gpu/tests/fusions/pooling_fusion_test.cpp b/src/plugins/intel_gpu/tests/fusions/pooling_fusion_test.cpp
index ce87dfda227..00e86e44b60 100644
--- a/src/plugins/intel_gpu/tests/fusions/pooling_fusion_test.cpp
+++ b/src/plugins/intel_gpu/tests/fusions/pooling_fusion_test.cpp
@@ -308,6 +308,9 @@ TEST_P(pooling_scale_activation_quantize, per_channel) {
         quantize("quantize", "activation", "in_lo", "in_hi", "out_lo", "out_hi", 255, data_types::u8),
         reorder("output_reorder", "quantize", p.default_format, data_types::f32)
     );
+    // Activation won't be fused because onednn doesn't support atan activation
+    if (engine.get_device_info().supports_immad)
+        p.expected_fused_primitives++;
 
     tolerance = 1.0f;
     execute(p);
diff --git a/src/plugins/intel_gpu/tests/fusions/reduce_fusion_test.cpp b/src/plugins/intel_gpu/tests/fusions/reduce_fusion_test.cpp
index 4f6fcf1bc2e..d264cf7d2c9 100644
--- a/src/plugins/intel_gpu/tests/fusions/reduce_fusion_test.cpp
+++ b/src/plugins/intel_gpu/tests/fusions/reduce_fusion_test.cpp
@@ -242,6 +242,9 @@ TEST_P(reduce_scale_activation, basic) {
         activation("activation", "scale", activation_func::cos),
         reorder("output_reorder", "activation", p.default_format, data_types::f32)
     );
+    // Activation won't be fused because onednn doesn't support cos activation
+    if (engine.get_device_info().supports_immad)
+        p.expected_fused_primitives++;
 
     tolerance = 1e-02f;
     execute(p);
@@ -257,6 +260,9 @@ TEST_P(reduce_scale_activation, per_channel) {
         activation("activation", "scale", activation_func::cos),
         reorder("output_reorder", "activation", p.default_format, data_types::f32)
     );
+    // Activation won't be fused because onednn doesn't support cos activation
+    if (engine.get_device_info().supports_immad)
+        p.expected_fused_primitives++;
 
     tolerance = 1e-02f;
     execute(p);