diff --git a/src/plugins/intel_gpu/tests/test_cases/concatenation_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/concatenation_gpu_test.cpp
index 19429192cd9..13b841f8cb0 100644
--- a/src/plugins/intel_gpu/tests/test_cases/concatenation_gpu_test.cpp
+++ b/src/plugins/intel_gpu/tests/test_cases/concatenation_gpu_test.cpp
@@ -917,6 +917,8 @@ TEST_P(concat_implicit_gpu_4d_i8, input_order_opt_b_fs_yx_fsv32) {
 #ifdef ENABLE_ONEDNN_FOR_GPU
 TEST(concat_gpu_onednn, basic_input_types) {
     auto& engine = get_onednn_test_engine();
+    if (!engine.get_device_info().supports_immad)
+        return;
 
     auto input0 = engine.allocate_memory({ data_types::f32, format::bfyx, { 1, 1, 4, 3 } });
     auto input1 = engine.allocate_memory({ data_types::f32, format::bfyx, { 1, 1, 4, 3 } });
diff --git a/src/plugins/intel_gpu/tests/test_cases/convolution_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/convolution_gpu_test.cpp
index 16de31dc44d..09959f65d67 100644
--- a/src/plugins/intel_gpu/tests/test_cases/convolution_gpu_test.cpp
+++ b/src/plugins/intel_gpu/tests/test_cases/convolution_gpu_test.cpp
@@ -8847,6 +8847,8 @@ INSTANTIATE_TEST_SUITE_P(conv_onednn_cases,
 
 TEST_P(convolution_gpu_onednn, conv_onednn_cases) {
     auto& engine = get_onednn_test_engine();
+    if (!engine.get_device_info().supports_immad)
+        return;
 
     if (!engine.get_device_info().supports_fp16)
     {
@@ -8988,6 +8990,8 @@ TEST_P(convolution_gpu_onednn, conv_onednn_cases) {
 
 TEST(convolution_gpu_onednn, padding_for_cldnn_kernel_after_onednn) {
     auto& engine = get_onednn_test_engine();
+    if (!engine.get_device_info().supports_immad)
+        return;
 
     int input_b = 1, input_f = 16, input_y = 3, input_x = 3;
     int output_b = 1, output_f = 16, output_y = 6, output_x = 6;
diff --git a/src/plugins/intel_gpu/tests/test_cases/deconvolution_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/deconvolution_gpu_test.cpp
index 37941cbfb5f..2190f0f54d3 100644
--- a/src/plugins/intel_gpu/tests/test_cases/deconvolution_gpu_test.cpp
+++ b/src/plugins/intel_gpu/tests/test_cases/deconvolution_gpu_test.cpp
@@ -2858,6 +2858,8 @@ TEST(deconvolution_f32_fw_gpu_onednn, basic_wsiz2x2_in2x2x1x1_stride2_nopad) {
     //  Stride : 2x2
 
     auto& engine = get_onednn_test_engine();
+    if (!engine.get_device_info().supports_immad)
+        return;
 
     auto input = engine.allocate_memory({ data_types::f32, format::yxfb, { 1, 1, 2, 2 } });
     auto weights = engine.allocate_memory({ data_types::f32, format::oiyx, { 1, 1, 2, 2 } });
diff --git a/src/plugins/intel_gpu/tests/test_cases/fully_connected_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/fully_connected_gpu_test.cpp
index 2a70ebb9f14..260012d4cf8 100644
--- a/src/plugins/intel_gpu/tests/test_cases/fully_connected_gpu_test.cpp
+++ b/src/plugins/intel_gpu/tests/test_cases/fully_connected_gpu_test.cpp
@@ -1677,6 +1677,8 @@ TEST(fully_connected_onednn_gpu, no_biases_int8) {
         weight_b = 4, weight_x = 3;  // size of the whole weights buffer
 
     auto& engine = get_onednn_test_engine();
+    if (!engine.get_device_info().supports_immad)
+        return;
 
     // Change input data of fully-connected node from bx to bf
     auto input_prim = engine.allocate_memory({ data_types::f32, format::bfyx, { input_b, 1, input_x, 1 } });
diff --git a/src/plugins/intel_gpu/tests/test_cases/pooling_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/pooling_gpu_test.cpp
index c56b4cbf256..7bc634326fd 100644
--- a/src/plugins/intel_gpu/tests/test_cases/pooling_gpu_test.cpp
+++ b/src/plugins/intel_gpu/tests/test_cases/pooling_gpu_test.cpp
@@ -3776,6 +3776,8 @@ INSTANTIATE_TEST_SUITE_P(DISABLED_POOLING,
 #ifdef ENABLE_ONEDNN_FOR_GPU
 TEST(pooling_forward_gpu_onednn, basic_max_pooling_int8) {
     auto& engine = get_onednn_test_engine();
+    if (!engine.get_device_info().supports_immad)
+        return;
     layout in_layout = { type_to_data_type<float>::value, format::byxf, { 1, 1, 3, 3 } };
     layout out_layout = { type_to_data_type<float>::value, format::byxf, { 1, 1, 1, 1 } };
     layout byte_layout = { type_to_data_type<int8_t>::value, format::bfyx, { 1, 1, 3, 3 } };
diff --git a/src/plugins/intel_gpu/tests/test_cases/reduce_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/reduce_gpu_test.cpp
index 545a2a2104c..10eed580425 100644
--- a/src/plugins/intel_gpu/tests/test_cases/reduce_gpu_test.cpp
+++ b/src/plugins/intel_gpu/tests/test_cases/reduce_gpu_test.cpp
@@ -1848,6 +1848,8 @@ protected:
 
 public:
     void execute_onednn() {
+        if (!engine.get_device_info().supports_immad)
+            return;
         int input_dim = static_cast<int>(input_format.dimension());
         cldnn::format layout_format = input_format;
 
diff --git a/src/plugins/intel_gpu/tests/test_cases/reorder_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/reorder_gpu_test.cpp
index cd048816928..f54205eccb6 100644
--- a/src/plugins/intel_gpu/tests/test_cases/reorder_gpu_test.cpp
+++ b/src/plugins/intel_gpu/tests/test_cases/reorder_gpu_test.cpp
@@ -2581,6 +2581,8 @@ TEST_P(testing_removal_reorder, only_remove_reorder_shallow_depth_input) {
 #ifdef ENABLE_ONEDNN_FOR_GPU
 // Check to remove reorder between onednn and cldnn conv if the reorder has no padded output
 TEST_P(testing_removal_reorder, removal_no_padded_reorder) {
+    if (!engine.get_device_info().supports_immad)
+        return;
     auto p = GetParam();
     layout reorder_layout(data_types::f16, format::b_fs_yx_fsv16, p.in_shape, padding({0, }, 0));
@@ -2603,14 +2605,13 @@ TEST_P(testing_removal_reorder, removal_no_padded_reorder) {
 
     execute(p);
 
-    if (!check_supports_immad())
-        return;
-
     EXPECT_EQ(check_optimized_out(p, "reorder_conv"), true);
 }
 
 // Check not to remove reorder between onednn and cldnn conv if the reorder has padded output
 TEST_P(testing_removal_reorder, removal_padded_reorder) {
+    if (!engine.get_device_info().supports_immad)
+        return;
     auto p = GetParam();
     layout reorder_layout(data_types::f16, format::b_fs_yx_fsv16, p.in_shape, padding({0, 0, 1, 1}, 0));
@@ -2633,9 +2634,6 @@ TEST_P(testing_removal_reorder, removal_padded_reorder) {
 
     execute(p);
 
-    if (!check_supports_immad())
-        return;
-
     EXPECT_EQ(check_optimized_out(p, "reorder_conv"), false);
 }
 #endif // ENABLE_ONEDNN_FOR_GPU
@@ -2650,6 +2648,8 @@ INSTANTIATE_TEST_SUITE_P(reorder_gpu_testing, testing_removal_reorder,
 #ifdef ENABLE_ONEDNN_FOR_GPU
 TEST(reorder_onednn_gpu, basic_convert_int8) {
     auto& engine = get_onednn_test_engine();
+    if (!engine.get_device_info().supports_immad)
+        return;
     layout in_layout = { type_to_data_type<float>::value, format::byxf, { 1, 1, 3, 3 } };
     layout byte_layout = { type_to_data_type<int8_t>::value, format::bfyx, { 1, 1, 3, 3 } };
    std::initializer_list<float> input_f = { 1.0f, -2.6f, 3.1f, -4.0f, 5.03f, -6.99f, 7.0f, -8.0f, 9.0f };
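Every hunk above applies the same early-return guard so that the oneDNN-specific test body is skipped on devices without IMMAD support. Below is a minimal standalone sketch of that guard; the stub types are hypothetical stand-ins for cldnn::engine and cldnn::device_info (the real tests obtain the engine via get_onednn_test_engine()).

// Sketch of the skip guard added throughout the diff, using hypothetical stubs.
#include <iostream>

struct device_info_stub { bool supports_immad = false; };   // stand-in for cldnn::device_info
struct engine_stub {
    device_info_stub info;
    const device_info_stub& get_device_info() const { return info; }
};

void onednn_test_body(const engine_stub& engine) {
    // Mirrors the guard in the diff: if the device lacks IMMAD support,
    // leave the test body immediately instead of running the oneDNN path.
    if (!engine.get_device_info().supports_immad)
        return;

    std::cout << "IMMAD supported, running oneDNN test body\n";
}

int main() {
    engine_stub engine;        // supports_immad defaults to false here
    onednn_test_body(engine);  // returns silently, as the guarded tests do
    return 0;
}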