[GPU] Run onednn test only in DG2 (#12892)

This commit is contained in:
Felix Dohyun Kim 2022-09-06 09:02:15 +09:00 committed by GitHub
parent c653638cd5
commit 2108fe0dfc
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 20 additions and 6 deletions

View File

@@ -917,6 +917,8 @@ TEST_P(concat_implicit_gpu_4d_i8, input_order_opt_b_fs_yx_fsv32) {
 #ifdef ENABLE_ONEDNN_FOR_GPU
 TEST(concat_gpu_onednn, basic_input_types) {
     auto& engine = get_onednn_test_engine();
+    if (!engine.get_device_info().supports_immad)
+        return;
     auto input0 = engine.allocate_memory({ data_types::f32, format::bfyx, { 1, 1, 4, 3 } });
     auto input1 = engine.allocate_memory({ data_types::f32, format::bfyx, { 1, 1, 4, 3 } });

View File

@@ -8847,6 +8847,8 @@ INSTANTIATE_TEST_SUITE_P(conv_onednn_cases,
 TEST_P(convolution_gpu_onednn, conv_onednn_cases) {
     auto& engine = get_onednn_test_engine();
+    if (!engine.get_device_info().supports_immad)
+        return;
     if (!engine.get_device_info().supports_fp16)
     {
@@ -8988,6 +8990,8 @@ TEST_P(convolution_gpu_onednn, conv_onednn_cases) {
 TEST(convolution_gpu_onednn, padding_for_cldnn_kernel_after_onednn) {
     auto& engine = get_onednn_test_engine();
+    if (!engine.get_device_info().supports_immad)
+        return;
     int input_b = 1, input_f = 16, input_y = 3, input_x = 3;
     int output_b = 1, output_f = 16, output_y = 6, output_x = 6;

View File

@@ -2858,6 +2858,8 @@ TEST(deconvolution_f32_fw_gpu_onednn, basic_wsiz2x2_in2x2x1x1_stride2_nopad) {
     // Stride : 2x2
     auto& engine = get_onednn_test_engine();
+    if (!engine.get_device_info().supports_immad)
+        return;
     auto input = engine.allocate_memory({ data_types::f32, format::yxfb, { 1, 1, 2, 2 } });
     auto weights = engine.allocate_memory({ data_types::f32, format::oiyx, { 1, 1, 2, 2 } });

View File

@@ -1677,6 +1677,8 @@ TEST(fully_connected_onednn_gpu, no_biases_int8) {
     weight_b = 4, weight_x = 3;  // size of the whole weights buffer
     auto& engine = get_onednn_test_engine();
+    if (!engine.get_device_info().supports_immad)
+        return;
     // Change input data of fully-connected node from bx to bf
     auto input_prim = engine.allocate_memory({ data_types::f32, format::bfyx, { input_b, 1, input_x, 1 } });

View File

@@ -3776,6 +3776,8 @@ INSTANTIATE_TEST_SUITE_P(DISABLED_POOLING,
 #ifdef ENABLE_ONEDNN_FOR_GPU
 TEST(pooling_forward_gpu_onednn, basic_max_pooling_int8) {
     auto& engine = get_onednn_test_engine();
+    if (!engine.get_device_info().supports_immad)
+        return;
     layout in_layout = { type_to_data_type<float>::value, format::byxf, { 1, 1, 3, 3 } };
     layout out_layout = { type_to_data_type<float>::value, format::byxf, { 1, 1, 1, 1 } };
     layout byte_layout = { type_to_data_type<int8_t>::value, format::bfyx, { 1, 1, 3, 3 } };

View File

@@ -1848,6 +1848,8 @@ protected:
 public:
     void execute_onednn() {
+        if (!engine.get_device_info().supports_immad)
+            return;
         int input_dim = static_cast<int>(input_format.dimension());
         cldnn::format layout_format = input_format;

View File

@@ -2581,6 +2581,8 @@ TEST_P(testing_removal_reorder, only_remove_reorder_shallow_depth_input) {
 #ifdef ENABLE_ONEDNN_FOR_GPU
 // Check to remove reorder between onednn and cldnn conv if the reorder has no padded output
 TEST_P(testing_removal_reorder, removal_no_padded_reorder) {
+    if (!engine.get_device_info().supports_immad)
+        return;
     auto p = GetParam();
     layout reorder_layout(data_types::f16, format::b_fs_yx_fsv16, p.in_shape, padding({0, }, 0));
@@ -2603,14 +2605,13 @@ TEST_P(testing_removal_reorder, removal_no_padded_reorder) {
     execute(p);
-    if (!check_supports_immad())
-        return;
     EXPECT_EQ(check_optimized_out(p, "reorder_conv"), true);
 }
 // Check not to remove reorder between onednn and cldnn conv if the reorder has padded output
 TEST_P(testing_removal_reorder, removal_padded_reorder) {
+    if (!engine.get_device_info().supports_immad)
+        return;
     auto p = GetParam();
     layout reorder_layout(data_types::f16, format::b_fs_yx_fsv16, p.in_shape, padding({0, 0, 1, 1}, 0));
@@ -2633,9 +2634,6 @@ TEST_P(testing_removal_reorder, removal_padded_reorder) {
     execute(p);
-    if (!check_supports_immad())
-        return;
     EXPECT_EQ(check_optimized_out(p, "reorder_conv"), false);
 }
 #endif  // ENABLE_ONEDNN_FOR_GPU
@@ -2650,6 +2648,8 @@ INSTANTIATE_TEST_SUITE_P(reorder_gpu_testing, testing_removal_reorder,
 #ifdef ENABLE_ONEDNN_FOR_GPU
 TEST(reorder_onednn_gpu, basic_convert_int8) {
     auto& engine = get_onednn_test_engine();
+    if (!engine.get_device_info().supports_immad)
+        return;
     layout in_layout = { type_to_data_type<float>::value, format::byxf, { 1, 1, 3, 3 } };
     layout byte_layout = { type_to_data_type<int8_t>::value, format::bfyx, { 1, 1, 3, 3 } };
     std::initializer_list<float> input_f = { 1.0f, -2.6f, 3.1f, -4.0f, 5.03f, -6.99f, 7.0f, -8.0f, 9.0f };