[GPU][DG2] Adjust expected fuse counts (#15592)
parent 67fff4adcc
commit 148c7cc617
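These tests validate fusing by comparing how many primitives remain after the fusing passes against expected counts carried in the test parameters. On devices with immad support (the onednn path) some activations are not fused, which is why several tests bumped the expectation at runtime. A minimal, self-contained sketch of that counting idea follows; names such as test_params, check_fusing and device_supports_onednn are illustrative stand-ins, not the actual fixture API.

    #include <cassert>
    #include <cstddef>

    // Hypothetical stand-ins for the real fixture pieces.
    struct test_params {
        std::size_t expected_fused_primitives;      // primitives left after fusing
        std::size_t expected_not_fused_primitives;  // primitives in the reference network
    };

    // Sketch of the comparison the fixtures perform: run the fused and the
    // reference network, then compare executed-primitive counts to the expectations.
    void check_fusing(std::size_t fused_count, std::size_t not_fused_count,
                      test_params p, bool device_supports_onednn) {
        // Some activations (softsign, negative, floor) are not fused on the onednn
        // path, so a few tests used to raise the expectation on such devices.
        if (device_supports_onednn)
            p.expected_fused_primitives++;

        assert(fused_count == p.expected_fused_primitives);
        assert(not_fused_count == p.expected_not_fused_primitives);
    }

    int main() {
        // Example: 2 primitives remain after fusing, 5 in the unfused reference network.
        check_fusing(2, 5, test_params{2, 5}, /*device_supports_onednn=*/false);
    }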
@@ -177,9 +177,6 @@ TEST_P(activation_eltwise_activation_quantize_u8, basic) {
                  input_info("out_low"), input_info("out_high"), 256, data_types::u8),
         reorder("reorder_bfyx", input_info("quant"), p.default_format, data_types::f32)
     );
-    // Activation won't be fused because onednn doesn't support softsign activation
-    if (engine.get_device_info().supports_immad)
-        p.expected_fused_primitives++;

     tolerance = 1.f;
     execute(p);
@@ -201,9 +198,6 @@ TEST_P(activation_eltwise_activation_quantize_u8, per_channel) {
                  input_info("out_low"), input_info("out_high"), 256, data_types::u8),
         reorder("reorder_bfyx", input_info("quant"), p.default_format, data_types::f32)
     );
-    // Activation won't be fused because onednn doesn't support softsign activation
-    if (engine.get_device_info().supports_immad)
-        p.expected_fused_primitives++;

     tolerance = 1.f;
     execute(p);
@@ -2025,18 +2025,18 @@ TEST_P(conv_int8_activation_eltwise_quantize, fsv32) {
 }

 INSTANTIATE_TEST_SUITE_P(fusings_gpu, conv_int8_activation_eltwise_quantize, ::testing::ValuesIn(std::vector<convolution_test_params>{
-    convolution_test_params{ CASE_CONV_U8S8_1, 2, 4, 5 },
-    convolution_test_params{ CASE_CONV_U8S8_2, 2, 4, 5 },
-    convolution_test_params{ CASE_CONV_U8S8_3, 2, 4, 5 },
-    convolution_test_params{ CASE_CONV_U8S8_4, 2, 4, 5 },
-    convolution_test_params{ CASE_CONV_U8S8_7, 2, 4, 5 },
-    convolution_test_params{ CASE_CONV_U8S8_8, 2, 4, 5 },
-    convolution_test_params{ CASE_CONV_S8S8_1, 2, 4, 5 },
-    convolution_test_params{ CASE_CONV_S8S8_2, 2, 4, 5 },
-    convolution_test_params{ CASE_CONV_S8S8_3, 2, 4, 5 },
-    convolution_test_params{ CASE_CONV_S8S8_4, 2, 4, 5 },
-    convolution_test_params{ CASE_CONV_S8S8_7, 2, 4, 5 },
-    convolution_test_params{ CASE_CONV_S8S8_8, 2, 4, 5 },
+    convolution_test_params{ CASE_CONV_U8S8_1, 2, 2, 5 },
+    convolution_test_params{ CASE_CONV_U8S8_2, 2, 2, 5 },
+    convolution_test_params{ CASE_CONV_U8S8_3, 2, 2, 5 },
+    convolution_test_params{ CASE_CONV_U8S8_4, 2, 2, 5 },
+    convolution_test_params{ CASE_CONV_U8S8_7, 2, 2, 5 },
+    convolution_test_params{ CASE_CONV_U8S8_8, 2, 2, 5 },
+    convolution_test_params{ CASE_CONV_S8S8_1, 2, 2, 5 },
+    convolution_test_params{ CASE_CONV_S8S8_2, 2, 2, 5 },
+    convolution_test_params{ CASE_CONV_S8S8_3, 2, 2, 5 },
+    convolution_test_params{ CASE_CONV_S8S8_4, 2, 2, 5 },
+    convolution_test_params{ CASE_CONV_S8S8_7, 2, 2, 5 },
+    convolution_test_params{ CASE_CONV_S8S8_8, 2, 2, 5 },
 }));

 class conv_int8_activation : public ConvFusingTest {};
@@ -2119,18 +2119,18 @@ TEST_P(conv_int8_activation_eltwise, fsv32) {
 }

 INSTANTIATE_TEST_SUITE_P(fusings_gpu, conv_int8_activation_eltwise, ::testing::ValuesIn(std::vector<convolution_test_params>{
-    convolution_test_params{ CASE_CONV_U8S8_1, 2, 4, 4 },
-    convolution_test_params{ CASE_CONV_U8S8_2, 2, 4, 4 },
-    convolution_test_params{ CASE_CONV_U8S8_3, 2, 4, 4 },
-    convolution_test_params{ CASE_CONV_U8S8_4, 2, 4, 4 },
-    convolution_test_params{ CASE_CONV_U8S8_7, 2, 4, 4 },
-    convolution_test_params{ CASE_CONV_U8S8_8, 2, 4, 4 },
-    convolution_test_params{ CASE_CONV_S8S8_1, 2, 4, 4 },
-    convolution_test_params{ CASE_CONV_S8S8_2, 2, 4, 4 },
-    convolution_test_params{ CASE_CONV_S8S8_3, 2, 4, 4 },
-    convolution_test_params{ CASE_CONV_S8S8_4, 2, 4, 4 },
-    convolution_test_params{ CASE_CONV_S8S8_7, 2, 4, 4 },
-    convolution_test_params{ CASE_CONV_S8S8_8, 2, 4, 4 },
+    convolution_test_params{ CASE_CONV_U8S8_1, 2, 2, 4 },
+    convolution_test_params{ CASE_CONV_U8S8_2, 2, 2, 4 },
+    convolution_test_params{ CASE_CONV_U8S8_3, 2, 2, 4 },
+    convolution_test_params{ CASE_CONV_U8S8_4, 2, 2, 4 },
+    convolution_test_params{ CASE_CONV_U8S8_7, 2, 2, 4 },
+    convolution_test_params{ CASE_CONV_U8S8_8, 2, 2, 4 },
+    convolution_test_params{ CASE_CONV_S8S8_1, 2, 2, 4 },
+    convolution_test_params{ CASE_CONV_S8S8_2, 2, 2, 4 },
+    convolution_test_params{ CASE_CONV_S8S8_3, 2, 2, 4 },
+    convolution_test_params{ CASE_CONV_S8S8_4, 2, 2, 4 },
+    convolution_test_params{ CASE_CONV_S8S8_7, 2, 2, 4 },
+    convolution_test_params{ CASE_CONV_S8S8_8, 2, 2, 4 },
 }));

 class conv_int8_quantize_u8 : public ConvFusingTest {};
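Each convolution_test_params entry above feeds the same TEST_P body once per parameter set via GoogleTest value-parameterization. A minimal standalone example of the same mechanism (deliberately independent of the OpenVINO fixtures; demo_params and the invariant checked here are illustrative only):

    #include <gtest/gtest.h>

    // Minimal stand-in for convolution_test_params: just the expected counts.
    struct demo_params {
        int expected_fused;
        int expected_not_fused;
    };

    class demo_fusing_test : public ::testing::TestWithParam<demo_params> {};

    TEST_P(demo_fusing_test, counts_are_consistent) {
        demo_params p = GetParam();
        // The real tests compare primitive counts of a fused vs. reference network;
        // here we only check the invariant that fusing cannot add primitives.
        EXPECT_LE(p.expected_fused, p.expected_not_fused);
    }

    INSTANTIATE_TEST_SUITE_P(demo, demo_fusing_test, ::testing::Values(
        demo_params{ 2, 5 },
        demo_params{ 4, 5 }
    ));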
@@ -121,7 +121,7 @@ TEST_P(eltwise_quantize, u8) {
         reorder("out", input_info("quantize"), p.default_format, data_types::f32)
     );

-    tolerance = 1.f;
+    tolerance = default_tolerance(data_types::i8);
     execute(p);
 }

@@ -140,7 +140,7 @@ TEST_P(eltwise_quantize, i8_per_channel) {
         reorder("out", input_info("quantize"), p.default_format, data_types::f32)
     );

-    tolerance = 1.f;
+    tolerance = default_tolerance(data_types::i8);
     execute(p);
 }

@@ -189,11 +189,8 @@ TEST_P(eltwise_const_path, not_fuse_to_const_eltwise) {
         activation("activation", input_info("add"), activation_func::negative),
         reorder("out", input_info("activation"), p.default_format, data_types::f32)
     );
-    // Activation won't be fused because onednn doesn't support negative activation
-    if (engine.get_device_info().supports_immad)
-        p.expected_fused_primitives++;

-    tolerance = 1e-5f;
+    tolerance = default_tolerance(p.input_type);
     execute(p);
 }

@@ -218,14 +215,11 @@ TEST_P(eltwise_fp32_fsv16, add) {
         activation("activation", input_info("add"), activation_func::negative),
         reorder("out", input_info("activation"), p.default_format, data_types::f32)
     );
-    // Activation won't be fused because onednn doesn't support negative activation
-    if (engine.get_device_info().supports_immad)
-        p.expected_fused_primitives++;

     ov::intel_gpu::ImplementationDesc eltw_impl = { format::b_fs_yx_fsv16, "eltwise_b_fs_yx_fsv16" };
     cfg_fused.set_property(ov::intel_gpu::force_implementations(ov::intel_gpu::ImplForcingMap{ { "eltwise", eltw_impl } }));

-    tolerance = 1e-5f;
+    tolerance = default_tolerance(p.input_type);
     execute(p);
 }

@@ -240,14 +234,11 @@ TEST_P(eltwise_fp32_fsv16, add_per_element) {
         activation("activation", input_info("add"), activation_func::negative),
         reorder("out", input_info("activation"), p.default_format, data_types::f32)
     );
-    // Activation won't be fused because onednn doesn't support negative activation
-    if (engine.get_device_info().supports_immad)
-        p.expected_fused_primitives++;

     ov::intel_gpu::ImplementationDesc eltw_impl = { format::b_fs_yx_fsv16, "eltwise_b_fs_yx_fsv16" };
     cfg_fused.set_property(ov::intel_gpu::force_implementations(ov::intel_gpu::ImplForcingMap{ { "eltwise", eltw_impl } }));

-    tolerance = 1e-5f;
+    tolerance = default_tolerance(p.input_type);
     execute(p);
 }

@@ -264,14 +255,11 @@ TEST_P(eltwise_fp32_fsv16, add_broadcast) {
         activation("activation", input_info("add"), activation_func::negative),
         reorder("out", input_info("activation"), p.default_format, data_types::f32)
     );
-    // Activation won't be fused because onednn doesn't support negative activation
-    if (engine.get_device_info().supports_immad)
-        p.expected_fused_primitives++;

     ov::intel_gpu::ImplementationDesc eltw_impl = { format::b_fs_yx_fsv16, "eltwise_b_fs_yx_fsv16" };
     cfg_fused.set_property(ov::intel_gpu::force_implementations(ov::intel_gpu::ImplForcingMap{ { "eltwise", eltw_impl } }));

-    tolerance = 1e-5f;
+    tolerance = default_tolerance(p.input_type);
     execute(p);
 }

@@ -292,14 +280,11 @@ TEST_P(eltwise_fp32_fsv32, add) {
         activation("activation", input_info("add"), activation_func::negative),
         reorder("out", input_info("activation"), p.default_format, data_types::f32)
     );
-    // Activation won't be fused because onednn doesn't support negative activation
-    if (engine.get_device_info().supports_immad)
-        p.expected_fused_primitives++;

     ov::intel_gpu::ImplementationDesc eltw_impl = { format::fs_b_yx_fsv32, "eltwise_fs_b_yx_fsv32" };
     cfg_fused.set_property(ov::intel_gpu::force_implementations(ov::intel_gpu::ImplForcingMap{ { "eltwise", eltw_impl } }));

-    tolerance = 1e-5f;
+    tolerance = default_tolerance(p.input_type);
     execute(p);
 }

@@ -314,14 +299,11 @@ TEST_P(eltwise_fp32_fsv32, add_per_element) {
         activation("activation", input_info("add"), activation_func::negative),
         reorder("out", input_info("activation"), p.default_format, data_types::f32)
     );
-    // Activation won't be fused because onednn doesn't support negative activation
-    if (engine.get_device_info().supports_immad)
-        p.expected_fused_primitives++;

     ov::intel_gpu::ImplementationDesc eltw_impl = { format::fs_b_yx_fsv32, "eltwise_fs_b_yx_fsv32" };
     cfg_fused.set_property(ov::intel_gpu::force_implementations(ov::intel_gpu::ImplForcingMap{ { "eltwise", eltw_impl } }));

-    tolerance = 1e-5f;
+    tolerance = default_tolerance(p.input_type);
     execute(p);
 }

@@ -343,14 +325,11 @@ TEST_P(eltwise_fp32_fsv4, add) {
         activation("activation", input_info("add"), activation_func::negative),
         reorder("out", input_info("activation"), p.default_format, data_types::f32)
     );
-    // Activation won't be fused because onednn doesn't support negative activation
-    if (engine.get_device_info().supports_immad)
-        p.expected_fused_primitives++;

     ov::intel_gpu::ImplementationDesc eltw_impl = { format::b_fs_yx_fsv4, "eltwise_b_fs_yx_fsv4" };
     cfg_fused.set_property(ov::intel_gpu::force_implementations(ov::intel_gpu::ImplForcingMap{ { "eltwise", eltw_impl } }));

-    tolerance = 1e-5f;
+    tolerance = default_tolerance(p.input_type);
     execute(p);
 }

@@ -365,14 +344,11 @@ TEST_P(eltwise_fp32_fsv4, add_per_element) {
         activation("activation", input_info("add"), activation_func::negative),
         reorder("out", input_info("activation"), p.default_format, data_types::f32)
     );
-    // Activation won't be fused because onednn doesn't support negative activation
-    if (engine.get_device_info().supports_immad)
-        p.expected_fused_primitives++;

     ov::intel_gpu::ImplementationDesc eltw_impl = { format::b_fs_yx_fsv4, "eltwise_b_fs_yx_fsv4" };
     cfg_fused.set_property(ov::intel_gpu::force_implementations(ov::intel_gpu::ImplForcingMap{ { "eltwise", eltw_impl } }));

-    tolerance = 1e-5f;
+    tolerance = default_tolerance(p.input_type);
     execute(p);
 }

@@ -396,7 +372,7 @@ TEST_P(eltwise_fp32_fused_prims, scale_activation) {
         reorder("out", input_info("activation"), p.default_format, data_types::f32)
     );

-    tolerance = 1e-5f;
+    tolerance = default_tolerance(p.input_type);
     execute(p);
 }

@@ -412,7 +388,7 @@ TEST_P(eltwise_fp32_fused_prims, eltwise_activation) {
         reorder("out", input_info("activation"), p.default_format, data_types::f32)
     );

-    tolerance = 1e-5f;
+    tolerance = default_tolerance(p.input_type);
     execute(p);
 }

@@ -428,7 +404,7 @@ TEST_P(eltwise_fp32_fused_prims, eltwise_activation_with_broadcast) {
         reorder("out", input_info("activation"), p.default_format, data_types::f32)
     );

-    tolerance = 1e-5f;
+    tolerance = default_tolerance(p.input_type);
     execute(p);
 }

@@ -476,7 +452,7 @@ TEST_P(eltwise_fp32_scale, 6d) {
         reorder("out", input_info("scale"), p.default_format, data_types::f32)
     );

-    tolerance = 1e-5f;
+    tolerance = default_tolerance(p.input_type);
     execute(p);
 }

@@ -496,14 +472,11 @@ TEST_P(eltwise_fp16_byxf, add) {
         activation("activation", input_info("add"), activation_func::negative),
         reorder("out", input_info("activation"), p.default_format, data_types::f32)
     );
-    // Activation won't be fused because onednn doesn't support negative activation
-    if (engine.get_device_info().supports_immad)
-        p.expected_fused_primitives++;

     ov::intel_gpu::ImplementationDesc eltw_impl = { format::byxf, "generic_eltwise_ref" };
     cfg_fused.set_property(ov::intel_gpu::force_implementations(ov::intel_gpu::ImplForcingMap{ { "eltwise", eltw_impl } }));

-    tolerance = 1e-5f;
+    tolerance = default_tolerance(p.input_type);
     execute(p);
 }

@@ -527,7 +500,7 @@ TEST_P(eltwise_no_pitches_same_dims_quantize, quantize_f32_output) {
         reorder("out", input_info("quantize"), p.default_format, data_types::f32)
     );

-    tolerance = 1.f;
+    tolerance = default_tolerance(data_types::i8);
     execute(p);
 }

@@ -551,7 +524,7 @@ TEST_P(eltwise_activation, basic) {
         reorder("out", input_info("activation"), p.default_format, data_types::f32)
     );

-    tolerance = 1e-5f;
+    tolerance = default_tolerance(p.input_type);
     execute(p);
 }

@@ -565,7 +538,7 @@ TEST_P(eltwise_activation, fp16_out) {
         reorder("out", input_info("activation"), p.default_format, data_types::f32)
     );

-    tolerance = 1e-5f;
+    tolerance = default_tolerance(p.input_type);
     execute(p);
 }

@@ -121,11 +121,8 @@ TEST_P(lrn_fp32_quantize_u8_eltwise_activation, basic) {
         activation("activation", input_info("eltwise"), activation_func::floor),
         reorder("reorder", input_info("activation"), p.default_format, data_types::f32)
     );
-    // Activation won't be fused because onednn doesn't support floor activation
-    if (engine.get_device_info().supports_immad)
-        p.expected_fused_primitives++;

-    tolerance = 1.0f;
+    tolerance = default_tolerance(data_types::u8);
     execute(p);
 }

@@ -152,7 +149,7 @@ TEST_P(lrn_fp32_quantize_u8_eltwise_activation, per_channel) {
         reorder("reorder", input_info("activation"), p.default_format, data_types::f32)
     );

-    tolerance = 1.0f;
+    tolerance = default_tolerance(data_types::u8);
     execute(p);
 }

@@ -203,7 +200,7 @@ TEST_P(lrn_fp32_quantize_i8_eltwise_activation, basic) {
         reorder("reorder", input_info("activation"), p.default_format, data_types::f32)
     );

-    tolerance = 1.0f;
+    tolerance = default_tolerance(data_types::i8);
     execute(p);
 }

@@ -247,7 +244,7 @@ TEST_P(lrn_fp32_eltwise_activation_quantize_u8, basic) {
         reorder("reorder", input_info("quantize"), p.default_format, data_types::f32)
     );

-    tolerance = 1.0f;
+    tolerance = default_tolerance(data_types::u8);
     execute(p);
 }

@@ -282,7 +279,7 @@ TEST_P(lrn_fp16_eltwise_activation, basic) {
         reorder("reorder", input_info("activation"), p.default_format, data_types::f32)
     );

-    tolerance = 1e-05f;
+    tolerance = default_tolerance(p.data_type);
     execute(p);
 }

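Most of the hunks above replace hard-coded tolerances with default_tolerance(<element type>). A plausible sketch of such a helper, assuming it simply maps an element type to a comparison threshold (the enum and the threshold values below are illustrative assumptions, not the ones defined in the shared test utilities):

    #include <stdexcept>

    // Illustrative mirror of the element types used in the tests.
    enum class data_type { f32, f16, i8, u8 };

    // Hypothetical helper: coarser thresholds for quantized outputs,
    // tighter ones for floating-point outputs.
    inline float default_tolerance(data_type dt) {
        switch (dt) {
        case data_type::i8:
        case data_type::u8:  return 1.0f;   // quantized results compared within one step
        case data_type::f16: return 1e-2f;  // half precision
        case data_type::f32: return 1e-5f;  // single precision
        }
        throw std::invalid_argument("unknown data type");
    }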