[GPU] Adjust fusion count in DG2 (#12708)

This commit is contained in:
Felix Dohyun Kim 2022-08-29 15:50:19 +09:00 committed by GitHub
parent 79f1e720e7
commit d1765d1df8
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 62 additions and 3 deletions

View File

@@ -174,6 +174,9 @@ TEST_P(activation_eltwise_activation_quantize_u8, basic) {
quantize("quant", "act2", "in_low", "in_high", "out_low", "out_high", 256, data_types::u8),
reorder("reorder_bfyx", "quant", p.default_format, data_types::f32)
);
// Activation won't be fused because onednn doesn't support softsign activation
if (engine.get_device_info().supports_immad)
p.expected_fused_primitives++;
tolerance = 1.f;
execute(p);
@@ -194,6 +197,9 @@ TEST_P(activation_eltwise_activation_quantize_u8, per_channel) {
quantize("quant", "act2", "in_low", "in_high", "out_low", "out_high", 256, data_types::u8),
reorder("reorder_bfyx", "quant", p.default_format, data_types::f32)
);
// Activation won't be fused because onednn doesn't support softsign activation
if (engine.get_device_info().supports_immad)
p.expected_fused_primitives++;
tolerance = 1.f;
execute(p);

View File

@@ -1843,6 +1843,9 @@ TEST_P(conv_int8_activation_eltwise_quantize, fsv16) {
// TODO Add 5D int8 optimized convolution implementations
return;
}
// Activation won't be fused because onednn doesn't support negative activation
if (engine.get_device_info().supports_immad)
p.expected_fused_primitives += 2;
tolerance = 1.f;
execute(p);
@@ -1873,6 +1876,9 @@ TEST_P(conv_int8_activation_eltwise_quantize, fsv32) {
// TODO Add 5D int8 optimized convolution implementations
return;
}
// Activation won't be fused because onednn doesn't support negative activation
if (engine.get_device_info().supports_immad)
p.expected_fused_primitives += 2;
tolerance = 1.f;
execute(p);
@@ -1914,6 +1920,9 @@ TEST_P(conv_int8_activation_eltwise, fsv16) {
// TODO Add 5D int8 optimized convolution implementations
return;
}
// Activation won't be fused because onednn doesn't support negative activation
if (engine.get_device_info().supports_immad)
p.expected_fused_primitives += 2;
tolerance = 1e-5f;
execute(p);
@@ -1939,6 +1948,9 @@ TEST_P(conv_int8_activation_eltwise, fsv32) {
// TODO Add 5D int8 optimized convolution implementations
return;
}
// Activation won't be fused because onednn doesn't support negative activation
if (engine.get_device_info().supports_immad)
p.expected_fused_primitives += 2;
tolerance = 1e-5f;
execute(p);

View File

@@ -546,9 +546,8 @@ TEST_P(deconv_scale_actv_quant_i8, basic) {
quantize("quant", "actv", "in_lo", "in_hi", "out_lo", "out_hi", 255, data_types::i8),
reorder("out", "quant", p.default_format, data_types::f32)
);
//Activation won't be fused because onednn doesn't support softsign activation
if(engine.get_device_info().supports_immad)
// Activation won't be fused because onednn doesn't support softsign activation
if (engine.get_device_info().supports_immad)
p.expected_fused_primitives++;
tolerance = 1.f;

View File

@@ -187,6 +187,9 @@ TEST_P(eltwise_const_path, not_fuse_to_const_eltwise) {
activation("activation", "add", activation_func::negative),
reorder("out", "activation", p.default_format, data_types::f32)
);
// Activation won't be fused because onednn doesn't support negative activation
if (engine.get_device_info().supports_immad)
p.expected_fused_primitives++;
tolerance = 1e-5f;
execute(p);
@@ -213,6 +216,9 @@ TEST_P(eltwise_fp32_fsv16, add) {
activation("activation", "add", activation_func::negative),
reorder("out", "activation", p.default_format, data_types::f32)
);
// Activation won't be fused because onednn doesn't support negative activation
if (engine.get_device_info().supports_immad)
p.expected_fused_primitives++;
implementation_desc eltw_impl = { format::b_fs_yx_fsv16, "eltwise_b_fs_yx_fsv16" };
bo_fused.set_option(build_option::force_implementations({ { "eltwise", eltw_impl } }));
@@ -232,6 +238,9 @@ TEST_P(eltwise_fp32_fsv16, add_per_element) {
activation("activation", "add", activation_func::negative),
reorder("out", "activation", p.default_format, data_types::f32)
);
// Activation won't be fused because onednn doesn't support negative activation
if (engine.get_device_info().supports_immad)
p.expected_fused_primitives++;
implementation_desc eltw_impl = { format::b_fs_yx_fsv16, "eltwise_b_fs_yx_fsv16" };
bo_fused.set_option(build_option::force_implementations({ { "eltwise", eltw_impl } }));
@@ -257,6 +266,9 @@ TEST_P(eltwise_fp32_fsv32, add) {
activation("activation", "add", activation_func::negative),
reorder("out", "activation", p.default_format, data_types::f32)
);
// Activation won't be fused because onednn doesn't support negative activation
if (engine.get_device_info().supports_immad)
p.expected_fused_primitives++;
implementation_desc eltw_impl = { format::fs_b_yx_fsv32, "eltwise_fs_b_yx_fsv32" };
bo_fused.set_option(build_option::force_implementations({ { "eltwise", eltw_impl } }));
@@ -276,6 +288,9 @@ TEST_P(eltwise_fp32_fsv32, add_per_element) {
activation("activation", "add", activation_func::negative),
reorder("out", "activation", p.default_format, data_types::f32)
);
// Activation won't be fused because onednn doesn't support negative activation
if (engine.get_device_info().supports_immad)
p.expected_fused_primitives++;
implementation_desc eltw_impl = { format::fs_b_yx_fsv32, "eltwise_fs_b_yx_fsv32" };
bo_fused.set_option(build_option::force_implementations({ { "eltwise", eltw_impl } }));
@@ -302,6 +317,9 @@ TEST_P(eltwise_fp32_fsv4, add) {
activation("activation", "add", activation_func::negative),
reorder("out", "activation", p.default_format, data_types::f32)
);
// Activation won't be fused because onednn doesn't support negative activation
if (engine.get_device_info().supports_immad)
p.expected_fused_primitives++;
implementation_desc eltw_impl = { format::b_fs_yx_fsv4, "eltwise_b_fs_yx_fsv4" };
bo_fused.set_option(build_option::force_implementations({ { "eltwise", eltw_impl } }));
@@ -321,6 +339,9 @@ TEST_P(eltwise_fp32_fsv4, add_per_element) {
activation("activation", "add", activation_func::negative),
reorder("out", "activation", p.default_format, data_types::f32)
);
// Activation won't be fused because onednn doesn't support negative activation
if (engine.get_device_info().supports_immad)
p.expected_fused_primitives++;
implementation_desc eltw_impl = { format::b_fs_yx_fsv4, "eltwise_b_fs_yx_fsv4" };
bo_fused.set_option(build_option::force_implementations({ { "eltwise", eltw_impl } }));
@@ -449,6 +470,9 @@ TEST_P(eltwise_fp16_byxf, add) {
activation("activation", "add", activation_func::negative),
reorder("out", "activation", p.default_format, data_types::f32)
);
// Activation won't be fused because onednn doesn't support negative activation
if (engine.get_device_info().supports_immad)
p.expected_fused_primitives++;
implementation_desc eltw_impl = { format::byxf, "generic_eltwise_ref" };
bo_fused.set_option(build_option::force_implementations({ { "eltwise", eltw_impl } }));

View File

@@ -339,6 +339,9 @@ TEST_P(gemm_2in_act_scale_eltwise, basic) {
eltwise("sum", { "activation", "eltwise_data" }, eltwise_mode::sum, data_types::f32),
reorder("reorder_bfyx", "sum", p.default_format, data_types::f32)
);
// Activation won't be fused because onednn doesn't support negative activation
if (engine.get_device_info().supports_immad)
p.expected_fused_primitives += 2;
tolerance = 1e-4f;
execute(p);
@@ -357,6 +360,9 @@ TEST_P(gemm_2in_act_scale_eltwise, broadcast_eltwise) {
eltwise("sum", { "activation", "eltwise_data" }, eltwise_mode::sum, data_types::f32),
reorder("reorder_bfyx", "sum", p.default_format, data_types::f32)
);
// Activation won't be fused because onednn doesn't support negative activation
if (engine.get_device_info().supports_immad)
p.expected_fused_primitives += 2;
tolerance = 1e-4f;
execute(p);

View File

@@ -120,6 +120,9 @@ TEST_P(lrn_fp32_quantize_u8_eltwise_activation, basic) {
activation("activation", "eltwise", activation_func::floor),
reorder("reorder", "activation", p.default_format, data_types::f32)
);
// Activation won't be fused because onednn doesn't support floor activation
if (engine.get_device_info().supports_immad)
p.expected_fused_primitives++;
tolerance = 1.0f;
execute(p);

View File

@@ -308,6 +308,9 @@ TEST_P(pooling_scale_activation_quantize, per_channel) {
quantize("quantize", "activation", "in_lo", "in_hi", "out_lo", "out_hi", 255, data_types::u8),
reorder("output_reorder", "quantize", p.default_format, data_types::f32)
);
// Activation won't be fused because onednn doesn't support atan activation
if (engine.get_device_info().supports_immad)
p.expected_fused_primitives++;
tolerance = 1.0f;
execute(p);

View File

@@ -242,6 +242,9 @@ TEST_P(reduce_scale_activation, basic) {
activation("activation", "scale", activation_func::cos),
reorder("output_reorder", "activation", p.default_format, data_types::f32)
);
// Activation won't be fused because onednn doesn't support cos activation
if (engine.get_device_info().supports_immad)
p.expected_fused_primitives++;
tolerance = 1e-02f;
execute(p);
@@ -257,6 +260,9 @@ TEST_P(reduce_scale_activation, per_channel) {
activation("activation", "scale", activation_func::cos),
reorder("output_reorder", "activation", p.default_format, data_types::f32)
);
// Activation won't be fused because onednn doesn't support cos activation
if (engine.get_device_info().supports_immad)
p.expected_fused_primitives++;
tolerance = 1e-02f;
execute(p);