[GPU][DG2] Fix some testcases (#15774)

* A "C++ exception with description ... write lock_type ... thrown in the test body" failure occurred.
   Use get_output_values_to_float() instead of mem_lock with a hard-coded element type
   (a minimal sketch of this pattern follows the change summary below).
   * fusings_gpu/gemm_2in_act_scale_quantize_eltwise_i8.basic/2
   * fusings_gpu/gemm_2in_act_scale_eltwise.basic/2
* Remove the WA test code from "[GPU][DG2] Fix fusings_gpu/gemm_2in_scale.basic/7" (#15353)
   * Non-full-tensor post-ops are now broadcast, so the WA is no longer needed.
Dohyun Kim (Felix) 2023-02-23 14:23:40 +09:00 committed by GitHub
parent ed65583957
commit 1f196bacd3
3 changed files with 29 additions and 19 deletions
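
For context, the failing pattern and the fix reduce to the sketch below: locking an output buffer with a hard-coded element type breaks once the actual data_type differs, so the helper widens every supported type to float and compares in one place. This is a minimal, self-contained illustration in plain C++; Buffer, DataType, to_float and to_float_dispatch are illustrative stand-ins, not the cldnn API.

#include <cstdint>
#include <stdexcept>
#include <vector>

// Illustrative stand-ins for cldnn's data_types enum and memory blob.
enum class DataType { f32, i8, u8, i32 };
struct Buffer {
    DataType type;
    std::vector<uint8_t> bytes;  // raw storage
};

// Reinterpret the raw bytes as T and widen each element to float.
template <typename T>
std::vector<float> to_float(const Buffer& buf) {
    const T* p = reinterpret_cast<const T*>(buf.bytes.data());
    return std::vector<float>(p, p + buf.bytes.size() / sizeof(T));
}

// Type-dispatching wrapper, analogous to the non-template
// get_output_values_to_float() overload this commit adds.
inline std::vector<float> to_float_dispatch(const Buffer& buf) {
    switch (buf.type) {
    case DataType::f32: return to_float<float>(buf);
    case DataType::i8:  return to_float<int8_t>(buf);
    case DataType::u8:  return to_float<uint8_t>(buf);
    case DataType::i32: return to_float<int32_t>(buf);
    default: throw std::runtime_error("Unknown data_type");
    }
}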

@@ -74,20 +74,15 @@ public:
         ASSERT_EQ(outputs_ref.size(), outputs_fused.size());
         ASSERT_EQ(outputs_ref.size(), size_t(1));
-        auto output_not_fused_prim = outputs_ref.begin()->second.get_memory();
-        auto output_fused_prim = outputs_fused.begin()->second.get_memory();
-        if (output_not_fused_prim->get_layout().data_type == data_types::f32) {
-            cldnn::mem_lock<float> ref(output_not_fused_prim, get_test_stream());
-            cldnn::mem_lock<float> output_ptr(output_fused_prim, get_test_stream());
-            for (size_t i = 0; i < output_fused_prim->get_layout().count(); i++) {
-                ASSERT_NEAR(ref[i], output_ptr[i], tolerance) << "i = " << i;
-            }
-        } else {
-            cldnn::mem_lock<int16_t> ref(output_not_fused_prim, get_test_stream());
-            cldnn::mem_lock<int16_t> output_ptr(output_fused_prim, get_test_stream());
-            for (size_t i = 0; i < output_fused_prim->get_layout().count(); i++) {
-                ASSERT_NEAR(half_to_float(ref[i]), half_to_float(output_ptr[i]), tolerance) << "i = " << i;
-            }
-        }
+        auto val_ref = get_output_values_to_float(not_fused, outputs_ref.begin()->first);
+        auto val_opt = get_output_values_to_float(fused, outputs_fused.begin()->first);
+        ASSERT_EQ(val_ref.size(), val_opt.size());
+        for (size_t i = 0; i < val_ref.size(); i++) {
+            ASSERT_NEAR(val_ref[i], val_opt[i], tolerance)
+                << "tolerance = " << tolerance
+                << "\ni = " << i
+                << "\nref[i] = " << val_ref[i]
+                << "\nopt[i] = " << val_opt[i];
+        }
     }

@@ -74,11 +74,6 @@ public:
     }
     layout get_per_channel_layout(gemm_test_params& p) {
-        // WA: per channel binary post-operation is not supported for onednn gemm. Use single value for such case.
-        if (engine.get_device_info().supports_immad) {
-            std::cout << "per_channel layout for onednn gemm not supported." << std::endl;
-            return layout{p.default_type, p.default_format, tensor{1, 1, 1, 1}};
-        }
         return layout{ p.default_type, p.default_format, tensor{ 1, p.in_shapes.at(0).feature[0], 1, 1 } };
     }
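
The WA above became unnecessary because non-full-tensor post-ops are now broadcast. As a hedged illustration of what broadcasting means here (plain C++ with illustrative names, not the onednn/cldnn implementation), a per-channel operand of logical shape {1, F, 1, 1} is applied to a full {N, F, Y, X} tensor by reusing one value per channel:

#include <cstddef>
#include <vector>

// Multiply a {N, F, Y, X} tensor (NCHW-style linear layout) by a per-channel
// scale of logical shape {1, F, 1, 1}: scale[f] is broadcast over N, Y and X.
void apply_per_channel_scale(std::vector<float>& data,
                             size_t N, size_t F, size_t Y, size_t X,
                             const std::vector<float>& scale /* size F */) {
    for (size_t n = 0; n < N; ++n)
        for (size_t f = 0; f < F; ++f)
            for (size_t i = 0; i < Y * X; ++i)
                data[(n * F + f) * Y * X + i] *= scale[f];
}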

@@ -589,6 +589,26 @@ std::vector<float> get_output_values_to_float(network& net, const primitive_id&
         ret.push_back(mem[i]);
     return ret;
 }
+
+inline std::vector<float> get_output_values_to_float(network& net, const primitive_id& output_id, size_t max_cnt = std::numeric_limits<size_t>::max()) {
+    switch (net.get_output_layout(output_id).data_type) {
+    case data_types::f16:
+        return get_output_values_to_float<FLOAT16>(net, output_id, max_cnt);
+    case data_types::f32:
+        return get_output_values_to_float<float>(net, output_id, max_cnt);
+    case data_types::i8:
+        return get_output_values_to_float<int8_t>(net, output_id, max_cnt);
+    case data_types::u8:
+        return get_output_values_to_float<uint8_t>(net, output_id, max_cnt);
+    case data_types::i32:
+        return get_output_values_to_float<int32_t>(net, output_id, max_cnt);
+    case data_types::i64:
+        return get_output_values_to_float<int64_t>(net, output_id, max_cnt);
+    default:
+        IE_THROW() << "Unknown output data_type";
+    }
+}
+
 double default_tolerance(data_types dt);
 // inline void print_bin_blob(cldnn::memory& mem, std::string name)
 // {
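
For reference, a hedged usage sketch of the new non-template overload in a test, mirroring the compare() change in the first file. The two network objects, the primitive id, and the tolerance are assumed to exist in the caller; get_output_values_to_float() comes from the header patched above, ASSERT_* from googletest:

// Compare two executed networks output-by-output in float, regardless of
// the output data_type (f16/f32/i8/u8/i32/i64 all dispatch correctly).
// Assumes test_utils.h and <gtest/gtest.h> are included.
void compare_outputs(cldnn::network& net_ref, cldnn::network& net_opt,
                     const cldnn::primitive_id& id, double tolerance) {
    auto val_ref = get_output_values_to_float(net_ref, id);
    auto val_opt = get_output_values_to_float(net_opt, id);
    ASSERT_EQ(val_ref.size(), val_opt.size());
    for (size_t i = 0; i < val_ref.size(); i++)
        ASSERT_NEAR(val_ref[i], val_opt[i], tolerance) << "i = " << i;
}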