[GPU] Fix eltwise fusions in case of LT_ALIGNED_READ and vec_size>1 (#14275)
commit f02c663a1d
parent 0b2f3347f6
@@ -1957,24 +1957,32 @@ std::string FusedOpsCodeGenerator::GetJitLoad(const FusedOpsConfiguration& conf,
     // Fsv16 Eltwise which requires f axis broadcast such as input[1,1,z,1,1], output[b,f,z,y,x] needs to use LT unaligned read.
     // In this case, intel_sub_group_block_read() introduces an increasing index in the feature block.
-    bool f_axis_broadcast = ((input_tensor.Feature().v != prim_output.Feature().v) && (input_tensor.Feature().v == 1) && (vec_size == 1));
+    bool f_axis_broadcast = (input_tensor.Feature().v != prim_output.Feature().v) && (input_tensor.Feature().v == 1);
     // Change JitLoad to ignore LT_ALIGNED_READ LoadType if this input tensor has a planar format (SimpleLayout)
-    if (desc.GetType() == KernelType::ELTWISE && input_tensor.SimpleLayout() && input_tensor.GetLayout() != orig_output_layout &&
+    if (desc.GetType() == KernelType::ELTWISE &&
         conf.load_type == FusedOpsConfiguration::LoadType::LT_ALIGNED_READ &&
-        (input_tensor.SameDimsSizes(prim_output) || f_axis_broadcast) && input_tensor.LogicalSize() != 1) {
+        ((input_tensor.SimpleLayout() && input_tensor.GetLayout() != orig_output_layout) || f_axis_broadcast) &&
+        (input_tensor.SameDimsSizes(prim_output) || f_axis_broadcast) &&
+        input_tensor.LogicalSize() != 1) {
         std::string sub_group_local_id_str = "get_sub_group_local_id";
         size_t found_sub = conf.bfzyx_idx_order[1].rfind(sub_group_local_id_str);
-        if (found_sub != std::string::npos) {
-            throw std::runtime_error("[clDNN] LT ALIGNED LoadType is used with get_sub_group_local_id.");
-        }
+        OPENVINO_ASSERT(found_sub == std::string::npos, "[GPU] LT_ALIGNED_READ LoadType is used with get_sub_group_local_id.");

         auto new_idx_order = conf.bfzyx_idx_order;
         new_idx_order[1] = "(" + conf.bfzyx_idx_order[1] + " + " + sub_group_local_id_str + "()" + ")";
-
-        std::string new_index_func_call = GetIdx(input_id, idx_desc{new_idx_order, desc.tensors[input_id]}, safe_load);
         if (vec_size > 1) {
-            throw std::runtime_error("[clDNN] Mixed layouts of input tensors are supported only if vector size is 1 :"
-                                     "[" + toString_v2(input_tensor) + "/" + toString_v2(prim_output));
+            auto vec_axis_idx = conf.GetDimIndexFromOrder(conf.vec_axis);
+            OPENVINO_ASSERT(vec_axis_idx != -1, "[GPU] Incorrect vec_axis value ", static_cast<int>(conf.vec_axis),
+                            " for bfzyx_idx_order order");
+            new_idx_order[vec_axis_idx] = "((" + conf.bfzyx_idx_order[vec_axis_idx] + ") + loop_var)";
         }
+        std::string new_index_func_call = GetIdx(input_id, idx_desc{new_idx_order, desc.tensors[input_id]}, safe_load);
+
+        if (vec_size > 1) {
+            std::string load_str = "0;"; // Assign zero to initial variable (GetInputVarName(input_id)) and modify it in the loop below
+            load_str += "for (uint loop_var = 0; loop_var < " + std::to_string(vec_size) + "; loop_var++) { ";
+            load_str += GetInputVarName(input_id) + "[loop_var] = " + GetInputPtrName(input_id) + "[" + new_index_func_call + "]; }";
+            return load_str;
+        } else {
             return GetInputPtrName(input_id) + "[" + new_index_func_call + "]";
         }
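Taken together, the rewritten branch builds a per-element gather loop instead of throwing when the fused eltwise input is read with LT_ALIGNED_READ and vec_size > 1. Below is a minimal sketch of the string it assembles for vec_size == 2, using hypothetical stand-ins for what GetInputVarName(), GetInputPtrName() and GetIdx() return; the real values depend on the fused op id, the tensor layout and the index order in conf.

#include <iostream>
#include <string>

int main() {
    // Hypothetical stand-ins, for illustration only.
    const size_t vec_size = 2;              // vectorized load width
    const std::string var = "input_1";      // what GetInputVarName(input_id) might return
    const std::string ptr = "input_ptr_1";  // what GetInputPtrName(input_id) might return
    // What GetIdx(...) might return after new_idx_order rewrites the feature index with
    // get_sub_group_local_id() and the vectorized axis with "+ loop_var".
    const std::string idx = "INPUT1_GET_INDEX_SAFE(b, (f + get_sub_group_local_id()), y, ((x) + loop_var))";

    // Same concatenation as the vec_size > 1 branch above.
    std::string load_str = "0;";
    load_str += "for (uint loop_var = 0; loop_var < " + std::to_string(vec_size) + "; loop_var++) { ";
    load_str += var + "[loop_var] = " + ptr + "[" + idx + "]; }";

    std::cout << load_str << "\n";
    return 0;
}

The emitted fragment first zero-initializes the fused-op input variable and then fills each lane through the per-lane index, where only the vectorized axis carries the loop_var offset patched in via new_idx_order.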
@@ -485,6 +485,18 @@ struct FusedOpsConfiguration {
                                                     return *this; }
     FusedOpsConfiguration& SetShuffleVarName(std::string val) { shuffle_var_name = val; return *this; }
     bool IsPostReorderFused(void) const { return orig_output_layout != DataLayout::DataLayoutCount; }
+    int GetDimIndexFromOrder(Tensor::DataChannelName val) const {
+        int dims_num = bfzyx_idx_order.size();
+        if (val == Tensor::DataChannelName::BATCH && dims_num >= 1) {
+            return 0;
+        } else if (val == Tensor::DataChannelName::FEATURE && dims_num >= 2) {
+            return 1;
+        } else if (dims_num >= 3 && dims_num - static_cast<int>(val) - 1 >= 0) {
+            return bfzyx_idx_order.size() - static_cast<int>(val) - 1;
+        } else {
+            return -1;
+        }
+    }
 };

 // Dependency(Input) type of fusing operation in fused node.
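The new GetDimIndexFromOrder() helper lets GetJitLoad find which entry of bfzyx_idx_order corresponds to conf.vec_axis. The following self-contained sketch of the mapping assumes the usual kernel_selector channel numbering, where spatial channels start at X = 0 and are counted from the innermost dimension; the real enum is Tensor::DataChannelName and its exact values may differ.

#include <iostream>
#include <string>
#include <vector>

// Assumed stand-in for Tensor::DataChannelName; only the relative ordering of the spatial channels matters here.
enum class DataChannelName { X = 0, Y = 1, Z = 2, W = 3, FEATURE = 6, BATCH = 7 };

int GetDimIndexFromOrder(const std::vector<std::string>& bfzyx_idx_order, DataChannelName val) {
    int dims_num = static_cast<int>(bfzyx_idx_order.size());
    if (val == DataChannelName::BATCH && dims_num >= 1) {
        return 0;                                     // batch is always the first index
    } else if (val == DataChannelName::FEATURE && dims_num >= 2) {
        return 1;                                     // feature is always the second index
    } else if (dims_num >= 3 && dims_num - static_cast<int>(val) - 1 >= 0) {
        return dims_num - static_cast<int>(val) - 1;  // spatial channels count back from the innermost (x) dim
    } else {
        return -1;                                    // channel not representable in this order
    }
}

int main() {
    const std::vector<std::string> bfyx  = {"b", "f", "y", "x"};
    const std::vector<std::string> bfzyx = {"b", "f", "z", "y", "x"};
    std::cout << GetDimIndexFromOrder(bfyx,  DataChannelName::X) << "\n";        // 3
    std::cout << GetDimIndexFromOrder(bfyx,  DataChannelName::Y) << "\n";        // 2
    std::cout << GetDimIndexFromOrder(bfzyx, DataChannelName::Z) << "\n";        // 2
    std::cout << GetDimIndexFromOrder(bfzyx, DataChannelName::FEATURE) << "\n";  // 1
    return 0;
}

In GetJitLoad the returned index selects the slot whose index string receives the " + loop_var" term, so only the axis chosen by conf.vec_axis advances inside the generated loop.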
@@ -251,6 +251,30 @@ TEST_P(eltwise_fp32_fsv16, add_per_element) {
     execute(p);
 }

+TEST_P(eltwise_fp32_fsv16, add_broadcast) {
+    auto p = GetParam();
+    auto eltwise2_layout = layout{ p.default_type, p.default_format, tensor{ 1, 1, get_input_layout(p).spatial(0), 1 } };
+
+    create_topologies(
+        input_layout("input", get_input_layout(p)),
+        input_layout("input2", get_input_layout2(p)),
+        data("add_data", get_mem(eltwise2_layout, -10, 10)),
+        eltwise("eltwise", { input_info("input"), input_info("input2") }, p.mode, p.default_type),
+        eltwise("add", { input_info("eltwise"), input_info("add_data") }, eltwise_mode::sum),
+        activation("activation", input_info("add"), activation_func::negative),
+        reorder("out", input_info("activation"), p.default_format, data_types::f32)
+    );
+    // Activation won't be fused because onednn doesn't support negative activation
+    if (engine.get_device_info().supports_immad)
+        p.expected_fused_primitives++;
+
+    implementation_desc eltw_impl = { format::b_fs_yx_fsv16, "eltwise_b_fs_yx_fsv16" };
+    bo_fused.set_option(build_option::force_implementations({ { "eltwise", eltw_impl } }));
+
+    tolerance = 1e-5f;
+    execute(p);
+}
+
 INSTANTIATE_TEST_SUITE_P(fusings_gpu, eltwise_fp32_fsv16, ::testing::ValuesIn(std::vector<eltwise_test_params>{
     eltwise_test_params{ CASE_ELTWISE_FP16_3, 3, 5 },
     eltwise_test_params{ CASE_ELTWISE_FP32_3, 3, 5 },