[IE CLDNN] Fix bug with incompatible node and memory layouts (#5499)

This commit is contained in:
Andrei Molotkov
2021-05-04 17:06:57 +03:00
committed by GitHub
parent b291ca8cfa
commit f93c5e09aa

View File

@@ -31,6 +31,10 @@ void prepare_padding::run(program_impl& p) {
format == format::b_fs_zyx_fsv32)
continue;
if (prim_node.input().is_type<data>()) {
continue;
}
auto filter_size = prim_node.weights(0).get_output_layout().size;
auto needed_padding = calc_sliding_window_needed_input_padding(prim_node.input().get_output_layout(),
@@ -50,6 +54,10 @@ void prepare_padding::run(program_impl& p) {
if (!prim->with_output_size)
continue;
if (prim_node.input().is_type<data>()) {
continue;
}
auto filter_size = prim_node.weights(0).get_output_layout().size;
auto needed_padding = calc_sliding_window_needed_input_padding(prim_node.input().get_output_layout(),
@@ -69,6 +77,10 @@ void prepare_padding::run(program_impl& p) {
if (!prim->with_output_size)
continue;
if (prim_node.input().is_type<data>()) {
continue;
}
padding needed_padding;
// WA for this format. sliding window needs to be fixed -- perf degradation for InceptionV1 type models
if (node->get_output_layout().format == format::b_fs_yx_fsv16)
@@ -87,6 +99,10 @@ void prepare_padding::run(program_impl& p) {
} else if (node->is_type<binary_convolution>()) {
auto& prim_node = node->as<binary_convolution>();
if (prim_node.input().is_type<data>()) {
continue;
}
auto needed_padding = prim_node.input().get_output_layout().data_padding;
p.apply_needed_padding(prim_node, prim_node.input(), needed_padding);
@@ -127,8 +143,8 @@ void prepare_padding::run(program_impl& p) {
prev_prim_output_layout.data_type != data_types::i8 && prev_prim_output_layout.data_type != data_types::u8)
continue;
// We shouldn't apply any padding to nodes which are marked as outputs
if (conv_input_node.is_output())
// We shouldn't apply any padding to nodes which are marked as outputs or are of type data
if (conv_input_node.is_output() || conv_input_node.is_type<data>())
continue;
// Calculating input padding needed for convolution
@@ -183,8 +199,8 @@ void prepare_padding::run(program_impl& p) {
if (conv_layout.format != cldnn::format::bfyx && conv_layout.format != cldnn::format::b_fs_yx_32fp)
continue;
// We shouldn't apply any padding to nodes which are marked as outputs
if (conv_input_node.is_output())
// We shouldn't apply any padding to nodes which are marked as outputs or are of type data
if (conv_input_node.is_output() || conv_input_node.is_type<data>())
continue;
// Calculating input padding needed for convolution