diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp
index 9460351beb5..6898a55f3b5 100644
--- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp
+++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp
@@ -51,7 +51,8 @@ void prepare_padding::run(program& p) {
 
                 if (!prim->with_output_size)
                     continue;
-
+                auto weights_layout = prim_node.weights().get_output_layout();
+                tensor filter_size = weights_layout.convert_to_weights_layout(prim->grouped_weights_shape).get_tensor();
                 auto format = node->get_output_layout().format;
                 if (format == format::b_fs_zyx_fsv16 ||
                     format == format::bs_fs_zyx_bsv16_fsv16 ||
@@ -60,8 +61,6 @@ void prepare_padding::run(program& p) {
                     format == format::b_fs_zyx_fsv32)
                     continue;
 
-                auto filter_size = prim_node.weights().get_output_layout().get_tensor();
-
                 auto needed_padding = calc_sliding_window_needed_input_padding(prim_node.input().get_output_layout(),
                                                                                prim->output_size,
                                                                                filter_size,
diff --git a/src/plugins/intel_gpu/tests/passes/prepare_padding_test.cpp b/src/plugins/intel_gpu/tests/passes/prepare_padding_test.cpp
new file mode 100644
index 00000000000..6b5ee22ad4c
--- /dev/null
+++ b/src/plugins/intel_gpu/tests/passes/prepare_padding_test.cpp
@@ -0,0 +1,43 @@
+// Copyright (C) 2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "test_utils.h"
+
+#include "intel_gpu/runtime/engine.hpp"
+#include "intel_gpu/graph/program.hpp"
+#include "data_inst.h"
+#include "convolution_inst.h"
+#include "pass_manager.h"
+#include "program_wrapper.h"
+
+#include <memory>
+
+using namespace cldnn;
+using namespace ::tests;
+
+TEST(prepare_padding, groupconv_with_output) {
+    auto& engine = get_test_engine();
+    auto in_layout = layout{data_types::f16, format::bfyx, tensor{1, 18, 135, 76}};
+    auto weight_layout = layout{data_types::f16, format::bfzyx, tensor{1, 18, 3, 3, 18}};
+    auto weights_data = generate_random_5d<FLOAT16>(1, 18, 18, 3, 3, -1, 1);
+    auto weights_mem = engine.allocate_memory({ data_types::f16, format::bfzyx, weight_layout.get_tensor()});
+    set_values(weights_mem, weights_data);
+
+    auto output_size = tensor{1, 18, 135, 76};
+    ov::CoordinateDiff pad = {0, 0};
+    topology topo;
+    topo.add(input_layout("input", in_layout));
+    topo.add(data("weight", weights_mem));
+    topo.add(convolution("conv", input_info("input"), { "weight" }, {}, 1, {1, 1}, {0, 0}, {1, 1}, output_size, data_types::f16, true));
+    topo.add(reorder("reorder", input_info("conv"), format::bfyx, data_types::f32));
+
+    ExecutionConfig config = get_test_default_config(engine);
+    config.set_property(ov::intel_gpu::allow_new_shape_infer(true));
+    auto prog = program::build_program(engine, topo, config, false, true);
+    reorder_factory rf;
+    program_wrapper::apply_opt_pass<prepare_padding>(*prog, true);
+    const auto& node = prog->get_node("reorder_input_conv");
+    auto params = node.get_kernel_impl_params();
+    ASSERT_EQ(params->get_output_layout().data_padding.upper_size().spatial[2], 0);
+}