From 9f481e8eaaaf7571420fbb5e4c2253901b964e47 Mon Sep 17 00:00:00 2001
From: Sergey Shlyapnikov
Date: Tue, 13 Dec 2022 16:59:55 +0400
Subject: [PATCH] [GPU] Fix Pad Primitive shape inference (#14282)

---
 src/plugins/intel_gpu/src/graph/border.cpp    |  8 ++--
 .../intel_gpu/src/graph/impls/ocl/border.cpp  |  9 +++-
 src/plugins/intel_gpu/src/plugin/ops/pad.cpp  | 18 ++------
 .../tests/shape_infer/pad_si_test.cpp         |  7 +++
 .../gpu/concurrency/gpu_concurrency_tests.cpp |  4 +-
 .../gpu/single_layer_tests/dynamic/pad.cpp    | 44 +++++++++++++++++++
 6 files changed, 68 insertions(+), 22 deletions(-)

diff --git a/src/plugins/intel_gpu/src/graph/border.cpp b/src/plugins/intel_gpu/src/graph/border.cpp
index db94d617292..16ab37e2268 100644
--- a/src/plugins/intel_gpu/src/graph/border.cpp
+++ b/src/plugins/intel_gpu/src/graph/border.cpp
@@ -25,8 +25,8 @@ layout border_inst::calc_output_layout(border_node const& node, kernel_impl_para
     auto new_dims = input_layout.get_dims();
 
     for (size_t i = 0; i < new_dims.size(); ++i) {
-        new_dims[i] += desc->pads_begin[i];
-        new_dims[i] += desc->pads_end[i];
+        new_dims[i] += (i < desc->pads_begin.size()) ? desc->pads_begin[i] : 0;
+        new_dims[i] += (i < desc->pads_end.size()) ? desc->pads_end[i] : 0;
     }
     return layout{ input_layout.data_type, input_format, tensor(dims_format, new_dims) };
 }
@@ -129,7 +129,7 @@ border_inst::typed_primitive_inst(network& network, border_node const& node) : p
     if (pad_mode == ov::op::PadMode::SYMMETRIC) {
         bool valid_pads = true;
 
-        for (size_t i = 0; i < input_sizes.size(); ++i) {
+        for (size_t i = 0; i < argument->pads_begin.size(); ++i) {
             valid_pads &= argument->pads_begin[i] <= input_sizes[i];
             valid_pads &= argument->pads_end[i] <= input_sizes[i];
         }
@@ -140,7 +140,7 @@ border_inst::typed_primitive_inst(network& network, border_node const& node) : p
     } else if (pad_mode == ov::op::PadMode::REFLECT) {
         bool valid_pads = true;
 
-        for (size_t i = 0; i < input_sizes.size(); ++i) {
+        for (size_t i = 0; i < argument->pads_begin.size(); ++i) {
             valid_pads &= argument->pads_begin[i] < input_sizes[i];
             valid_pads &= argument->pads_end[i] < input_sizes[i];
         }
diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/border.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/border.cpp
index 7aabf8352d0..47f2fa6a076 100644
--- a/src/plugins/intel_gpu/src/graph/impls/ocl/border.cpp
+++ b/src/plugins/intel_gpu/src/graph/impls/ocl/border.cpp
@@ -31,10 +31,17 @@ struct border_impl : typed_primitive_impl_ocl<border> {
         auto params = get_default_params<kernel_selector::border_params>(impl_param, 1);
         auto optional_params = get_default_optional_params<kernel_selector::border_optional_params>(impl_param.get_program());
 
-        format pads_format = format::adjust_to_rank(format::bfyx, impl_param.get_input_layout(0).get_rank());
+        size_t rank = impl_param.get_input_layout(0).get_rank();
+        format pads_format = format::adjust_to_rank(format::bfyx, rank);
         std::vector<tensor::value_type> pads_begin(primitive->pads_begin.begin(), primitive->pads_begin.end());
         std::vector<tensor::value_type> pads_end(primitive->pads_end.begin(), primitive->pads_end.end());
 
+        if (pads_begin.size() < rank) {
+            size_t zeros_to_add = rank - pads_begin.size();
+            pads_begin.insert(pads_begin.end(), zeros_to_add, 0);
+            pads_end.insert(pads_end.end(), zeros_to_add, 0);
+        }
+
         params.lt_sizes = convert_dim_vector(tensor(pads_format, pads_begin, 0));
         params.rb_sizes = convert_dim_vector(tensor(pads_format, pads_end, 0));
         params.border_value = primitive->pad_value;
diff --git a/src/plugins/intel_gpu/src/plugin/ops/pad.cpp b/src/plugins/intel_gpu/src/plugin/ops/pad.cpp
index 864d4d96e03..ad97137d99c 100644
--- a/src/plugins/intel_gpu/src/plugin/ops/pad.cpp
+++ b/src/plugins/intel_gpu/src/plugin/ops/pad.cpp
@@ -17,30 +17,18 @@ static void CreatePadOp(Program& p, const std::shared_ptr<ngraph::op::v1::Pad>&
     validate_inputs_count(op, {3, 4});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
-    size_t rank = std::max(op->get_input_partial_shape(0).size(), static_cast<size_t>(4));
 
     float pad_value = 0.f;
     if (op->get_input_size() == 4) {
         auto const_node = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(3));
-        if (!const_node) {
-            IE_THROW() << "Unsupported const node type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
-        }
+        OPENVINO_ASSERT(const_node, "Unsupported const node type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
         ngraph::op::util::get_single_value(const_node, pad_value);
     }
 
-    auto pads_begin = op->get_pads_begin();
-    auto pads_end = op->get_pads_end();
-
-    if (pads_begin.size() < rank) {
-        size_t zeros_to_add = rank - pads_begin.size();
-        pads_begin.insert(pads_begin.end(), zeros_to_add, 0);
-        pads_end.insert(pads_end.end(), zeros_to_add, 0);
-    }
-
     auto tilePrim = cldnn::border(layerName,
                                   inputs[0],
-                                  pads_begin,
-                                  pads_end,
+                                  op->get_pads_begin(),
+                                  op->get_pads_end(),
                                   op->get_pad_mode(),
                                   pad_value);
 
diff --git a/src/plugins/intel_gpu/tests/shape_infer/pad_si_test.cpp b/src/plugins/intel_gpu/tests/shape_infer/pad_si_test.cpp
index bed549f4edf..2fd4a2a2355 100644
--- a/src/plugins/intel_gpu/tests/shape_infer/pad_si_test.cpp
+++ b/src/plugins/intel_gpu/tests/shape_infer/pad_si_test.cpp
@@ -125,6 +125,13 @@ INSTANTIATE_TEST_SUITE_P(smoke, pad_test_single_input,
         layout{ov::PartialShape{4}, data_types::i64, format::bfyx}, {1, 0, 3, 7},
         ov::op::PadMode::CONSTANT, 1.f,
         layout{ov::PartialShape{{1, -1},{5, -1},{5, -1},{8, -1}}, data_types::f32, format::bfyx}
+    },
+    {
+        layout{ov::PartialShape::dynamic(2), data_types::f32, format::bfyx},
+        layout{ov::PartialShape{2}, data_types::i64, format::bfyx}, {0, 5},
+        layout{ov::PartialShape{2}, data_types::i64, format::bfyx}, {1, 0},
+        ov::op::PadMode::CONSTANT, 1.f,
+        layout{ov::PartialShape{{1, -1},{5, -1}}, data_types::f32, format::bfyx}
     }
 }));
 
diff --git a/src/tests/functional/plugin/gpu/concurrency/gpu_concurrency_tests.cpp b/src/tests/functional/plugin/gpu/concurrency/gpu_concurrency_tests.cpp
index c1117f7e99c..b8156314737 100644
--- a/src/tests/functional/plugin/gpu/concurrency/gpu_concurrency_tests.cpp
+++ b/src/tests/functional/plugin/gpu/concurrency/gpu_concurrency_tests.cpp
@@ -303,7 +303,7 @@ TEST(canSwapTensorsBetweenInferRequests, outputs) {
         } else {
             iter1++;
             ov::Tensor output_tensor = infer_request1.get_output_tensor();
-            compare_results(output_tensor, ref[iter1 % 2].data());
+            compare_results(output_tensor, ref[0].data());
             if (iter1 < niter_limit) {
                 infer_request1.set_output_tensor(output_tensors[(iter1 + 1) % 2]);
                 infer_request1.start_async();
@@ -317,7 +317,7 @@ TEST(canSwapTensorsBetweenInferRequests, outputs) {
         } else {
             iter2++;
             ov::Tensor output_tensor = infer_request2.get_output_tensor();
-            compare_results(output_tensor, ref[(iter2 + 1) % 2].data());
+            compare_results(output_tensor, ref[1].data());
             if (iter2 < niter_limit) {
                 infer_request2.set_output_tensor(output_tensors[iter2 % 2]);
                 infer_request2.start_async();
diff --git a/src/tests/functional/plugin/gpu/single_layer_tests/dynamic/pad.cpp b/src/tests/functional/plugin/gpu/single_layer_tests/dynamic/pad.cpp
index b4b211bcfe0..d3062c78a12 100644
--- a/src/tests/functional/plugin/gpu/single_layer_tests/dynamic/pad.cpp
+++ b/src/tests/functional/plugin/gpu/single_layer_tests/dynamic/pad.cpp
@@ -91,6 +91,50 @@ const std::vector<ngraph::helpers::PadMode> padMode = {
         ngraph::helpers::PadMode::SYMMETRIC
 };
 
+/* *======================* Dynamic Shapes Tests 2D *======================* */
+
+const std::vector<InputShape> inputShapesDynamic2D = {
+    {{-1, -1},                // dynamic
+     {{5, 36}, {3, 16}}},     // target
+
+    {{-1, 32},                // dynamic
+     {{5, 32}}},              // target
+
+    {{{1, 5}, {16, 32}},      // dynamic
+     {{3, 16}, {5, 24}}},     // target
+};
+
+const std::vector<std::vector<int64_t>> padsBegin2D_Smoke = {{0, 1}, {0, 2}};
+const std::vector<std::vector<int64_t>> padsEnd2D_Smoke   = {{0, 2}, {0, 0}};
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_GPUPadDynamic2DConst,
+    PadLayerGPUTest,
+    ::testing::Combine(
+        ::testing::ValuesIn(inputShapesDynamic2D),
+        ::testing::ValuesIn(inputPrecisions),
+        ::testing::ValuesIn(padsBegin2D_Smoke),
+        ::testing::ValuesIn(padsEnd2D_Smoke),
+        ::testing::ValuesIn(argPadValue),
+        ::testing::Values(ngraph::helpers::PadMode::CONSTANT)),
+    PadLayerGPUTest::getTestCaseName
+);
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_GPUPadDynamic2D,
+    PadLayerGPUTest,
+    ::testing::Combine(
+        ::testing::ValuesIn(inputShapesDynamic2D),
+        ::testing::ValuesIn(inputPrecisions),
+        ::testing::ValuesIn(padsBegin2D_Smoke),
+        ::testing::ValuesIn(padsEnd2D_Smoke),
+        ::testing::Values(0),
+        ::testing::ValuesIn(padMode)),
+    PadLayerGPUTest::getTestCaseName
+);
+
+/* *======================* *=====================* *======================* */
+
 /* *======================* Dynamic Shapes Tests 4D *======================* */
 
 const std::vector<InputShape> inputShapesDynamic4D = {
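
Illustration (not part of the patch): the core of the change is that the Pad op's pads_begin/pads_end may be shorter than the input rank, so the GPU plugin now zero-extends them on the kernel side and guards the per-dimension indexing during shape inference instead of reading past the end of the vectors. Below is a minimal standalone sketch of that rule, assuming nothing beyond standard C++; the helper name compute_padded_dims and the sample values are illustrative only and do not appear in the patch.

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Mirrors the guarded indexing added to border_inst::calc_output_layout:
    // pad entries missing for trailing dimensions are treated as zero padding.
    static std::vector<int64_t> compute_padded_dims(const std::vector<int64_t>& dims,
                                                    const std::vector<int64_t>& pads_begin,
                                                    const std::vector<int64_t>& pads_end) {
        std::vector<int64_t> out = dims;
        for (std::size_t i = 0; i < out.size(); ++i) {
            out[i] += (i < pads_begin.size()) ? pads_begin[i] : 0;
            out[i] += (i < pads_end.size()) ? pads_end[i] : 0;
        }
        return out;
    }

    int main() {
        // 4D input, pads supplied only for the first two axes
        // (the situation the new 2D dynamic-shape tests exercise).
        std::vector<int64_t> dims{3, 16, 10, 10};
        std::vector<int64_t> pads_begin{0, 1};
        std::vector<int64_t> pads_end{0, 2};

        for (int64_t d : compute_padded_dims(dims, pads_begin, pads_end))
            std::cout << d << ' ';   // prints: 3 19 10 10
        std::cout << '\n';
        return 0;
    }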