[GPU] Fix unintentional expansion of pad's rank into 2D (#19106)

* [GPU] Fix unintentional expansion of pad's rank into 2D

* Add explicit 1d pad TCs for ov_gpu_unit_tests
This commit is contained in:
Andrew Kwangwoong Park 2023-08-16 14:29:00 +09:00 committed by GitHub
parent 188434e969
commit 8a0a4df941
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 55 additions and 9 deletions

View File

@ -75,20 +75,21 @@ public:
const auto& weights_layout = impl_param.input_layouts[1 + 0 + deform_conv_dep_offset]
.convert_to_weights_layout(primitive->grouped_weights_shape);
const auto& input_layout = impl_param.get_input_layout();
auto spatial_rank = input_layout.get_spatial_rank();
std::vector<int32_t> dims;
for (size_t i = 0; i < spatial_rank; i++) {
dims.push_back(static_cast<int32_t>(weights_layout.spatial(i)));
}
ov::Shape kernel(dims.begin(), dims.end());
ov::CoordinateDiff pads_begin(primitive->padding_begin.begin(), primitive->padding_begin.end());
ov::CoordinateDiff pads_end(primitive->padding_end.begin(), primitive->padding_end.end());
const auto auto_pad = primitive->auto_pad;
conv_params.has_explicit_paddings = primitive->auto_pad == ov::op::PadType::EXPLICIT;
if (auto_pad == ov::op::PadType::SAME_UPPER || auto_pad == ov::op::PadType::SAME_LOWER) {
const auto& input_layout = impl_param.get_input_layout();
auto spatial_rank = input_layout.get_spatial_rank();
std::vector<int32_t> dims;
for (size_t i = 0; i < spatial_rank; i++) {
dims.push_back(static_cast<int32_t>(weights_layout.spatial(i)));
}
ov::Shape kernel(dims.begin(), dims.end());
pads_begin.clear();
pads_end.clear();
OPENVINO_SUPPRESS_DEPRECATED_START
ngraph::try_apply_auto_padding(input_layout.get_partial_shape(),
kernel,
@ -98,13 +99,14 @@ public:
pads_end,
pads_begin);
OPENVINO_SUPPRESS_DEPRECATED_END
pads_begin.resize(std::max<size_t>(2, pads_begin.size()), 0);
pads_end.resize(std::max<size_t>(2, pads_end.size()), 0);
}
if (auto_pad == ov::op::PadType::VALID) {
pads_begin = ov::CoordinateDiff(pads_begin.size(), 0);
pads_end = ov::CoordinateDiff(pads_end.size(), 0);
}
pads_begin.resize(std::max<size_t>(2, pads_begin.size()), 0);
pads_end.resize(std::max<size_t>(2, pads_end.size()), 0);
uint32_t kx = weights_layout.spatial(0);
uint32_t ky = weights_layout.spatial(1);

View File

@ -130,6 +130,50 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic1DSymPad, Convolut
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU)),
ConvolutionLayerGPUTestDynamic::getTestCaseName);
// Parameter sets for the 1D convolution test cases added for the pad-rank
// fix: padding vectors for a 1D conv must stay rank-1 and not be silently
// expanded to 2D by the GPU plugin.
const std::vector<SizeVector> kernels1D = { {3}, {1} };
const std::vector<SizeVector> strides1D = { {1} };
// Rank-1 explicit paddings; with PadType::EXPLICIT below the plugin must
// consume these values verbatim (this is the shape the fix protects).
const std::vector<std::vector<ptrdiff_t>> padBegins1D = { {0} };
const std::vector<std::vector<ptrdiff_t>> padEnds1D = { {0} };
const std::vector<SizeVector> dilations1D = { {1} };
// NOTE(review): no "1D" suffix on this name — presumably shared with other
// suites later in the file; confirm before renaming.
const SizeVector numOutChannels = { 64, 63 };
// Input shapes, presumably {batch, channels, width} — TODO confirm layout.
// First two entries are fully static ({} dynamic part); the last two use a
// dynamic dimension plus target static shapes resolved at inference time.
const std::vector<InputShape> inputShapes1D = {
{{}, {{ 2, 64, 7 }}},
{{}, {{ 1, 67, 7 }}},
{
//dynamic shape
{ -1, 64, {1, 200} },
{ //target static shapes
{ 2, 64, 7 },
{ 1, 64, 9 }
}
},
{
//dynamic shape
{ {1, 200}, 64, -1 },
{ //target static shapes
{ 2, 64, 7 },
{ 1, 64, 5 }
}
}
};
// Registers the explicit-pad 1D convolution cases against the dynamic-shape
// GPU fixture. The inner Combine bundles the convolution attributes
// (kernel, stride, pads, dilation, output channels, pad type); the outer
// Combine adds input/output precisions (f16 in, f16 out, undefined internal
// precision), the input shapes, and the target device.
INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_ExplicitPad1D, ConvolutionLayerGPUTestDynamic,
::testing::Combine(
::testing::Combine(
::testing::ValuesIn(kernels1D),
::testing::ValuesIn(strides1D),
::testing::ValuesIn(padBegins1D),
::testing::ValuesIn(padEnds1D),
::testing::ValuesIn(dilations1D),
::testing::ValuesIn(numOutChannels),
// EXPLICIT pad type: the rank-1 pads above are used as-is, which is
// exactly the path fixed by this commit.
::testing::Values(ngraph::op::PadType::EXPLICIT)),
::testing::Values(ElementType::f16),
::testing::Values(ElementType::f16),
::testing::Values(ElementType::undefined),
::testing::ValuesIn(inputShapes1D),
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU)),
ConvolutionLayerGPUTestDynamic::getTestCaseName);
// ======== 2D convolutions
const std::vector<ov::test::InputShape> dynInputShapes2D = {
{