[GPU] add check condition of input dynamic shape in conv fusing (#19219)

This commit is contained in:
Wilson Seok 2023-08-25 21:13:53 +09:00 committed by GitHub
parent 39b75fd213
commit f962511a84
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 65 additions and 17 deletions

View File

@ -494,7 +494,7 @@ void prepare_primitive_fusing::fuse_simple_primitives(program &p) {
if (_lo.get_optimization_attributes().use_onednn_impls == 1)
return true;
if (node.get_output_layout().is_dynamic()) {
if (node.get_output_layout().is_dynamic() || node.get_input_layout().is_dynamic()) {
return true;
}

View File

@ -25,7 +25,8 @@ typedef std::tuple<
ElementType, // Input precision
ElementType, // Output precision
InputShape, // Input shape
LayerTestsUtils::TargetDevice // Device name
LayerTestsUtils::TargetDevice, // Device name
bool // activation fusing
> convLayerTestParamsSet;
@ -38,7 +39,8 @@ public:
ElementType inType, outType;
InputShape inputShape;
std::string targetDevice;
std::tie(convParams, netType, inType, outType, inputShape, targetDevice) = obj.param;
bool activationFusing;
std::tie(convParams, netType, inType, outType, inputShape, targetDevice, activationFusing) = obj.param;
ngraph::op::PadType padType;
InferenceEngine::SizeVector kernel, stride, dilation;
@ -64,7 +66,8 @@ public:
result << "netPRC=" << netType << "_";
result << "inPRC=" << inType << "_";
result << "outPRC=" << outType << "_";
result << "trgDev=" << targetDevice;
result << "trgDev=" << targetDevice << "_";
result << "activationFusing=" << activationFusing;
return result.str();
}
@ -74,7 +77,8 @@ protected:
convSpecificParams convParams;
InputShape inputShape;
auto netType = ElementType::undefined;
std::tie(convParams, netType, inType, outType, inputShape, targetDevice) = this->GetParam();
bool activationFusing;
std::tie(convParams, netType, inType, outType, inputShape, targetDevice, activationFusing) = this->GetParam();
init_input_shapes({inputShape});
@ -92,12 +96,21 @@ protected:
auto convolutionNode = ngraph::builder::makeConvolution(paramOuts.front(), netType, kernel, stride, padBegin,
padEnd, dilation, padType, convOutChannels);
if (activationFusing) {
auto activationNode = ngraph::builder::makeActivation(convolutionNode, netType, ngraph::helpers::ActivationTypes::Relu);
ngraph::ResultVector results;
for (size_t i = 0; i < convolutionNode->get_output_size(); i++)
results.push_back(std::make_shared<ngraph::opset1::Result>(convolutionNode->output(i)));
ngraph::ResultVector results;
for (size_t i = 0; i < activationNode->get_output_size(); i++)
results.push_back(std::make_shared<ngraph::opset1::Result>(activationNode->output(i)));
function = std::make_shared<ngraph::Function>(results, inputParams, "Convolution");
function = std::make_shared<ngraph::Function>(results, inputParams, "Convolution");
} else {
ngraph::ResultVector results;
for (size_t i = 0; i < convolutionNode->get_output_size(); i++)
results.push_back(std::make_shared<ngraph::opset1::Result>(convolutionNode->output(i)));
function = std::make_shared<ngraph::Function>(results, inputParams, "Convolution");
}
}
};
@ -130,7 +143,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic1DSymPad, Convolut
::testing::Values(ElementType::f16),
::testing::Values(ElementType::undefined),
::testing::ValuesIn(dynInputShapes1D),
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU)),
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU),
::testing::Values(false)),
ConvolutionLayerGPUTestDynamic::getTestCaseName);
const std::vector<SizeVector> kernels1D = { {3}, {1} };
@ -174,7 +188,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_ExplicitPad1D, Convolutio
::testing::Values(ElementType::f16),
::testing::Values(ElementType::undefined),
::testing::ValuesIn(inputShapes1D),
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU)),
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU),
::testing::Values(false)),
ConvolutionLayerGPUTestDynamic::getTestCaseName);
// ======== 2D convolutions
@ -184,6 +199,14 @@ const std::vector<ov::test::InputShape> dynInputShapes2D = {
{{1, 10, 20, 20}, {1, 10, 30, 30}, {1, 10, 40, 20}}
},
};
// Specific input range causes static output shapes
const std::vector<ov::test::InputShape> dynInputShapes2D_static_output = {
{
{1, 128, {1, 2}, {1, 2}},
{{1, 128, 1, 1}, {1, 128, 2, 2}}
},
};
// ==== Symmetric pad
INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2DSymPad, ConvolutionLayerGPUTestDynamic,
::testing::Combine(
@ -199,7 +222,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2DSymPad, Convolut
::testing::Values(ElementType::f16),
::testing::Values(ElementType::undefined),
::testing::ValuesIn(dynInputShapes2D),
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU)),
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU),
::testing::Values(false)),
ConvolutionLayerGPUTestDynamic::getTestCaseName);
// ==== Symmetric auto pad
@ -217,7 +241,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2DSymAutoPad, Conv
::testing::Values(ElementType::f16),
::testing::Values(ElementType::undefined),
::testing::ValuesIn(dynInputShapes2D),
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU)),
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU),
::testing::Values(false)),
ConvolutionLayerGPUTestDynamic::getTestCaseName);
// ==== Asymmetric pad
@ -235,7 +260,27 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2D_AsymPad, Convol
::testing::Values(ElementType::f16),
::testing::Values(ElementType::undefined),
::testing::ValuesIn(dynInputShapes2D),
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU)),
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU),
::testing::Values(false)),
ConvolutionLayerGPUTestDynamic::getTestCaseName);
// ==== Static output
INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2D_static_output, ConvolutionLayerGPUTestDynamic,
::testing::Combine(
::testing::Combine(
::testing::Values(SizeVector{3, 3}),
::testing::Values(SizeVector{2, 2}),
::testing::Values(std::vector<ptrdiff_t>{1, 1}),
::testing::Values(std::vector<ptrdiff_t>{1, 1}),
::testing::Values(SizeVector{1, 1}),
::testing::Values(256),
::testing::Values(ngraph::op::PadType::EXPLICIT)),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::undefined),
::testing::ValuesIn(dynInputShapes2D_static_output),
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU),
::testing::Values(true)),
ConvolutionLayerGPUTestDynamic::getTestCaseName);
// ======== 3D convolutions
@ -261,7 +306,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic3DSymPad, Convolut
::testing::Values(ElementType::f16),
::testing::Values(ElementType::undefined),
::testing::ValuesIn(dynInputShapes3D),
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU)),
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU),
::testing::Values(false)),
ConvolutionLayerGPUTestDynamic::getTestCaseName);
// ==== Symmetric auto pad
@ -279,7 +325,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic3DSymAutoPad, Conv
::testing::Values(ElementType::f16),
::testing::Values(ElementType::undefined),
::testing::ValuesIn(dynInputShapes3D),
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU)),
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU),
::testing::Values(false)),
ConvolutionLayerGPUTestDynamic::getTestCaseName);
// ==== Asymmetric pad
@ -297,7 +344,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic3DAsymPad, Convolu
::testing::Values(ElementType::f16),
::testing::Values(ElementType::undefined),
::testing::ValuesIn(dynInputShapes3D),
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU)),
::testing::Values<std::string>(ov::test::utils::DEVICE_GPU),
::testing::Values(false)),
ConvolutionLayerGPUTestDynamic::getTestCaseName);
} // namespace