From 14a14ecd76592480ae70bf8a8c1cbc8e823af4b7 Mon Sep 17 00:00:00 2001
From: Edward Shogulin
Date: Wed, 26 Apr 2023 13:53:04 +0100
Subject: [PATCH] [LPT] Precision restriction customization extending: tests
 (#17196)

* [LPT] Precision restriction customization extending

* comments fix: refactoring

* [LPT] Precision restriction customization extending: tests
---
 .../group_convolution_transformation.cpp      | 33 ++++++++
 ...fake_quantize_and_convolution_function.cpp |  2 +-
 .../src/group_convolution_function.cpp        | 79 ++++++++++++-------
 3 files changed, 84 insertions(+), 30 deletions(-)

diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/group_convolution_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/group_convolution_transformation.cpp
index f9b5cf385f3..5affe100cef 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/group_convolution_transformation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/group_convolution_transformation.cpp
@@ -262,4 +262,37 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, GroupConvolutionTransformation,
         ::testing::ValuesIn(addPrecisionPreserved)),
     GroupConvolutionTransformation::getTestCaseName);
 } // namespace depthwise
+
+namespace i8_3d {
+const std::vector<std::pair<ngraph::PartialShape, ngraph::Shape>> inputShapes = {
+    {{1, 6, 1, 24, 24}, {1, 24, 1, 18, 18}},
+    {{1, 24, 8, 12, 12}, {1, 24, 1, 1, 1}}
+};
+
+const std::vector<LayerTestsDefinitions::GroupConvolutionTransformationParam> params = {
+    // group convolution, tensor quantization
+    {
+        3ul,
+        -1,
+        {256ul, ngraph::Shape{1, 1, 1, 1, 1}, {-12.8f}, {12.7f}, {-12.8f}, {12.7f}},
+        {255ul, ngraph::Shape { 1, 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f }},
+        true,
+        "Convolution",
+        "I8"
+    },
+};
+
+const std::vector<bool> addPrecisionPreserved = {false};
+
+INSTANTIATE_TEST_SUITE_P(smoke_LPT, GroupConvolutionTransformation,
+    ::testing::Combine(
+        ::testing::ValuesIn(netPrecisions),
+        ::testing::Values(CommonTestUtils::DEVICE_CPU),
+        ::testing::ValuesIn(trasformationParamValues),
+        ::testing::ValuesIn(inputShapes),
+        ::testing::ValuesIn(params),
+        ::testing::ValuesIn(addPrecisionPreserved)),
+    GroupConvolutionTransformation::getTestCaseName);
+} // namespace i8_3d
 } // namespace
+
diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_and_convolution_function.cpp b/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_and_convolution_function.cpp
index b539c955f71..0cdf3209534 100644
--- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_and_convolution_function.cpp
+++ b/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_and_convolution_function.cpp
@@ -20,7 +20,7 @@ std::shared_ptr<ngraph::Function> FakeQuantizeAndConvolutionFunction::get(
     const FakeQuantizeOnData& fqOnData,
     const FakeQuantizeOnWeights& fqOnWeights) {
     const auto rankLength = inputShape.rank().is_dynamic() ? 4 : inputShape.rank().get_length();
-    OPENVINO_ASSERT(rankLength == 3ul || rankLength == 4ul, "not supported input shape rank: ", rankLength);
+    OPENVINO_ASSERT(rankLength == 3ul || rankLength == 4ul || rankLength == 5ul, "not supported input shape rank: ", rankLength);
 
     const auto input = std::make_shared<ngraph::opset1::Parameter>(precision, inputShape);
     const auto fakeQuantizeOnActivations = fqOnData.empty() ?
diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/group_convolution_function.cpp b/src/tests/ngraph_helpers/lpt_ngraph_functions/src/group_convolution_function.cpp
index 17e1b890c10..4b08086d183 100644
--- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/group_convolution_function.cpp
+++ b/src/tests/ngraph_helpers/lpt_ngraph_functions/src/group_convolution_function.cpp
@@ -45,22 +45,30 @@ std::shared_ptr<Node> createWeightsOriginal(
             weightsValues);
     } else {
         const size_t inputChannelsPerGroup = inputChannelsCount / groupCount;
-        weights = ngraph::opset1::Constant::create(
-            precision,
-            addReshape ?
-                (rankLength == 3 ?
-                    ngraph::Shape{ outputChannelsCount, inputChannelsPerGroup, kernelSize } :
-                    ngraph::Shape{ outputChannelsCount, inputChannelsPerGroup, kernelSize, kernelSize }) :
-                (rankLength == 3 ?
-                    ngraph::Shape{ groupCount, outputChannelsCount / groupCount, inputChannelsPerGroup, kernelSize } :
-                    ngraph::Shape{ groupCount, outputChannelsCount / groupCount, inputChannelsPerGroup, kernelSize, kernelSize }),
-            weightsValues.size() == 1ul ?
-                std::vector<float>(
-                    rankLength == 3 ?
-                        outputChannelsCount * kernelSize * inputChannelsPerGroup :
-                        outputChannelsCount * kernelSize * kernelSize * inputChannelsPerGroup,
-                    weightsValues[0]) :
-                weightsValues);
+        if ((rankLength == 3) || (rankLength == 4)) {
+            weights = ngraph::opset1::Constant::create(
+                precision,
+                addReshape ?
+                    (rankLength == 3 ?
+                        ngraph::Shape{ outputChannelsCount, inputChannelsPerGroup, kernelSize } :
+                        ngraph::Shape{ outputChannelsCount, inputChannelsPerGroup, kernelSize, kernelSize }) :
+                    (rankLength == 3 ?
+                        ngraph::Shape{ groupCount, outputChannelsCount / groupCount, inputChannelsPerGroup, kernelSize } :
+                        ngraph::Shape{ groupCount, outputChannelsCount / groupCount, inputChannelsPerGroup, kernelSize, kernelSize }),
+                weightsValues.size() == 1ul ?
+                    std::vector<float>(
+                        rankLength == 3 ?
+                            outputChannelsCount * kernelSize * inputChannelsPerGroup :
+                            outputChannelsCount * kernelSize * kernelSize * inputChannelsPerGroup,
+                        weightsValues[0]) :
+                    weightsValues);
+        } else {
+            const ngraph::Shape shape{outputChannelsCount, inputChannelsPerGroup, 1ull, kernelSize, kernelSize};
+            const std::vector<float> values = weightsValues.size() == 1ull ?
+                std::vector<float>(shape_size(shape), weightsValues[0]) :
+                weightsValues;
+            weights = ngraph::opset1::Constant::create(precision, shape, values);
+        }
 
         if (!fakeQuantizeOnWeights.empty()) {
             Shape constantShape;
@@ -91,23 +99,36 @@ std::shared_ptr<Node> createWeightsOriginal(
         }
 
         if (addReshape) {
+            std::vector<int> values;
+            if (rankLength == 3ll) {
+                values = std::vector<int>{
+                    calculatedDimention == 0 ? -1 : static_cast<int>(groupCount),
+                    calculatedDimention == 1 ? -1 : static_cast<int>(outputChannelsCount / groupCount),
+                    static_cast<int>(inputChannelsPerGroup),
+                    static_cast<int>(kernelSize)};
+            } else if (rankLength == 4ll) {
+                values = std::vector<int>{
+                    calculatedDimention == 0 ? -1 : static_cast<int>(groupCount),
+                    calculatedDimention == 1 ? -1 : static_cast<int>(outputChannelsCount / groupCount),
+                    static_cast<int>(inputChannelsPerGroup),
+                    static_cast<int>(kernelSize),
+                    static_cast<int>(kernelSize)};
+            } else if (rankLength == 5ll) {
+                values = std::vector<int>{
+                    calculatedDimention == 0 ? -1 : static_cast<int>(groupCount),
+                    calculatedDimention == 1 ? -1 : static_cast<int>(outputChannelsCount / groupCount),
+                    static_cast<int>(inputChannelsPerGroup),
+                    1,
+                    static_cast<int>(kernelSize),
+                    static_cast<int>(kernelSize)};
+            }
+
             weights = std::make_shared<ngraph::opset1::Reshape>(
                 weights,
                 ngraph::opset1::Constant::create(
                     element::i64,
                     Shape{ static_cast<size_t>(rankLength) + 1ul },
-                    rankLength == 3 ?
-                        std::vector<int> {
-                            calculatedDimention == 0 ? -1 : static_cast<int>(groupCount),
-                            calculatedDimention == 1 ? -1 : static_cast<int>(outputChannelsCount / groupCount),
-                            static_cast<int>(inputChannelsPerGroup),
-                            static_cast<int>(kernelSize) } :
-                        std::vector<int> {
-                            calculatedDimention == 0 ? -1 : static_cast<int>(groupCount),
-                            calculatedDimention == 1 ? -1 : static_cast<int>(outputChannelsCount / groupCount),
-                            static_cast<int>(inputChannelsPerGroup),
-                            static_cast<int>(kernelSize),
-                            static_cast<int>(kernelSize) }),
+                    values),
                 true);
         }
     }
@@ -175,7 +196,7 @@ std::shared_ptr<ngraph::Function> GroupConvolutionFunction::getOriginal(
     const bool addReshape,
     const bool addPrecisionPreserved) {
     const auto rankLength = inputShape.rank().is_dynamic() ? 4 : inputShape.rank().get_length();
-    OPENVINO_ASSERT(rankLength == 3 || rankLength == 4, "not supported input shape rank: ", rankLength);
+    OPENVINO_ASSERT(rankLength == 3 || rankLength == 4 || rankLength == 5, "not supported input shape rank: ", rankLength);
 
     const auto input = std::make_shared<ngraph::opset1::Parameter>(precision, inputShape);
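
For reference, the shape arithmetic introduced by the new rank-5 branch can be sketched in isolation: the weights constant is built as {O, I/G, 1, k, k} and, when addReshape is enabled, reshaped to the grouped layout {G, O/G, I/G, 1, k, k} with -1 standing in for the dimension left to be calculated. The numeric values below are illustrative only (loosely based on the i8_3d test case above), not values the patch itself fixes:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Standalone sketch of the rank-5 group-convolution weights shapes used by
    // createWeightsOriginal: constant shape {O, I/G, 1, k, k}, reshape target
    // {G, O/G, I/G, 1, k, k} with -1 for the dimension to be calculated.
    int main() {
        const std::int64_t groupCount = 3;      // illustrative values only
        const std::int64_t inputChannels = 6;
        const std::int64_t outputChannels = 24;
        const std::int64_t kernelSize = 7;
        const std::int64_t inputChannelsPerGroup = inputChannels / groupCount;
        const int calculatedDimention = 0;      // spelling matches the helper

        const std::vector<std::int64_t> constantShape{
            outputChannels, inputChannelsPerGroup, 1, kernelSize, kernelSize};
        const std::vector<std::int64_t> reshapeTarget{
            calculatedDimention == 0 ? -1 : groupCount,
            calculatedDimention == 1 ? -1 : outputChannels / groupCount,
            inputChannelsPerGroup, 1, kernelSize, kernelSize};

        for (auto d : constantShape) std::cout << d << ' ';
        std::cout << "-> ";
        for (auto d : reshapeTarget) std::cout << d << ' ';
        std::cout << '\n';                      // 24 2 1 7 7 -> -1 8 2 1 7 7
    }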