[LPT] Precision restriction customization extending: tests (#17196)
* [LPT] Precision restriction customization extending
* comments fix: refactoring
* [LPT] Precision restriction customization extending: tests
parent 546581bcce
commit 14a14ecd76
@@ -262,4 +262,37 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, GroupConvolutionTransformation,
         ::testing::ValuesIn(addPrecisionPreserved)),
     GroupConvolutionTransformation::getTestCaseName);
 } // namespace depthwise
+
+namespace i8_3d {
+const std::vector<std::pair<ngraph::PartialShape, ngraph::Shape>> inputShapes = {
+    {{1, 6, 1, 24, 24}, {1, 24, 1, 18, 18}},
+    {{1, 24, 8, 12, 12}, {1, 24, 1, 1, 1}}
+};
+
+const std::vector<LayerTestsDefinitions::GroupConvolutionTransformationParam> params = {
+    // group convolution, tensor quantization
+    {
+        3ul,
+        -1,
+        {256ul, ngraph::Shape{1, 1, 1, 1, 1}, {-12.8f}, {12.7f}, {-12.8f}, {12.7f}},
+        {255ul, ngraph::Shape { 1, 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f }},
+        true,
+        "Convolution",
+        "I8"
+    },
+};
+
+const std::vector<bool> addPrecisionPreserved = {false};
+
+INSTANTIATE_TEST_SUITE_P(smoke_LPT, GroupConvolutionTransformation,
+    ::testing::Combine(
+        ::testing::ValuesIn(netPrecisions),
+        ::testing::Values(CommonTestUtils::DEVICE_CPU),
+        ::testing::ValuesIn(trasformationParamValues),
+        ::testing::ValuesIn(inputShapes),
+        ::testing::ValuesIn(params),
+        ::testing::ValuesIn(addPrecisionPreserved)),
+    GroupConvolutionTransformation::getTestCaseName);
+} // namespace i8_3d
 } // namespace
+
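Note (not part of the diff): the i8_3d case above reuses the existing GroupConvolutionTransformation fixture. The standalone sketch below, with hypothetical names, only illustrates the gtest mechanics it relies on: ::testing::Combine expands the value lists into their cartesian product, so each 5D input shape becomes a separate test instance.

    // Minimal sketch of INSTANTIATE_TEST_SUITE_P + ::testing::Combine; link against gtest_main.
    #include <gtest/gtest.h>
    #include <tuple>
    #include <vector>

    class CombineDemo : public ::testing::TestWithParam<std::tuple<int, bool>> {};

    TEST_P(CombineDemo, EnumeratesCartesianProduct) {
        // One invocation per element of the cartesian product of the value lists below.
        const auto shapeId = std::get<0>(GetParam());
        const auto precisionPreserved = std::get<1>(GetParam());
        EXPECT_GE(shapeId, 0);
        (void)precisionPreserved;
    }

    const std::vector<int> shapeIds = {0, 1};                   // stands in for the two 5D input shapes
    const std::vector<bool> precisionPreservedValues = {false}; // mirrors addPrecisionPreserved above

    INSTANTIATE_TEST_SUITE_P(smoke_Demo, CombineDemo,
        ::testing::Combine(
            ::testing::ValuesIn(shapeIds),
            ::testing::ValuesIn(precisionPreservedValues)));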
@@ -20,7 +20,7 @@ std::shared_ptr<ngraph::Function> FakeQuantizeAndConvolutionFunction::get(
     const FakeQuantizeOnData& fqOnData,
     const FakeQuantizeOnWeights& fqOnWeights) {
     const auto rankLength = inputShape.rank().is_dynamic() ? 4 : inputShape.rank().get_length();
-    OPENVINO_ASSERT(rankLength == 3ul || rankLength == 4ul, "not supported input shape rank: ", rankLength);
+    OPENVINO_ASSERT(rankLength == 3ul || rankLength == 4ul || rankLength == 5ul, "not supported input shape rank: ", rankLength);

     const auto input = std::make_shared<ngraph::opset1::Parameter>(precision, inputShape);
     const auto fakeQuantizeOnActivations = fqOnData.empty() ?
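The relaxed assert lets 5D (NCDHW-like) shapes reach the test builder. A minimal sketch of the rank probing used above, assuming the legacy ngraph header ngraph/partial_shape.hpp is still available in the build:

    #include <ngraph/partial_shape.hpp>
    #include <iostream>

    int main() {
        const ngraph::PartialShape staticShape{1, 6, 1, 24, 24};
        const ngraph::PartialShape dynamicShape = ngraph::PartialShape::dynamic();

        // Same expression as in the builder: dynamic rank falls back to 4, otherwise the static rank.
        const auto rankOf = [](const ngraph::PartialShape& s) {
            return s.rank().is_dynamic() ? 4 : s.rank().get_length();
        };

        std::cout << rankOf(staticShape) << std::endl;   // 5 -> now accepted by the assert
        std::cout << rankOf(dynamicShape) << std::endl;  // 4 (fallback for dynamic rank)
        return 0;
    }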
@@ -45,22 +45,30 @@ std::shared_ptr<Node> createWeightsOriginal(
             weightsValues);
     } else {
         const size_t inputChannelsPerGroup = inputChannelsCount / groupCount;
-        weights = ngraph::opset1::Constant::create(
-            precision,
-            addReshape ?
-                (rankLength == 3 ?
-                    ngraph::Shape{ outputChannelsCount, inputChannelsPerGroup, kernelSize } :
-                    ngraph::Shape{ outputChannelsCount, inputChannelsPerGroup, kernelSize, kernelSize }) :
-                (rankLength == 3 ?
-                    ngraph::Shape{ groupCount, outputChannelsCount / groupCount, inputChannelsPerGroup, kernelSize } :
-                    ngraph::Shape{ groupCount, outputChannelsCount / groupCount, inputChannelsPerGroup, kernelSize, kernelSize }),
-            weightsValues.size() == 1ul ?
-                std::vector<float>(
-                    rankLength == 3 ?
-                        outputChannelsCount * kernelSize * inputChannelsPerGroup :
-                        outputChannelsCount * kernelSize * kernelSize * inputChannelsPerGroup,
-                    weightsValues[0]) :
-                weightsValues);
+        if ((rankLength == 3) || (rankLength == 4)) {
+            weights = ngraph::opset1::Constant::create(
+                precision,
+                addReshape ?
+                    (rankLength == 3 ?
+                        ngraph::Shape{ outputChannelsCount, inputChannelsPerGroup, kernelSize } :
+                        ngraph::Shape{ outputChannelsCount, inputChannelsPerGroup, kernelSize, kernelSize }) :
+                    (rankLength == 3 ?
+                        ngraph::Shape{ groupCount, outputChannelsCount / groupCount, inputChannelsPerGroup, kernelSize } :
+                        ngraph::Shape{ groupCount, outputChannelsCount / groupCount, inputChannelsPerGroup, kernelSize, kernelSize }),
+                weightsValues.size() == 1ul ?
+                    std::vector<float>(
+                        rankLength == 3 ?
+                            outputChannelsCount * kernelSize * inputChannelsPerGroup :
+                            outputChannelsCount * kernelSize * kernelSize * inputChannelsPerGroup,
+                        weightsValues[0]) :
+                    weightsValues);
+        } else {
+            const ngraph::Shape shape{outputChannelsCount, inputChannelsPerGroup, 1ull, kernelSize, kernelSize};
+            const std::vector<float> values = weightsValues.size() == 1ull ?
+                std::vector<float>(shape_size(shape), weightsValues[0]) :
+                weightsValues;
+            weights = ngraph::opset1::Constant::create(precision, shape, values);
+        }

     if (!fakeQuantizeOnWeights.empty()) {
         Shape constantShape;
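In the new 5D branch, a single supplied weight value is broadcast to every element of the {O, I/group, 1, kH, kW} constant. A standalone sketch of that fill logic; shape_size here is a local stand-in for ngraph::shape_size and the numbers are illustrative only:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Product of all dimensions, i.e. the number of elements in the constant.
    static size_t shape_size(const std::vector<size_t>& shape) {
        size_t n = 1;
        for (const auto d : shape) n *= d;
        return n;
    }

    int main() {
        const size_t outputChannelsCount = 24, inputChannelsPerGroup = 2, kernelSize = 7;
        const std::vector<size_t> shape{outputChannelsCount, inputChannelsPerGroup, 1, kernelSize, kernelSize};

        const std::vector<float> weightsValues = {2.f};  // single value -> broadcast to the whole shape
        const std::vector<float> values = weightsValues.size() == 1 ?
            std::vector<float>(shape_size(shape), weightsValues[0]) :
            weightsValues;

        std::cout << values.size() << std::endl;  // 24 * 2 * 1 * 7 * 7 = 2352
        return 0;
    }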
@@ -91,23 +99,36 @@ std::shared_ptr<Node> createWeightsOriginal(
         }

         if (addReshape) {
+            std::vector<int64_t> values;
+            if (rankLength == 3ll) {
+                values = std::vector<int64_t>{
+                    calculatedDimention == 0 ? -1 : static_cast<int64_t>(groupCount),
+                    calculatedDimention == 1 ? -1 : static_cast<int64_t>(outputChannelsCount / groupCount),
+                    static_cast<int64_t>(inputChannelsPerGroup),
+                    static_cast<int64_t>(kernelSize)};
+            } else if (rankLength == 4ll) {
+                values = std::vector<int64_t>{
+                    calculatedDimention == 0 ? -1 : static_cast<int64_t>(groupCount),
+                    calculatedDimention == 1 ? -1 : static_cast<int64_t>(outputChannelsCount / groupCount),
+                    static_cast<int64_t>(inputChannelsPerGroup),
+                    static_cast<int64_t>(kernelSize),
+                    static_cast<int64_t>(kernelSize)};
+            } else if (rankLength == 5ll) {
+                values = std::vector<int64_t>{
+                    calculatedDimention == 0 ? -1 : static_cast<int64_t>(groupCount),
+                    calculatedDimention == 1 ? -1 : static_cast<int64_t>(outputChannelsCount / groupCount),
+                    static_cast<int64_t>(inputChannelsPerGroup),
+                    1,
+                    static_cast<int64_t>(kernelSize),
+                    static_cast<int64_t>(kernelSize)};
+            }
+
             weights = std::make_shared<ngraph::opset1::Reshape>(
                 weights,
                 ngraph::opset1::Constant::create(
                     element::i64,
                     Shape{ static_cast<size_t>(rankLength) + 1ul },
-                    rankLength == 3 ?
-                        std::vector<int64_t> {
-                            calculatedDimention == 0 ? -1 : static_cast<int64_t>(groupCount),
-                            calculatedDimention == 1 ? -1 : static_cast<int64_t>(outputChannelsCount / groupCount),
-                            static_cast<int64_t>(inputChannelsPerGroup),
-                            static_cast<int64_t>(kernelSize) } :
-                        std::vector<int64_t> {
-                            calculatedDimention == 0 ? -1 : static_cast<int64_t>(groupCount),
-                            calculatedDimention == 1 ? -1 : static_cast<int64_t>(outputChannelsCount / groupCount),
-                            static_cast<int64_t>(inputChannelsPerGroup),
-                            static_cast<int64_t>(kernelSize),
-                            static_cast<int64_t>(kernelSize) }),
+                    values),
                 true);
         }
     }
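The per-rank values vectors replace the old two-way ternary: the reshape target has rankLength + 1 dims, for rank 5 a 1 is inserted before the kernel dims, and the dimension selected by calculatedDimention is left as -1 so Reshape infers it. A standalone sketch of the 5D target, with illustrative numbers:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
        const int64_t groupCount = 3, outputChannelsCount = 24, inputChannelsPerGroup = 2, kernelSize = 7;
        const int calculatedDimention = 0;  // same spelling as in the test builder

        // 5D weights reshaped to the 6D grouped layout {g, O/g, I/g, 1, kH, kW}, one dim inferred.
        const std::vector<int64_t> values{
            calculatedDimention == 0 ? -1 : groupCount,
            calculatedDimention == 1 ? -1 : outputChannelsCount / groupCount,
            inputChannelsPerGroup,
            1,
            kernelSize,
            kernelSize};

        for (const auto v : values) std::cout << v << ' ';  // -1 8 2 1 7 7
        std::cout << std::endl;
        return 0;
    }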
@@ -175,7 +196,7 @@ std::shared_ptr<ngraph::Function> GroupConvolutionFunction::getOriginal(
     const bool addReshape,
     const bool addPrecisionPreserved) {
     const auto rankLength = inputShape.rank().is_dynamic() ? 4 : inputShape.rank().get_length();
-    OPENVINO_ASSERT(rankLength == 3 || rankLength == 4, "not supported input shape rank: ", rankLength);
+    OPENVINO_ASSERT(rankLength == 3 || rankLength == 4 || rankLength == 5, "not supported input shape rank: ", rankLength);

     const auto input = std::make_shared<ngraph::opset1::Parameter>(precision, inputShape);
