[LPT] Precision restriction customization extending: tests (#17196)
* [LPT] Precision restriction customization extending
* comments fix: refactoring
* [LPT] Precision restriction customization extending: tests
This commit is contained in:
parent
546581bcce
commit
14a14ecd76
@ -262,4 +262,37 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, GroupConvolutionTransformation,
|
||||
::testing::ValuesIn(addPrecisionPreserved)),
|
||||
GroupConvolutionTransformation::getTestCaseName);
|
||||
} // namespace depthwise
|
||||
|
||||
namespace i8_3d {
|
||||
const std::vector<std::pair<ngraph::PartialShape, ngraph::Shape>> inputShapes = {
|
||||
{{1, 6, 1, 24, 24}, {1, 24, 1, 18, 18}},
|
||||
{{1, 24, 8, 12, 12}, {1, 24, 1, 1, 1}}
|
||||
};
|
||||
|
||||
const std::vector<LayerTestsDefinitions::GroupConvolutionTransformationParam> params = {
|
||||
// group convolution, tensor quantization
|
||||
{
|
||||
3ul,
|
||||
-1,
|
||||
{256ul, ngraph::Shape{1, 1, 1, 1, 1}, {-12.8f}, {12.7f}, {-12.8f}, {12.7f}},
|
||||
{255ul, ngraph::Shape { 1, 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -127.f }, { 127.f }},
|
||||
true,
|
||||
"Convolution",
|
||||
"I8"
|
||||
},
|
||||
};
|
||||
|
||||
const std::vector<bool> addPrecisionPreserved = {false};
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(smoke_LPT, GroupConvolutionTransformation,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(netPrecisions),
|
||||
::testing::Values(CommonTestUtils::DEVICE_CPU),
|
||||
::testing::ValuesIn(trasformationParamValues),
|
||||
::testing::ValuesIn(inputShapes),
|
||||
::testing::ValuesIn(params),
|
||||
::testing::ValuesIn(addPrecisionPreserved)),
|
||||
GroupConvolutionTransformation::getTestCaseName);
|
||||
} // namespace i8_3d
|
||||
} // namespace
|
||||
|
||||
|
@ -20,7 +20,7 @@ std::shared_ptr<ngraph::Function> FakeQuantizeAndConvolutionFunction::get(
|
||||
const FakeQuantizeOnData& fqOnData,
|
||||
const FakeQuantizeOnWeights& fqOnWeights) {
|
||||
const auto rankLength = inputShape.rank().is_dynamic() ? 4 : inputShape.rank().get_length();
|
||||
OPENVINO_ASSERT(rankLength == 3ul || rankLength == 4ul, "not supported input shape rank: ", rankLength);
|
||||
OPENVINO_ASSERT(rankLength == 3ul || rankLength == 4ul || rankLength == 5ul, "not supported input shape rank: ", rankLength);
|
||||
|
||||
const auto input = std::make_shared<ngraph::opset1::Parameter>(precision, inputShape);
|
||||
const auto fakeQuantizeOnActivations = fqOnData.empty() ?
|
||||
|
@ -45,6 +45,7 @@ std::shared_ptr<Node> createWeightsOriginal(
|
||||
weightsValues);
|
||||
} else {
|
||||
const size_t inputChannelsPerGroup = inputChannelsCount / groupCount;
|
||||
if ((rankLength == 3) || (rankLength == 4)) {
|
||||
weights = ngraph::opset1::Constant::create(
|
||||
precision,
|
||||
addReshape ?
|
||||
@ -61,6 +62,13 @@ std::shared_ptr<Node> createWeightsOriginal(
|
||||
outputChannelsCount * kernelSize * kernelSize * inputChannelsPerGroup,
|
||||
weightsValues[0]) :
|
||||
weightsValues);
|
||||
} else {
|
||||
const ngraph::Shape shape{outputChannelsCount, inputChannelsPerGroup, 1ull, kernelSize, kernelSize};
|
||||
const std::vector<float> values = weightsValues.size() == 1ull ?
|
||||
std::vector<float>(shape_size(shape), weightsValues[0]) :
|
||||
weightsValues;
|
||||
weights = ngraph::opset1::Constant::create(precision, shape, values);
|
||||
}
|
||||
|
||||
if (!fakeQuantizeOnWeights.empty()) {
|
||||
Shape constantShape;
|
||||
@ -91,23 +99,36 @@ std::shared_ptr<Node> createWeightsOriginal(
|
||||
}
|
||||
|
||||
if (addReshape) {
|
||||
std::vector<int64_t> values;
|
||||
if (rankLength == 3ll) {
|
||||
values = std::vector<int64_t>{
|
||||
calculatedDimention == 0 ? -1 : static_cast<int64_t>(groupCount),
|
||||
calculatedDimention == 1 ? -1 : static_cast<int64_t>(outputChannelsCount / groupCount),
|
||||
static_cast<int64_t>(inputChannelsPerGroup),
|
||||
static_cast<int64_t>(kernelSize)};
|
||||
} else if (rankLength == 4ll) {
|
||||
values = std::vector<int64_t>{
|
||||
calculatedDimention == 0 ? -1 : static_cast<int64_t>(groupCount),
|
||||
calculatedDimention == 1 ? -1 : static_cast<int64_t>(outputChannelsCount / groupCount),
|
||||
static_cast<int64_t>(inputChannelsPerGroup),
|
||||
static_cast<int64_t>(kernelSize),
|
||||
static_cast<int64_t>(kernelSize)};
|
||||
} else if (rankLength == 5ll) {
|
||||
values = std::vector<int64_t>{
|
||||
calculatedDimention == 0 ? -1 : static_cast<int64_t>(groupCount),
|
||||
calculatedDimention == 1 ? -1 : static_cast<int64_t>(outputChannelsCount / groupCount),
|
||||
static_cast<int64_t>(inputChannelsPerGroup),
|
||||
1,
|
||||
static_cast<int64_t>(kernelSize),
|
||||
static_cast<int64_t>(kernelSize)};
|
||||
}
|
||||
|
||||
weights = std::make_shared<ngraph::opset1::Reshape>(
|
||||
weights,
|
||||
ngraph::opset1::Constant::create(
|
||||
element::i64,
|
||||
Shape{ static_cast<size_t>(rankLength) + 1ul },
|
||||
rankLength == 3 ?
|
||||
std::vector<int64_t> {
|
||||
calculatedDimention == 0 ? -1 : static_cast<int64_t>(groupCount),
|
||||
calculatedDimention == 1 ? -1 : static_cast<int64_t>(outputChannelsCount / groupCount),
|
||||
static_cast<int64_t>(inputChannelsPerGroup),
|
||||
static_cast<int64_t>(kernelSize) } :
|
||||
std::vector<int64_t> {
|
||||
calculatedDimention == 0 ? -1 : static_cast<int64_t>(groupCount),
|
||||
calculatedDimention == 1 ? -1 : static_cast<int64_t>(outputChannelsCount / groupCount),
|
||||
static_cast<int64_t>(inputChannelsPerGroup),
|
||||
static_cast<int64_t>(kernelSize),
|
||||
static_cast<int64_t>(kernelSize) }),
|
||||
values),
|
||||
true);
|
||||
}
|
||||
}
|
||||
@ -175,7 +196,7 @@ std::shared_ptr<ngraph::Function> GroupConvolutionFunction::getOriginal(
|
||||
const bool addReshape,
|
||||
const bool addPrecisionPreserved) {
|
||||
const auto rankLength = inputShape.rank().is_dynamic() ? 4 : inputShape.rank().get_length();
|
||||
OPENVINO_ASSERT(rankLength == 3 || rankLength == 4, "not supported input shape rank: ", rankLength);
|
||||
OPENVINO_ASSERT(rankLength == 3 || rankLength == 4 || rankLength == 5, "not supported input shape rank: ", rankLength);
|
||||
|
||||
const auto input = std::make_shared<ngraph::opset1::Parameter>(precision, inputShape);
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user