[CPU] Fixed issue with unsupported reorder case for grouped convolutions (#893)
@@ -661,6 +661,13 @@ MKLDNNMemoryDesc::operator InferenceEngine::TensorDesc() const {
             blkDims.push_back(8);
             layout = Layout::BLOCKED;
             break;
+        case memory::gOdhwi8o:
+            order = {0, 1, 2, 3, 4, 5, 1};
+            blkDims = dims;
+            blkDims[1] = blkDims[1] / 8 + (blkDims[1] % 8 ? 1 : 0);
+            blkDims.push_back(8);
+            layout = Layout::BLOCKED;
+            break;
         case memory::nChw16c:
             order = {0, 1, 2, 3, 1};
             blkDims = dims;
@@ -676,6 +683,13 @@ MKLDNNMemoryDesc::operator InferenceEngine::TensorDesc() const {
             blkDims.push_back(16);
             layout = Layout::BLOCKED;
             break;
+        case memory::gOdhwi16o:
+            order = {0, 1, 2, 3, 4, 5, 1};
+            blkDims = dims;
+            blkDims[1] = blkDims[1] / 16 + (blkDims[1] % 16 ? 1 : 0);
+            blkDims.push_back(16);
+            layout = Layout::BLOCKED;
+            break;
         case memory::Ohwi8o:
             order = {0, 1, 2, 3, 0};
             blkDims = dims;
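Both new cases follow the recipe the surrounding switch already uses for blocked layouts: the output-channel dimension is split into outer blocks with a ceiling division, and the block size is appended as the innermost dimension. A minimal standalone sketch of that computation, not part of the patch (dims assumed ordered [G, O, I, D, H, W] for grouped 3D weights):

    #include <cstddef>
    #include <vector>

    // ceil(dims[axis] / blk) outer blocks plus an innermost block of size blk,
    // mirroring "blkDims[1] = blkDims[1] / 16 + (blkDims[1] % 16 ? 1 : 0)" above.
    std::vector<size_t> makeBlockedDims(std::vector<size_t> dims, size_t axis, size_t blk) {
        dims[axis] = dims[axis] / blk + (dims[axis] % blk ? 1 : 0);
        dims.push_back(blk);
        return dims;
    }

    int main() {
        // gOdhwi16o with G=2, O=20, I=16, D=3, H=5, W=5: O is padded up to
        // two blocks of 16, so blkDims becomes {2, 2, 16, 3, 5, 5, 16}.
        auto blkDims = makeBlockedDims({2, 20, 16, 3, 5, 5}, 1, 16);
        return blkDims[1] == 2 && blkDims.back() == 16 ? 0 : 1;
    }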
@@ -1267,6 +1281,13 @@ MKLDNNMemoryDesc::MKLDNNMemoryDesc(const TensorDesc& tDesc):
             } else if (blkdDims[6] == 16) {
                 mkldnnFormat = memory::format::Goidhw16g;
             }
+        } else if (order.size() == 7 &&
+                   order[0] == 0 && order[1] == 1 && order[2] == 2 && order[3] == 3 && order[4] == 4 && order[5] == 5 && order[6] == 1) {
+            if (blkdDims[6] == 8) {
+                mkldnnFormat = memory::format::gOdhwi8o;
+            } else if (blkdDims[6] == 16) {
+                mkldnnFormat = memory::format::gOdhwi16o;
+            }
         } else if (order.size() == 8 &&
                    order[0] == 0 && order[1] == 1 && order[2] == 3 && order[3] == 4 && order[4] == 2 && order[5] == 5 &&
                    order[6] == 1 && order[7] == 2) {
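This constructor path performs the inverse mapping: a blocked layout is recognized from its order vector, where the trailing repeated axis index (here 1, the O axis) marks the blocked dimension, and the innermost blkdDims entry selects the block size. A simplified sketch of that matching, with hypothetical enum and helper names:

    #include <cstddef>
    #include <vector>

    enum class Fmt { unknown, gOdhwi8o, gOdhwi16o };

    // Mirrors the new "order.size() == 7" branch: identity order 0..5 plus a
    // trailing 1 for the O block; the inner block size picks 8o vs 16o.
    Fmt detectGroupedBlockedFormat(const std::vector<size_t>& order, size_t innerBlk) {
        if (order != std::vector<size_t>{0, 1, 2, 3, 4, 5, 1})
            return Fmt::unknown;
        if (innerBlk == 8)  return Fmt::gOdhwi8o;
        if (innerBlk == 16) return Fmt::gOdhwi16o;
        return Fmt::unknown;
    }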
@@ -120,13 +120,18 @@ void MKLDNNReorderNode::createReorderPrimitive(const mkldnn::memory::desc &srcDe
     // Code block below tries to detect such cases and reinterpret data planar formats (e.g. nchw)
     // as grouped weights planar formats (e.g. goihw) since they have same physical memory layout.
     if (MKLDNNMemory::GetPlainFormat(src_blocked->GetDims()) == src_blocked->GetFormat() &&
-            MKLDNNMemory::IsGroupedFormat(dst_blocked->GetFormat())) {
+            src_blocked->GetDims().size() + 1 == dst_blocked->GetDims().size()) {
         try {
             mkldnn::memory::dims newDims = dst_blocked->GetDims();
             mkldnn::memory::format newFormat;
-            newFormat = src_blocked->GetDims().size() == 4 ? memory::goihw :
-                        src_blocked->GetDims().size() == 5 ? memory::goidhw :
-                        src_blocked->GetFormat();
+            if (MKLDNNMemory::IsGroupedFormat(dst_blocked->GetFormat())) {
+                newFormat = src_blocked->GetDims().size() == 4 ? memory::goihw :
+                            src_blocked->GetDims().size() == 5 ? memory::goidhw :
+                            src_blocked->GetFormat();
+            } else {
+                newFormat = src_blocked->GetDims().size() == 4 ? memory::ncdhw :
+                            src_blocked->GetFormat();
+            }
 
             auto newDesc = mkldnn::memory::desc(newDims, src_blocked->GetDataType(), newFormat);
             src_blocked->Create(newDesc, srcPtr, false);
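The reinterpretation the comment describes is safe because splitting the outermost dimension of a dense row-major tensor into [G, O/G] leaves every flat byte offset unchanged; only the logical indexing differs. A self-contained check of that equivalence with illustrative shapes (not taken from the patch):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Row-major flat offset for the given coordinates and dims.
    size_t flatOffset(const std::vector<size_t>& coord, const std::vector<size_t>& dims) {
        size_t off = 0;
        for (size_t i = 0; i < dims.size(); ++i)
            off = off * dims[i] + coord[i];
        return off;
    }

    int main() {
        // oihw dims {8, 4, 3, 3} viewed as goihw {2, 4, 4, 3, 3}: element (o, i, h, w)
        // maps to (g, o', i, h, w) with g = o / 4, o' = o % 4, at the same offset.
        std::vector<size_t> oihw = {8, 4, 3, 3}, goihw = {2, 4, 4, 3, 3};
        for (size_t o = 0; o < 8; ++o)
            assert(flatOffset({o, 1, 2, 0}, oihw) == flatOffset({o / 4, o % 4, 1, 2, 0}, goihw));
        return 0;
    }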
@@ -11,14 +11,15 @@ std::vector<std::string> disabledTestPatterns() {
     return {
         // TODO: Issue 26264
         R"(.*(MaxPool|AvgPool).*S\(1\.2\).*Rounding=CEIL.*)",
-        // TODO: Issue 31839
-        R"(.*(QuantConvBackpropData3D).*)",
         // TODO: Issue 31841
         R"(.*(QuantGroupConvBackpropData3D).*)",
         // TODO: Issue 31843
-        R"(.*(QuantGroupConvBackpropData2D)*QG=Perchannel.*)",
-        // TODO: Issue 32023
-        R"(.*(QuantGroupConvBackpropData2D)*QG=Pertensor.*)",
+        R"(.*(QuantConvBackpropData3D).*)",
+        R"(.*(QuantConvBackpropData2D).*(QG=Perchannel).*)",
+        R"(.*(QuantGroupConvBackpropData2D).*(QG=Perchannel).*)",
+        // TODO: Issue 33886
+        R"(.*(QuantGroupConv2D).*)",
+        R"(.*(QuantGroupConv3D).*)",
         // TODO: Issue 31845
         R"(.*(FakeQuantize).*)",
         R"(.*(EltwiseLayerTest).*IS=\(.*\..*\..*\..*\..*\).*secondaryInputType=PARAMETER.*opType=SCALAR.*)",
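These entries are consumed as regular expressions and matched against the full reported test name, which is why fragments produced by getTestCaseName (QG=Perchannel, Levels=, etc.) appear inside the patterns. A minimal sketch of this style of filtering, assuming a hypothetical helper name rather than the real skip-config plumbing:

    #include <regex>
    #include <string>
    #include <vector>

    // A test is skipped when any disabled pattern fully matches its name, e.g.
    // R"(.*(QuantGroupConv2D).*)" matches "smoke_QuantGroupConv2D...QG=Perchannel...".
    bool isDisabled(const std::string& testName, const std::vector<std::string>& patterns) {
        for (const auto& p : patterns)
            if (std::regex_match(testName, std::regex(p)))
                return true;
        return false;
    }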
@@ -19,7 +19,6 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
 const std::vector<size_t> numOutChannels = {16, 32};
 
 const std::vector<size_t > levels = {256};
-// FIXME: Perchannel tests fail because of bug in LPT
 const std::vector<QuantizationGranularity > granularity = {Pertensor, Perchannel};
 
 /* ============= 2D GroupConvolutionBackpropData ============= */
@@ -0,0 +1,86 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "subgraph_tests/quantized_group_convolution.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+using namespace ngraph::helpers;
+
+namespace {
+
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+        InferenceEngine::Precision::FP32
+};
+
+
+const std::vector<size_t> numOutChannels = {3, 24, 48};
+const std::vector<size_t> numGroups = {3};
+
+const std::vector<size_t > levels = {256};
+const std::vector<QuantizationGranularity> granularity = {Pertensor, Perchannel};
+const std::vector<bool> quantizeWeights = {false, true};
+
+/* ============= 2D GroupConvolution ============= */
+const std::vector<std::vector<size_t >> inputShapes2D = {{1, 3, 10, 10}, {1, 24, 10, 10}};
+const std::vector<std::vector<size_t >> kernels2D = {{1, 1}, {3, 3}};
+const std::vector<std::vector<size_t >> strides2D = {{1, 1}};
+const std::vector<std::vector<ptrdiff_t>> padBegins2D = {{0, 0}};
+const std::vector<std::vector<ptrdiff_t>> padEnds2D = {{0, 0}};
+const std::vector<std::vector<size_t >> dilations2D = {{1, 1}};
+
+
+const auto quantGroupConv2DParams = ::testing::Combine(
+        ::testing::ValuesIn(kernels2D),
+        ::testing::ValuesIn(strides2D),
+        ::testing::ValuesIn(padBegins2D),
+        ::testing::ValuesIn(padEnds2D),
+        ::testing::ValuesIn(dilations2D),
+        ::testing::ValuesIn(numOutChannels),
+        ::testing::ValuesIn(numGroups),
+        ::testing::ValuesIn(levels),
+        ::testing::ValuesIn(granularity),
+        ::testing::ValuesIn(quantizeWeights)
+);
+
+INSTANTIATE_TEST_CASE_P(QuantGroupConv2D, QuantGroupConvLayerTest,
+                        ::testing::Combine(
+                                quantGroupConv2DParams,
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::ValuesIn(inputShapes2D),
+                                ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                        QuantGroupConvLayerTest::getTestCaseName);
+
+/* ============= 3D GroupConvolution ============= */
+const std::vector<std::vector<size_t >> inputShapes3D = {{1, 3, 5, 5, 5}, {1, 24, 5, 5, 5}};
+const std::vector<std::vector<size_t >> kernels3D = {{3, 3, 3}};
+const std::vector<std::vector<size_t >> strides3D = {{1, 1, 1}};
+const std::vector<std::vector<ptrdiff_t>> padBegins3D = {{0, 0, 0}};
+const std::vector<std::vector<ptrdiff_t>> padEnds3D = {{0, 0, 0}};
+const std::vector<std::vector<size_t >> dilations3D = {{1, 1, 1}};
+
+const auto quantGroupConv3DParams = ::testing::Combine(
+        ::testing::ValuesIn(kernels3D),
+        ::testing::ValuesIn(strides3D),
+        ::testing::ValuesIn(padBegins3D),
+        ::testing::ValuesIn(padEnds3D),
+        ::testing::ValuesIn(dilations3D),
+        ::testing::ValuesIn(numOutChannels),
+        ::testing::ValuesIn(numGroups),
+        ::testing::ValuesIn(levels),
+        ::testing::ValuesIn(granularity),
+        ::testing::ValuesIn(quantizeWeights)
+);
+
+INSTANTIATE_TEST_CASE_P(QuantGroupConv3D, QuantGroupConvLayerTest,
+                        ::testing::Combine(
+                                quantGroupConv3DParams,
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::ValuesIn(inputShapes3D),
+                                ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                        QuantGroupConvLayerTest::getTestCaseName);
+
+} // namespace
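::testing::Combine instantiates the full cartesian product of the value lists, so the 2D suite above expands to 2 kernels x 3 output-channel counts x 2 granularities x 2 weight-quantization flags x 2 input shapes (everything else being single-valued) = 48 cases. A quick arithmetic check:

    #include <cstddef>

    int main() {
        // Multi-valued axes of the 2D suite; all remaining axes contribute x1.
        size_t cases = 2 /*kernels2D*/ * 3 /*numOutChannels*/ * 2 /*granularity*/
                     * 2 /*quantizeWeights*/ * 2 /*inputShapes2D*/;
        return cases == 48 ? 0 : 1;
    }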
@@ -0,0 +1,44 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <tuple>
+#include <vector>
+#include <string>
+#include <memory>
+
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "ngraph_functions/builders.hpp"
+#include "ngraph_functions/utils/ngraph_helpers.hpp"
+
+typedef std::tuple<
+        InferenceEngine::SizeVector,
+        InferenceEngine::SizeVector,
+        std::vector<ptrdiff_t>,
+        std::vector<ptrdiff_t>,
+        InferenceEngine::SizeVector,
+        size_t,
+        size_t,
+        size_t,
+        ngraph::helpers::QuantizationGranularity,
+        bool> quantGroupConvSpecificParams;
+typedef std::tuple<
+        quantGroupConvSpecificParams,
+        InferenceEngine::Precision,
+        InferenceEngine::SizeVector,
+        LayerTestsUtils::TargetDevice> quantGroupConvLayerTestParamsSet;
+
+namespace LayerTestsDefinitions {
+
+class QuantGroupConvLayerTest : public testing::WithParamInterface<quantGroupConvLayerTestParamsSet>,
+                                public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(testing::TestParamInfo<quantGroupConvLayerTestParamsSet> obj);
+
+protected:
+    void SetUp() override;
+};
+
+} // namespace LayerTestsDefinitions
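Keeping the convolution-specific knobs in their own tuple lets quantGroupConvSpecificParams be reused by Combine while the outer tuple adds suite-level values; a consumer unpacks both layers with std::tie, as the .cpp below does. A standalone sketch with stand-in values (granularity reduced to an int here for brevity):

    #include <cstddef>
    #include <tuple>
    #include <vector>

    int main() {
        using SizeVector = std::vector<size_t>;
        // Pack: kernel, stride, padBegin, padEnd, dilation, outCh, groups,
        // levels, granularity (int stand-in), quantizeWeights.
        auto params = std::make_tuple(SizeVector{3, 3}, SizeVector{1, 1},
                                      std::vector<ptrdiff_t>{0, 0}, std::vector<ptrdiff_t>{0, 0},
                                      SizeVector{1, 1}, size_t(24), size_t(3), size_t(256), 0, true);
        SizeVector kernel, stride, dilation;
        std::vector<ptrdiff_t> padBegin, padEnd;
        size_t outCh, groups, levels;
        int granularity;
        bool quantizeWeights;
        std::tie(kernel, stride, padBegin, padEnd, dilation, outCh, groups, levels, granularity, quantizeWeights) = params;
        return groups == 3 ? 0 : 1;
    }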
@@ -53,6 +53,8 @@ std::string QuantConvBackpropDataLayerTest::getTestCaseName(testing::TestParamIn
 }
 
 void QuantConvBackpropDataLayerTest::SetUp() {
+    threshold = 0.5f;
+
     quantConvBackpropDataSpecificParams groupConvBackpropDataParams;
     std::vector<size_t> inputShape;
     auto netPrecision = InferenceEngine::Precision::UNSPECIFIED;
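The raised threshold = 0.5f accounts for quantization: with levels = 256 a FakeQuantize snaps values onto a grid with step (high - low) / 255, and those per-element rounding errors accumulate across the convolution's reduction axis, so tight float tolerances would produce false failures. A back-of-the-envelope illustration under an assumed [-1, 1] range (the real ranges come from the test data):

    #include <cmath>

    int main() {
        const float low = -1.0f, high = 1.0f;
        const int levels = 256;
        const float step = (high - low) / (levels - 1);  // ~0.0078 per element
        // Worst-case rounding of step/2 per input, accumulated over a 3x3 kernel
        // with 24 input channels, is ~0.5f * step * 216 ~= 0.85 -- the same order
        // of magnitude as the 0.5f comparison threshold.
        return std::fabs(step - 2.0f / 255.0f) < 1e-6f ? 0 : 1;
    }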
@@ -0,0 +1,114 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <tuple>
+#include <vector>
+#include <string>
+#include <memory>
+#include <functional>
+#include <functional_test_utils/skip_tests_config.hpp>
+
+#include "ie_core.hpp"
+
+#include "common_test_utils/common_utils.hpp"
+#include "functional_test_utils/blob_utils.hpp"
+#include "functional_test_utils/plugin_cache.hpp"
+#include "functional_test_utils/layer_test_utils.hpp"
+
+#include "subgraph_tests/quantized_group_convolution.hpp"
+
+using ngraph::helpers::QuantizationGranularity;
+
+namespace LayerTestsDefinitions {
+
+std::string QuantGroupConvLayerTest::getTestCaseName(testing::TestParamInfo<quantGroupConvLayerTestParamsSet> obj) {
+    quantGroupConvSpecificParams groupConvParams;
+    InferenceEngine::Precision netPrecision;
+    InferenceEngine::SizeVector inputShapes;
+    std::string targetDevice;
+    std::tie(groupConvParams, netPrecision, inputShapes, targetDevice) = obj.param;
+    ngraph::op::PadType padType = ngraph::op::PadType::AUTO;
+    InferenceEngine::SizeVector kernel, stride, dilation;
+    std::vector<ptrdiff_t> padBegin, padEnd;
+    size_t convOutChannels, numGroups;
+    size_t quantLevels;
+    QuantizationGranularity quantGranularity;
+    bool quantizeWeights;
+    std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, numGroups, quantLevels, quantGranularity, quantizeWeights) = groupConvParams;
+
+    std::ostringstream result;
+    result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
+    result << "K" << CommonTestUtils::vec2str(kernel) << "_";
+    result << "S" << CommonTestUtils::vec2str(stride) << "_";
+    result << "PB" << CommonTestUtils::vec2str(padBegin) << "_";
+    result << "PE" << CommonTestUtils::vec2str(padEnd) << "_";
+    result << "D=" << CommonTestUtils::vec2str(dilation) << "_";
+    result << "O=" << convOutChannels << "_";
+    result << "G=" << numGroups << "_";
+    result << "AP=" << padType << "_";
+    result << "Levels=" << quantLevels << "_";
+    result << "QG=" << quantGranularity << "_";
+    result << "QW=" << quantizeWeights << "_";
+    result << "netPRC=" << netPrecision.name() << "_";
+    result << "targetDevice=" << targetDevice;
+    return result.str();
+}
+
+void QuantGroupConvLayerTest::SetUp() {
+    threshold = 0.5f;
+
+    quantGroupConvSpecificParams groupConvParams;
+    std::vector<size_t> inputShape;
+    auto netPrecision = InferenceEngine::Precision::UNSPECIFIED;
+    std::tie(groupConvParams, netPrecision, inputShape, targetDevice) = this->GetParam();
+    ngraph::op::PadType padType = ngraph::op::PadType::AUTO;
+    InferenceEngine::SizeVector kernel, stride, dilation;
+    std::vector<ptrdiff_t> padBegin, padEnd;
+    size_t convOutChannels, numGroups;
+    size_t quantLevels;
+    size_t quantGranularity;
+    bool quantizeWeights;
+    std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, numGroups, quantLevels, quantGranularity, quantizeWeights) = groupConvParams;
+    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+    auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
+    auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
+
+    std::vector<size_t> dataFqConstShapes(inputShape.size(), 1);
+    if (quantGranularity == ngraph::helpers::Perchannel)
+        dataFqConstShapes[1] = inputShape[1];
+    auto dataFq = ngraph::builder::makeFakeQuantize(paramOuts[0], ngPrc, quantLevels, dataFqConstShapes);
+
+    std::vector<size_t> weightsShapes = {convOutChannels, inputShape[1]};
+    if (weightsShapes[0] % numGroups || weightsShapes[1] % numGroups)
+        throw std::runtime_error("incorrect shape for QuantGroupConvolution");
+    weightsShapes[0] /= numGroups;
+    weightsShapes[1] /= numGroups;
+    weightsShapes.insert(weightsShapes.begin(), numGroups);
+    weightsShapes.insert(weightsShapes.end(), kernel.begin(), kernel.end());
+
+    std::vector<float> weightsData;
+    auto weightsNode = ngraph::builder::makeConstant(ngPrc, weightsShapes, weightsData, weightsData.empty());
+
+    std::vector<size_t> weightsFqConstShapes(weightsShapes.size(), 1);
+    if (quantGranularity == ngraph::helpers::Perchannel)
+        weightsFqConstShapes[0] = weightsShapes[0];
+
+    std::shared_ptr<ngraph::Node> weights;
+    if (quantizeWeights) {
+        weights = ngraph::builder::makeFakeQuantize(weightsNode, ngPrc, quantLevels, weightsFqConstShapes);
+    } else {
+        weights = weightsNode;
+    }
+
+    auto groupConv = std::dynamic_pointer_cast<ngraph::opset1::GroupConvolution>(
+            ngraph::builder::makeGroupConvolution(dataFq, weights, ngPrc, stride, padBegin, padEnd, dilation, padType));
+
+    ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(groupConv)};
+    function = std::make_shared<ngraph::Function>(results, params, "QuantGroupConvolution");
+}
+
+TEST_P(QuantGroupConvLayerTest, CompareWithRefs) {
+    Run();
+}
+} // namespace LayerTestsDefinitions
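SetUp derives the grouped weights shape by splitting both channel dimensions by numGroups and prepending the group count: {O, I} becomes {G, O/G, I/G, k...}. For inputShape {1, 24, 10, 10}, convOutChannels = 24, numGroups = 3 and a 3x3 kernel that is {3, 8, 8, 3, 3}. A standalone sketch of just that shape logic:

    #include <cstddef>
    #include <stdexcept>
    #include <vector>

    // Mirrors the shape logic in QuantGroupConvLayerTest::SetUp():
    // {O, I} -> {G, O/G, I/G, k...}, rejecting channel counts not divisible by G.
    std::vector<size_t> groupedWeightsShape(size_t outCh, size_t inCh, size_t groups,
                                            const std::vector<size_t>& kernel) {
        if (outCh % groups || inCh % groups)
            throw std::runtime_error("incorrect shape for QuantGroupConvolution");
        std::vector<size_t> shape = {groups, outCh / groups, inCh / groups};
        shape.insert(shape.end(), kernel.begin(), kernel.end());
        return shape;
    }

    int main() {
        // 24 out, 24 in, 3 groups, 3x3 kernel -> {3, 8, 8, 3, 3}
        return groupedWeightsShape(24, 24, 3, {3, 3}).size() == 5 ? 0 : 1;
    }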
@@ -54,6 +54,8 @@ std::string QuantGroupConvBackpropDataLayerTest::getTestCaseName(testing::TestPa
 }
 
 void QuantGroupConvBackpropDataLayerTest::SetUp() {
+    threshold = 0.5f;
+
     quantGroupConvBackpropDataSpecificParams groupConvBackpropDataParams;
     std::vector<size_t> inputShape;
     auto netPrecision = InferenceEngine::Precision::UNSPECIFIED;
@@ -49,6 +49,17 @@ std::shared_ptr<ngraph::Node> makeGroupConvolution(const ngraph::Output<Node> &i
                                                    const std::vector<float> &filterWeights = {},
                                                    const std::vector<float> &biasesWeights = {});
 
+std::shared_ptr<ngraph::Node> makeGroupConvolution(const ngraph::Output<Node> &in,
+                                                   const ngraph::Output<Node> &weights,
+                                                   const element::Type &type,
+                                                   const std::vector<size_t> &strides,
+                                                   const std::vector<ptrdiff_t> &padsBegin,
+                                                   const std::vector<ptrdiff_t> &padsEnd,
+                                                   const std::vector<size_t> &dilations,
+                                                   const op::PadType &autoPad,
+                                                   bool addBiases = false,
+                                                   const std::vector<float> &biasesWeights = {});
+
 std::shared_ptr<ngraph::Node> makeConvolutionBackpropData(const ngraph::Output<Node> &in,
                                                           const element::Type &type,
                                                           const std::vector<size_t> &filterSize,
@@ -34,8 +34,21 @@ std::shared_ptr<Node> makeGroupConvolution(const ngraph::Output<Node> &in,
     filterWeightsShape.insert(filterWeightsShape.begin(), numGroups);
     filterWeightsShape.insert(filterWeightsShape.end(), filterSize.begin(), filterSize.end());
     auto filterWeightsNode = makeConstant(type, filterWeightsShape, filterWeights, randomFilterWeights);
-    auto conv = std::make_shared<opset1::GroupConvolution>(in, filterWeightsNode, strides, padsBegin, padsEnd, dilations,
-                                                           autoPad);
+
+    return makeGroupConvolution(in, filterWeightsNode, type, strides, padsBegin, padsEnd, dilations, autoPad, addBiases, biasesWeights);
+}
+
+std::shared_ptr<Node> makeGroupConvolution(const ngraph::Output<Node> &in,
+                                           const ngraph::Output<Node> &weights,
+                                           const element::Type &type,
+                                           const std::vector<size_t> &strides,
+                                           const std::vector<ptrdiff_t> &padsBegin,
+                                           const std::vector<ptrdiff_t> &padsEnd,
+                                           const std::vector<size_t> &dilations,
+                                           const op::PadType &autoPad,
+                                           bool addBiases,
+                                           const std::vector<float> &biasesWeights) {
+    auto conv = std::make_shared<opset1::GroupConvolution>(in, weights, strides, padsBegin, padsEnd, dilations, autoPad);
     if (addBiases) {
         bool randomBiases = biasesWeights.empty();
         auto biasesWeightsNode = makeConstant(type, {}, biasesWeights, randomBiases);
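The new overload accepts the weights as a ready ngraph::Output<Node>, which is what lets the quantized tests feed a FakeQuantize (or a plain constant) into the convolution instead of the internally generated constant; the original entry point now just builds its constant and delegates. A usage sketch compilable against the declarations above (the wrapper name is illustrative):

    #include <memory>
    #include <vector>

    #include "ngraph_functions/builders.hpp"

    // Builds a grouped convolution whose weights pass through FakeQuantize first --
    // the pattern QuantGroupConvLayerTest::SetUp() uses via the new overload.
    std::shared_ptr<ngraph::Node> quantizedGroupConv(const ngraph::Output<ngraph::Node>& data,
                                                     const ngraph::Output<ngraph::Node>& fqWeights,
                                                     const ngraph::element::Type& prc) {
        return ngraph::builder::makeGroupConvolution(data, fqWeights, prc,
                                                     {1, 1}, {0, 0}, {0, 0}, {1, 1},
                                                     ngraph::op::PadType::AUTO);
    }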