[CPU] Dynamic shapes. Transpose tests + minor fixes. (#8155)

Author: Nikolay Shchegolev
Date:   2021-10-27 18:40:10 +03:00
Commit: a0a70ab53e (parent a46db8a72e), committed by GitHub
3 changed files with 92 additions and 33 deletions

View File

@@ -21,7 +21,8 @@ bool MKLDNNTransposeNode::isSupportedOperation(const std::shared_ptr<const ov::N
         return false;
     }
-    if (!isDynamicNgraphNode(op) && op->get_input_node_ptr(INPUT_ORDER_IDX)->get_type_info() != ov::op::v0::Constant::get_type_info_static()) {
+    if (op->get_input_node_ptr(INPUT_ORDER_IDX)->get_type_info() != ov::op::v0::Constant::get_type_info_static()) {
+        // TODO: Support parameterized Order input for dynamic shapes.
         errorMessage = "Constant expected as the second input for static shapes.";
         return false;
     }
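For reference, the check above accepts a Transpose only when its second (Order) input is an ov::op::v0::Constant; with this change a non-constant Order is rejected for dynamic shapes as well until the TODO is addressed. A minimal sketch of a graph fragment that passes the check (a hypothetical example using the ov::op aliases seen in the patch; shapes and order values are illustrative):

    // The data input may have a dynamic shape, but the Order input must be a Constant.
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape::dynamic(4));
    auto order = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{4}, {0, 2, 3, 1});
    auto transpose = std::make_shared<ov::op::v1::Transpose>(data, order);

Feeding the Order input from, e.g., another Parameter makes isSupportedOperation() return false with the error message shown above.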
@@ -39,7 +40,7 @@ MKLDNNTransposeNode::MKLDNNTransposeNode(const std::shared_ptr<ov::Node>& op, co
     }

     if (op->get_input_node_ptr(INPUT_ORDER_IDX)->get_type_info() == ov::op::v0::Constant::get_type_info_static()) {
-        constMap[INPUT_ORDER_IDX] = true;
+        isInputOrderConst = true;
         order = ov::as_type<ov::op::v0::Constant>(op->get_input_node_ptr(INPUT_ORDER_IDX))->cast_vector<size_t>();
         if (order.empty()) {
@@ -68,37 +69,39 @@ void MKLDNNTransposeNode::initSupportedPrimitiveDescriptors() {
     config.outConfs.resize(1);
     config.inConfs[INPUT_DATA_IDX].inPlace = -1;
     config.inConfs[INPUT_DATA_IDX].constant = false;
-    config.inConfs[INPUT_ORDER_IDX].constant = constMap[INPUT_ORDER_IDX];
+    config.inConfs[INPUT_ORDER_IDX].constant = isInputOrderConst;
     config.inConfs[INPUT_ORDER_IDX].desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(
-            getOriginalInputPrecisionAtPort(INPUT_ORDER_IDX), getInputShapeAtPort(INPUT_ORDER_IDX));
+            Precision::I32, getInputShapeAtPort(INPUT_ORDER_IDX));
     config.outConfs[0].inPlace = -1;
     config.outConfs[0].constant = false;

-    if (getInputShapeAtPort(0).getRank() == 4 || getInputShapeAtPort(0).getRank() == 5) {
-        config.inConfs[0].desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(prec, getInputShapeAtPort(0));
-        config.outConfs[0].desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(prec, getOutputShapeAtPort(0));
+    const auto& inputDataShape = getInputShapeAtPort(INPUT_DATA_IDX);
+    const auto& outputDataShape = getOutputShapeAtPort(0);
+    if (inputDataShape.getRank() == 4 || inputDataShape.getRank() == 5) {
+        config.inConfs[0].desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(prec, inputDataShape);
+        config.outConfs[0].desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(prec, outputDataShape);
         supportedPrimitiveDescriptors.push_back({config, impl_desc_type::unknown});

-        auto srcDims = getInputShapeAtPort(0).getDims();
+        const auto& srcDims = inputDataShape.getDims();
         if (srcDims[1] != Shape::UNDEFINED_DIM && srcDims[1] % 8 == 0) {
-            config.inConfs[0].desc = creatorsMap.at(LayoutType::nCsp8c)->createSharedDesc(prec, getInputShapeAtPort(0));
+            config.inConfs[0].desc = creatorsMap.at(LayoutType::nCsp8c)->createSharedDesc(prec, inputDataShape);
             supportedPrimitiveDescriptors.push_back({config, impl_desc_type::unknown});
         }

         if (srcDims[1] != Shape::UNDEFINED_DIM && srcDims[1] % 16 == 0) {
-            config.inConfs[0].desc = creatorsMap.at(LayoutType::nCsp16c)->createSharedDesc(prec, getInputShapeAtPort(0));
+            config.inConfs[0].desc = creatorsMap.at(LayoutType::nCsp16c)->createSharedDesc(prec, inputDataShape);
             supportedPrimitiveDescriptors.push_back({config, impl_desc_type::unknown});
         }

         if (prec == Precision::FP32 || prec == Precision::I8 || prec == Precision::U8) {
-            config.inConfs[0].desc = creatorsMap.at(LayoutType::nspc)->createSharedDesc(prec, getInputShapeAtPort(0));
-            config.outConfs[0].desc = creatorsMap.at(LayoutType::nspc)->createSharedDesc(prec, getOutputShapeAtPort(0));
+            config.inConfs[0].desc = creatorsMap.at(LayoutType::nspc)->createSharedDesc(prec, inputDataShape);
+            config.outConfs[0].desc = creatorsMap.at(LayoutType::nspc)->createSharedDesc(prec, outputDataShape);
             supportedPrimitiveDescriptors.push_back({config, impl_desc_type::unknown});
         }
     } else {
         // general plain case
-        config.inConfs[0].desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(prec, getInputShapeAtPort(0));
-        config.outConfs[0].desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(prec, getOutputShapeAtPort(0));
+        config.inConfs[0].desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(prec, inputDataShape);
+        config.outConfs[0].desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(prec, outputDataShape);
         supportedPrimitiveDescriptors.push_back({config, impl_desc_type::unknown});
     }
 }
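Note on the descriptor set above: blocked layouts (nCsp8c / nCsp16c) and the per-channel nspc layout are offered only for 4D/5D inputs, and the blocked variants additionally require the channel dimension to be statically known and divisible by the block size. With dynamic shapes srcDims[1] may be Shape::UNDEFINED_DIM, in which case only the plain ncsp and (for FP32/I8/U8) nspc descriptors remain. A condensed sketch of that rule as a hypothetical helper, not part of the patch, referencing the plugin's Shape::UNDEFINED_DIM constant:

    // A blocked layout is only advertised when the channel dim (dims[1]) is
    // statically known and a multiple of the block size (8 for nCsp8c, 16 for nCsp16c).
    static bool blockedLayoutApplicable(const std::vector<size_t>& dims, size_t blockSize) {
        return dims.size() > 1 && dims[1] != Shape::UNDEFINED_DIM && dims[1] % blockSize == 0;
    }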
@@ -114,7 +117,7 @@ void MKLDNNTransposeNode::prepareParams() {
     params.src_block_dims = srcDesc->getBlockDims();
     auto dstDesc = getChildEdgeAt(0)->getMemory().GetDescWithType<BlockedMemoryDesc>();
     params.dst_block_dims = dstDesc->getBlockDims();
-    if (!constMap[INPUT_ORDER_IDX]) {
+    if (!isInputOrderConst) {
         auto orderPtr = reinterpret_cast<const int32_t*>(getParentEdgeAt(0)->getMemoryPtr()->GetPtr());
         auto orderLen = getParentEdgeAt(0)->getMemoryPtr()->GetSize();
         params.order.assign(orderPtr, orderPtr + orderLen);
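When the Order input is not constant, prepareParams() re-reads it from input memory at runtime as int32 values. The order carries the usual transpose semantics: output dimension i is taken from input dimension order[i]. A minimal, self-contained illustration of that mapping (a hypothetical helper, not part of the patch):

    #include <vector>

    // dstDims[i] = srcDims[order[i]].
    // E.g. srcDims = {2, 32, 10, 20} with order = {0, 2, 3, 1} gives dstDims = {2, 10, 20, 32}.
    std::vector<size_t> applyOrder(const std::vector<size_t>& srcDims, const std::vector<size_t>& order) {
        std::vector<size_t> dstDims(srcDims.size());
        for (size_t i = 0; i < order.size(); ++i)
            dstDims[i] = srcDims[order[i]];
        return dstDims;
    }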
@@ -141,7 +144,7 @@ void MKLDNNTransposeNode::createPrimitive() {
     }
     params.data_size = getSelectedPrimitiveDescriptor()->getConfig().inConfs[0].desc->getPrecision().size();
-    if (constMap[INPUT_ORDER_IDX])
+    if (isInputOrderConst)
         params.order = order;
     auto srcDesc = getParentEdgeAt(INPUT_DATA_IDX)->getMemory().GetDescWithType<BlockedMemoryDesc>();
     params.src_block_order = srcDesc->getOrder();

View File

@@ -86,7 +86,7 @@ private:
     }
 };

-    bool constMap[3] = { false };
+    bool isInputOrderConst = false;

     static constexpr size_t INPUT_DATA_IDX = 0lu;
     static constexpr size_t INPUT_ORDER_IDX = 1lu;

View File

@@ -115,13 +115,53 @@ const std::vector<InferenceEngine::Precision> netPrecisionsPerChannels = {
     Precision::FP32
 };

-const std::vector<inputShapesPair>
-    staticInputShapes4D = {
-        {{}, {{{2, 32, 10, 20}}}}
+const std::vector<inputShapesPair> staticInputShapes4D = {
+    {
+        {},
+        { // Static shapes
+            {{2, 16, 21, 10}}
+        }
+    },
+    {
+        {},
+        { // Static shapes
+            {{3, 16, 11, 12}}
+        }
+    },
+    {
+        {},
+        { // Static shapes
+            {{4, 32, 16, 14}}
+        }
+    },
+    {
+        {},
+        { // Static shapes
+            {{16, 32, 5, 16}}
+        }
+    }
 };

-const std::vector<inputShapesPair>
-    dynamicInputShapes4D = {
-        {{{2, ov::Dimension(20, 40), 10, 20}}, {{{2, 32, 10, 20}, {2, 10, 10, 20}}}}
+const std::vector<inputShapesPair> dynamicInputShapes4D = {
+    {
+        { // Origin dynamic shapes
+            {ov::Dimension(1, 20), ov::Dimension(10, 40), ov::Dimension(10, 40), ov::Dimension(10, 40)}
+        },
+        { // Dynamic shapes instances
+            {{1, 32, 21, 10}},
+            {{2, 25, 11, 12}},
+            {{4, 15, 16, 14}},
+            {{7, 10, 20, 16}}
+        }
+    },
+    {
+        { // Origin dynamic shapes
+            {-1, -1, -1, -1}
+        },
+        { // Dynamic shapes instances
+            {{1, 24, 21, 8}},
+            {{2, 16, 11, 6}}
+        }
+    }
 };

 const std::vector<std::vector<size_t>> inputOrder4D = {
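Each inputShapesPair entry couples an "origin" shape description with the concrete shapes the test infers on: the first member is empty for purely static cases, or gives bounded (ov::Dimension(min, max)) or fully dynamic (-1) dimensions; the second lists shape instances, each of which must fall within the declared bounds. A hypothetical extra 4D entry in the same style, purely for illustration and not part of the patch:

    {
        { // Origin dynamic shapes
            {ov::Dimension(1, 8), -1, ov::Dimension(8, 32), ov::Dimension(8, 32)}
        },
        { // Dynamic shapes instances
            {{2, 3, 16, 16}},
            {{8, 7, 10, 12}}
        }
    }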
@@ -162,7 +202,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamicShapes4D_Transpose, TransposeLayerCPUTest,
                 ::testing::ValuesIn(netPrecisions),
                 ::testing::Values(CommonTestUtils::DEVICE_CPU),
                 ::testing::Values(additional_config),
-                ::testing::ValuesIn(std::vector<CPUSpecificParams>{})),
+                ::testing::Values(CPUSpecificParams{})),
         TransposeLayerCPUTest::getTestCaseName);

 INSTANTIATE_TEST_SUITE_P(smoke_staticShapes4D_PermutePerChannels, TransposeLayerCPUTest,
@@ -182,16 +222,32 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamicShapes4D_PermutePerChannels, TransposeLaye
                 ::testing::ValuesIn(netPrecisionsPerChannels),
                 ::testing::Values(CommonTestUtils::DEVICE_CPU),
                 ::testing::Values(additional_config),
-                ::testing::Values(cpuParams_nhwc)),
+                ::testing::Values(CPUSpecificParams{})),
         TransposeLayerCPUTest::getTestCaseName);

-const std::vector<inputShapesPair>
-    staticInputShapes5D = {
-        {{}, {{{2, 32, 5, 10, 20}}}}
+const std::vector<inputShapesPair> staticInputShapes5D = {
+    {
+        {},
+        { // Static shapes
+            {{2, 16, 5, 6, 5}},
+            {{3, 16, 6, 5, 6}},
+            {{4, 32, 5, 6, 5}},
+            {{5, 32, 6, 5, 6}}
+        }
+    }
 };

-const std::vector<inputShapesPair>
-    dynamicInputShapes5D = {
-        {{{2, ov::Dimension(20, 40), 5, 10, 20}}, {{{2, 32, 5, 10, 20}, {2, 20, 5, 10, 20}}}}
+const std::vector<inputShapesPair> dynamicInputShapes5D = {
+    {
+        { // Origin dynamic shapes
+            {ov::Dimension(1, 20), ov::Dimension(5, 150), ov::Dimension(5, 40), ov::Dimension(5, 40), ov::Dimension(5, 40)}
+        },
+        { // Dynamic shapes instances
+            {{1, 32, 5, 6, 5}},
+            {{2, 32, 6, 5, 6}},
+            {{4, 55, 5, 6, 5}},
+            {{3, 129, 6, 5, 6}}
+        }
+    }
 };

 const std::vector<std::vector<size_t>> inputOrder5D = {
@@ -240,7 +296,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamicShapes5D_Transpose, TransposeLayerCPUTest,
                 ::testing::ValuesIn(netPrecisions),
                 ::testing::Values(CommonTestUtils::DEVICE_CPU),
                 ::testing::Values(additional_config),
-                ::testing::ValuesIn(std::vector<CPUSpecificParams>{})),
+                ::testing::Values(CPUSpecificParams{})),
         TransposeLayerCPUTest::getTestCaseName);

 INSTANTIATE_TEST_SUITE_P(smoke_staticShapes5D_PermutePerChannels, TransposeLayerCPUTest,
@@ -260,7 +316,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamicShapes5D_PermutePerChannels, TransposeLaye
                 ::testing::ValuesIn(netPrecisionsPerChannels),
                 ::testing::Values(CommonTestUtils::DEVICE_CPU),
                 ::testing::Values(additional_config),
-                ::testing::Values(cpuParams_ndhwc)),
+                ::testing::Values(CPUSpecificParams{})),
         TransposeLayerCPUTest::getTestCaseName);

 } // namespace