diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_transpose_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_transpose_node.cpp
index 238c26c9539..93a2ab8f299 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_transpose_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_transpose_node.cpp
@@ -21,7 +21,8 @@ bool MKLDNNTransposeNode::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
             return false;
         }

-        if (!isDynamicNgraphNode(op) && op->get_input_node_ptr(INPUT_ORDER_IDX)->get_type_info() != ov::op::v0::Constant::get_type_info_static()) {
+        if (op->get_input_node_ptr(INPUT_ORDER_IDX)->get_type_info() != ov::op::v0::Constant::get_type_info_static()) {
+            // TODO: Support parameterized Order input for dynamic shapes.
             errorMessage = "Constant expected as the second input for static shapes.";
             return false;
         }
@@ -39,7 +40,7 @@ MKLDNNTransposeNode::MKLDNNTransposeNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache)
     }

     if (op->get_input_node_ptr(INPUT_ORDER_IDX)->get_type_info() == ov::op::v0::Constant::get_type_info_static()) {
-        constMap[INPUT_ORDER_IDX] = true;
+        isInputOrderConst = true;
         order = ov::as_type<ov::op::v0::Constant>(op->get_input_node_ptr(INPUT_ORDER_IDX))->cast_vector<size_t>();

         if (order.empty()) {
@@ -68,37 +69,39 @@ void MKLDNNTransposeNode::initSupportedPrimitiveDescriptors() {
     config.outConfs.resize(1);
     config.inConfs[INPUT_DATA_IDX].inPlace = -1;
     config.inConfs[INPUT_DATA_IDX].constant = false;
-    config.inConfs[INPUT_ORDER_IDX].constant = constMap[INPUT_ORDER_IDX];
+    config.inConfs[INPUT_ORDER_IDX].constant = isInputOrderConst;
     config.inConfs[INPUT_ORDER_IDX].desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(
-            getOriginalInputPrecisionAtPort(INPUT_ORDER_IDX), getInputShapeAtPort(INPUT_ORDER_IDX));
+            Precision::I32, getInputShapeAtPort(INPUT_ORDER_IDX));
     config.outConfs[0].inPlace = -1;
     config.outConfs[0].constant = false;

-    if (getInputShapeAtPort(0).getRank() == 4 || getInputShapeAtPort(0).getRank() == 5) {
-        config.inConfs[0].desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(prec, getInputShapeAtPort(0));
-        config.outConfs[0].desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(prec, getOutputShapeAtPort(0));
+    const auto& inputDataShape = getInputShapeAtPort(INPUT_DATA_IDX);
+    const auto& outputDataShape = getOutputShapeAtPort(0);
+    if (inputDataShape.getRank() == 4 || inputDataShape.getRank() == 5) {
+        config.inConfs[0].desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(prec, inputDataShape);
+        config.outConfs[0].desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(prec, outputDataShape);
         supportedPrimitiveDescriptors.push_back({config, impl_desc_type::unknown});

-        auto srcDims = getInputShapeAtPort(0).getDims();
+        const auto& srcDims = inputDataShape.getDims();
         if (srcDims[1] != Shape::UNDEFINED_DIM && srcDims[1] % 8 == 0) {
-            config.inConfs[0].desc = creatorsMap.at(LayoutType::nCsp8c)->createSharedDesc(prec, getInputShapeAtPort(0));
+            config.inConfs[0].desc = creatorsMap.at(LayoutType::nCsp8c)->createSharedDesc(prec, inputDataShape);
             supportedPrimitiveDescriptors.push_back({config, impl_desc_type::unknown});
         }

         if (srcDims[1] != Shape::UNDEFINED_DIM && srcDims[1] % 16 == 0) {
-            config.inConfs[0].desc = creatorsMap.at(LayoutType::nCsp16c)->createSharedDesc(prec, getInputShapeAtPort(0));
+            config.inConfs[0].desc = creatorsMap.at(LayoutType::nCsp16c)->createSharedDesc(prec, inputDataShape);
             supportedPrimitiveDescriptors.push_back({config, impl_desc_type::unknown});
         }

         if (prec == Precision::FP32 || prec == Precision::I8 || prec == Precision::U8) {
-            config.inConfs[0].desc = creatorsMap.at(LayoutType::nspc)->createSharedDesc(prec, getInputShapeAtPort(0));
-            config.outConfs[0].desc = creatorsMap.at(LayoutType::nspc)->createSharedDesc(prec, getOutputShapeAtPort(0));
+            config.inConfs[0].desc = creatorsMap.at(LayoutType::nspc)->createSharedDesc(prec, inputDataShape);
+            config.outConfs[0].desc = creatorsMap.at(LayoutType::nspc)->createSharedDesc(prec, outputDataShape);
             supportedPrimitiveDescriptors.push_back({config, impl_desc_type::unknown});
         }
     } else {   // general plain case
-        config.inConfs[0].desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(prec, getInputShapeAtPort(0));
-        config.outConfs[0].desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(prec, getOutputShapeAtPort(0));
+        config.inConfs[0].desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(prec, inputDataShape);
+        config.outConfs[0].desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(prec, outputDataShape);
         supportedPrimitiveDescriptors.push_back({config, impl_desc_type::unknown});
     }
 }
@@ -114,7 +117,7 @@ void MKLDNNTransposeNode::prepareParams() {
     params.src_block_dims = srcDesc->getBlockDims();
     auto dstDesc = getChildEdgeAt(0)->getMemory().GetDescWithType<BlockedMemoryDesc>();
     params.dst_block_dims = dstDesc->getBlockDims();
-    if (!constMap[INPUT_ORDER_IDX]) {
+    if (!isInputOrderConst) {
         auto orderPtr = reinterpret_cast<const int32_t*>(getParentEdgeAt(0)->getMemoryPtr()->GetPtr());
         auto orderLen = getParentEdgeAt(0)->getMemoryPtr()->GetSize();
         params.order.assign(orderPtr, orderPtr + orderLen);
@@ -141,7 +144,7 @@ void MKLDNNTransposeNode::createPrimitive() {
     }

     params.data_size = getSelectedPrimitiveDescriptor()->getConfig().inConfs[0].desc->getPrecision().size();
-    if (constMap[INPUT_ORDER_IDX])
+    if (isInputOrderConst)
         params.order = order;
     auto srcDesc = getParentEdgeAt(INPUT_DATA_IDX)->getMemory().GetDescWithType<BlockedMemoryDesc>();
     params.src_block_order = srcDesc->getOrder();
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_transpose_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_transpose_node.h
index 0bd71612479..0b45d826159 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_transpose_node.h
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_transpose_node.h
@@ -86,7 +86,7 @@ private:
         }
     };

-    bool constMap[3] = { false };
+    bool isInputOrderConst = false;

     static constexpr size_t INPUT_DATA_IDX = 0lu;
     static constexpr size_t INPUT_ORDER_IDX = 1lu;
diff --git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/transpose.cpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/transpose.cpp
index ee8c44398a5..bb6527f1b52 100644
--- a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/transpose.cpp
+++ b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/transpose.cpp
@@ -115,13 +115,53 @@ const std::vector<Precision> netPrecisionsPerChannels = {
         Precision::FP32
 };

-const std::vector<InputShape>
-    staticInputShapes4D = {
-        {{}, {{{2, 32, 10, 20}}}}
+const std::vector<InputShape> staticInputShapes4D = {
+    {
+        {},
+        { // Static shapes
+            {{2, 16, 21, 10}}
+        }
+    },
+    {
+        {},
+        { // Static shapes
+            {{3, 16, 11, 12}}
+        }
+    },
+    {
+        {},
+        { // Static shapes
+            {{4, 32, 16, 14}}
+        }
+    },
+    {
+        {},
+        { // Static shapes
+            {{16, 32, 5, 16}}
+        }
+    }
 };

-const std::vector<InputShape>
-    dynamicInputShapes4D = {
-        {{{2, ov::Dimension(20, 40), 10, 20}}, {{{2, 32, 10, 20}, {2, 10, 10, 20}}}}
+const std::vector<InputShape> dynamicInputShapes4D = {
+    {
+        { // Origin dynamic shapes
+            {ov::Dimension(1, 20), ov::Dimension(10, 40), ov::Dimension(10, 40), ov::Dimension(10, 40)}
+        },
+        { // Dynamic shapes instances
+            {{1, 32, 21, 10}},
+            {{2, 25, 11, 12}},
+            {{4, 15, 16, 14}},
+            {{7, 10, 20, 16}}
+        }
+    },
+    {
+        { // Origin dynamic shapes
+            {-1, -1, -1, -1}
+        },
+        { // Dynamic shapes instances
+            {{1, 24, 21, 8}},
+            {{2, 16, 11, 6}}
+        }
+    }
 };

 const std::vector<std::vector<size_t>> inputOrder4D = {
@@ -162,7 +202,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamicShapes4D_Transpose, TransposeLayerCPUTest,
                                  ::testing::ValuesIn(netPrecisions),
                                  ::testing::Values(CommonTestUtils::DEVICE_CPU),
                                  ::testing::Values(additional_config),
-                                 ::testing::ValuesIn(std::vector<CPUSpecificParams>{})),
+                                 ::testing::Values(CPUSpecificParams{})),
                          TransposeLayerCPUTest::getTestCaseName);

 INSTANTIATE_TEST_SUITE_P(smoke_staticShapes4D_PermutePerChannels, TransposeLayerCPUTest,
@@ -182,16 +222,32 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamicShapes4D_PermutePerChannels, TransposeLayerCPUTest,
                                  ::testing::ValuesIn(netPrecisionsPerChannels),
                                  ::testing::Values(CommonTestUtils::DEVICE_CPU),
                                  ::testing::Values(additional_config),
-                                 ::testing::Values(cpuParams_nhwc)),
+                                 ::testing::Values(CPUSpecificParams{})),
                          TransposeLayerCPUTest::getTestCaseName);

-const std::vector<InputShape>
-    staticInputShapes5D = {
-        {{}, {{{2, 32, 5, 10, 20}}}}
+const std::vector<InputShape> staticInputShapes5D = {
+    {
+        {},
+        { // Static shapes
+            {{2, 16, 5, 6, 5}},
+            {{3, 16, 6, 5, 6}},
+            {{4, 32, 5, 6, 5}},
+            {{5, 32, 6, 5, 6}}
+        }
+    }
 };

-const std::vector<InputShape>
-    dynamicInputShapes5D = {
-        {{{2, ov::Dimension(20, 40), 5, 10, 20}}, {{{2, 32, 5, 10, 20}, {2, 20, 5, 10, 20}}}}
+const std::vector<InputShape> dynamicInputShapes5D = {
+    {
+        { // Origin dynamic shapes
+            {ov::Dimension(1, 20), ov::Dimension(5, 150), ov::Dimension(5, 40), ov::Dimension(5, 40), ov::Dimension(5, 40)}
+        },
+        { // Dynamic shapes instances
+            {{1, 32, 5, 6, 5}},
+            {{2, 32, 6, 5, 6}},
+            {{4, 55, 5, 6, 5}},
+            {{3, 129, 6, 5, 6}}
+        }
+    }
 };

 const std::vector<std::vector<size_t>> inputOrder5D = {
@@ -240,7 +296,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamicShapes5D_Transpose, TransposeLayerCPUTest,
                                  ::testing::ValuesIn(netPrecisions),
                                  ::testing::Values(CommonTestUtils::DEVICE_CPU),
                                  ::testing::Values(additional_config),
-                                 ::testing::ValuesIn(std::vector<CPUSpecificParams>{})),
+                                 ::testing::Values(CPUSpecificParams{})),
                          TransposeLayerCPUTest::getTestCaseName);

 INSTANTIATE_TEST_SUITE_P(smoke_staticShapes5D_PermutePerChannels, TransposeLayerCPUTest,
@@ -260,7 +316,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamicShapes5D_PermutePerChannels, TransposeLayerCPUTest,
                                  ::testing::ValuesIn(netPrecisionsPerChannels),
                                  ::testing::Values(CommonTestUtils::DEVICE_CPU),
                                  ::testing::Values(additional_config),
-                                 ::testing::Values(cpuParams_ndhwc)),
+                                 ::testing::Values(CPUSpecificParams{})),
                          TransposeLayerCPUTest::getTestCaseName);

 } // namespace