[CPU] Concat dynamism support (#8405)

Maxim Andronov 2021-11-16 14:32:03 +03:00 committed by GitHub
parent 5512c36358
commit b9c5a477b4
3 changed files with 606 additions and 213 deletions


@@ -33,11 +33,6 @@ namespace {
 bool MKLDNNConcatNode::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
     try {
-        if (isDynamicNgraphNode(op)) {
-            errorMessage = "Doesn't support op with dynamic shapes";
-            return false;
-        }
         const auto concatOp = ngraph::as_type_ptr<const ngraph::op::v0::Concat>(op);
         if (!concatOp) {
             errorMessage = "Node is not an instance of the Concat operation.";
@@ -66,14 +61,14 @@ MKLDNNConcatNode::MKLDNNConcatNode(const std::shared_ptr<ngraph::Node>& op, cons
 }
 
 void MKLDNNConcatNode::getSupportedDescriptors() {
-    auto& firstParentDims = getInputShapeAtPort(0).getStaticDims();
+    const auto& firstParentDims = getInputShapeAtPort(0).getDims();
     for (size_t i = 1; i < getParentEdges().size(); i++) {
-        auto& dims = getInputShapeAtPort(i).getStaticDims();
+        const auto& dims = getInputShapeAtPort(i).getDims();
         bool incorrectDims = false;
         for (size_t j = 0; j < firstParentDims.size(); j++) {
             if (j == axis)
                 continue;
-            if (dims.size() != firstParentDims.size() || firstParentDims[j] != dims[j]) {
+            if (dims.size() != firstParentDims.size() || !dimsEqualWeak(firstParentDims[j], dims[j])) {
                 incorrectDims = true;
                 break;
             }
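The switch to `dimsEqualWeak` is what lets partially dynamic inputs through this validation: two dims are considered weakly equal when either side is still undefined. Presumably the helper behaves like the following minimal sketch (the real one lives in the plugin's utility headers; `UNDEFINED_DIM` below is a stand-in for `Shape::UNDEFINED_DIM`):

    #include <cstddef>
    #include <limits>

    // Stand-in for Shape::UNDEFINED_DIM (assumption: a max-value sentinel).
    constexpr std::size_t UNDEFINED_DIM = std::numeric_limits<std::size_t>::max();

    // Weak equality: an undefined dim is compatible with anything,
    // while two defined dims must match exactly.
    inline bool dimsEqualWeak(std::size_t lhs, std::size_t rhs) {
        return lhs == UNDEFINED_DIM || rhs == UNDEFINED_DIM || lhs == rhs;
    }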
@@ -84,9 +79,12 @@ void MKLDNNConcatNode::getSupportedDescriptors() {
     }
 
     // we need the first dims before axis to be 1 to avoid the reorder in the edge between the first parent and this concat
-    const auto& childDims = outputShapes[0].getStaticDims();
-    if (std::all_of(childDims.begin(), childDims.begin() + axis, [](size_t dim) { return dim == 1; }))
-        canBeInPlace = true;
+    // TODO [DS]: inplace
+    if (!isDynamicNode()) {
+        const auto& childDims = outputShapes[0].getStaticDims();
+        if (std::all_of(childDims.begin(), childDims.begin() + axis, [](size_t dim) { return dim == 1; }))
+            canBeInPlace = true;
+    }
 }
 
 void MKLDNNConcatNode::initSupportedPrimitiveDescriptors() {
@@ -116,14 +114,14 @@ void MKLDNNConcatNode::initSupportedPrimitiveDescriptors() {
     // check if blocked layouts are available the channels size should be evenly divided by the block size to avoid slow oneDNN ref implementation
     if (dstShape.getRank() > channelAxis) {
         for (auto item : { std::make_pair(8lu, LayoutType::nCsp8c), std::make_pair(16lu, LayoutType::nCsp16c)}) {
-            const VectorDims &blkDims = dstShape.getStaticDims();
-            if (blkDims[channelAxis] % item.first)
+            const VectorDims &blkDims = dstShape.getDims();
+            if (blkDims[channelAxis] == Shape::UNDEFINED_DIM || blkDims[channelAxis] % item.first != 0)
                 continue;
 
             bool blocked = true;
             for (size_t i = 0; i < getParentEdges().size(); i++) {
-                auto& srcDims = getInputShapeAtPort(i).getStaticDims();
-                if (srcDims[channelAxis] % item.first) {
+                auto& srcDims = getInputShapeAtPort(i).getDims();
+                if (srcDims[channelAxis] == Shape::UNDEFINED_DIM || srcDims[channelAxis] % item.first != 0) {
                     blocked = false;
                     break;
                 }
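These guards are the whole blocked-layout policy under dynamism: when the channel dim is not known at graph-compile time, divisibility by the block size cannot be proven, so the nCsp8c/nCsp16c variants are simply not offered and the node falls back to the planar and nspc descriptors. A sketch of the eligibility predicate, reusing the `UNDEFINED_DIM` stand-in from the sketch above:

    // A blocked layout qualifies only when the channel dim is both static
    // and evenly divisible by the block size (8 or 16).
    inline bool channelFitsBlock(std::size_t channels, std::size_t blockSize) {
        return channels != UNDEFINED_DIM && channels % blockSize == 0;
    }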
@@ -153,7 +151,13 @@ void MKLDNNConcatNode::initSupportedPrimitiveDescriptors() {
         for (size_t i = 0; i < getParentEdges().size(); ++i) {
             config.inConfs[i].inPlace = -1;
             config.inConfs[i].constant = false;
-            config.inConfs[i].desc = itr->second->createDesc(inputPrecision, getInputShapeAtPort(i)).cloneWithUndefStridesAndOffset();
+            auto desc = itr->second->createSharedDesc(inputPrecision, getInputShapeAtPort(i));
+            // TODO [DS]: inplace
+            if (isDynamicNode()) {
+                config.inConfs[i].desc = desc;
+            } else {
+                config.inConfs[i].desc = desc->cloneWithUndefStridesAndOffset();
+            }
         }
         supportedPrimitiveDescriptors.emplace_back(config, impl_desc_type::ref);
         if (itr->first != LayoutType::nspc) {
@@ -167,11 +171,12 @@ void MKLDNNConcatNode::initSupportedPrimitiveDescriptors() {
             return;
         }
     }
 
+    // TODO [DS]: inplace
     if (!canBeInPlace)
         return;
 
     // Optimized inplace case
     for (auto refPdIndex : pdIndexesToReuse) {
         const auto& refConfig = supportedPrimitiveDescriptors[refPdIndex].getConfig();
         auto config = refConfig;
@@ -259,7 +264,7 @@ void MKLDNNConcatNode::selectOptimalPrimitiveDescriptor() {
     }
 
     size_t maxCount = 0;
-    auto outDims = getOutputShapeAtPort(0).getStaticDims();
+    const auto &outDims = getOutputShapeAtPort(0).getDims();
     LayoutType convertTo = LayoutType::ncsp;
     for (auto &it : formatFrequency) {
         if (it.second > maxCount) {
@@ -276,13 +281,13 @@ void MKLDNNConcatNode::selectOptimalPrimitiveDescriptor() {
     for (auto& item : { std::make_pair(8lu, LayoutType::nCsp8c), std::make_pair(16lu, LayoutType::nCsp16c) }) {
         if (convertTo == item.second) {
-            if (outDims[1] % item.first != 0) {
+            if (outDims[channelAxis] == Shape::UNDEFINED_DIM || outDims[1] % item.first != 0) {
                 convertTo = LayoutType::ncsp;
                 break;
             }
             for (size_t i = 0; i < getParentEdges().size(); i++) {
-                auto& inpDims = getInputShapeAtPort(i).getStaticDims();
-                if (inpDims[1] % item.first != 0) {
+                const auto& inpDims = getInputShapeAtPort(i).getDims();
+                if (inpDims[channelAxis] == Shape::UNDEFINED_DIM || inpDims[1] % item.first != 0) {
                     convertTo = LayoutType::ncsp;
                     break;
                 }
@@ -330,26 +335,27 @@ bool MKLDNNConcatNode::isOptimized() const {
     return getSelectedPrimitiveDescriptor() && getSelectedPrimitiveDescriptor()->getConfig().inConfs[0].inPlace >= 0;
 }
 
-void MKLDNNConcatNode::createPrimitive() {
-    if (prim || isOptimized())
+bool MKLDNNConcatNode::needPrepareParams() const {
+    if (canOptimizeNspc) {
+        return false;
+    }
+    return inputShapesModified();
+}
+
+void MKLDNNConcatNode::prepareParams() {
+    if (canOptimizeNspc || isOptimized())
         return;
 
-    auto& dstMemPtr = getChildEdgeAt(0)->getMemoryPtr();
+    const auto& dstMemPtr = getChildEdgesAtPort(0)[0]->getMemoryPtr();
     if (!dstMemPtr || !dstMemPtr->GetPrimitivePtr())
         IE_THROW() << "Destination memory didn't allocate.";
     if (getSelectedPrimitiveDescriptor() == nullptr)
         IE_THROW() << "Preferable primitive descriptor is not set.";
 
-    //check if selected Tensor descriptor has nspc layout and concat axis is C
-    if (axis == channelAxis && getChildEdgeAt(0)->getMemory().getDesc().hasLayoutType(LayoutType::nspc)) {
-        canOptimizeNspc = true;
-        return;
-    }
-
     std::vector<memory::desc> srcs_d;
     for (size_t i = 0; i < getParentEdges().size(); i++) {
-        auto& srcMemPtr = getParentEdgeAt(i)->getMemoryPtr();
+        const auto& srcMemPtr = getParentEdgesAtPort(i)[0]->getMemoryPtr();
         if (!srcMemPtr || !srcMemPtr->GetPrimitivePtr()) {
             auto parent = getParentEdgeAt(i)->getParent();
             IE_THROW() << "Source memory from " << parent->getName() << " didn't allocate for node "
@@ -357,7 +363,7 @@ void MKLDNNConcatNode::createPrimitive() {
         }
 
         auto desc = srcMemPtr->GetDescWithType<DnnlMemoryDesc>()->getDnnlDesc();
-        auto& dims = getInputShapeAtPort(i).getStaticDims();
+        const auto& dims = srcMemPtr->getStaticDims();
         for (size_t j = 0; j < dims.size(); j++) {
             desc.data.dims[j] = dims[j];
         }
@@ -365,8 +371,8 @@ void MKLDNNConcatNode::createPrimitive() {
         srcs_d.emplace_back(desc);
     }
 
-    auto desc = getChildEdgeAt(0)->getMemory().GetDescWithType<DnnlMemoryDesc>()->getDnnlDesc();
-    auto& dims = getOutputShapeAtPort(0).getStaticDims();
+    auto desc = dstMemPtr->GetDescWithType<DnnlMemoryDesc>()->getDnnlDesc();
+    const auto& dims = dstMemPtr->getStaticDims();
     for (size_t i = 0; i < dims.size(); i++) {
         desc.data.dims[i] = dims[i];
         desc.data.padded_dims[i] = dims[i];
@@ -376,6 +382,14 @@ void MKLDNNConcatNode::createPrimitive() {
     prim.reset(new concat(primitive_desc));
 }
 
+void MKLDNNConcatNode::createPrimitive() {
+    if (inputShapesDefined()) {
+        if (needPrepareParams())
+            prepareParams();
+        updateLastInputDims();
+    }
+}
+
 size_t MKLDNNConcatNode::inverseOrder(const SizeVector& order, size_t axis) {
     for (size_t i = 0; i < order.size(); i++) {
         if (axis == order[i]) {
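Taken together, these hunks are the dynamic-shape lifecycle: the old `createPrimitive()` body becomes `prepareParams()`, which is re-run whenever `needPrepareParams()` reports a shape change, and the new `createPrimitive()` builds the primitive eagerly only when every input shape is already defined. A simplified sketch of the assumed flow (illustrative names, not the plugin's actual MKLDNNNode interface):

    // Illustrative only: how a shape-dependent node behaves across infer requests.
    struct DynamicNodeSketch {
        bool shapesDefined  = false;   // stand-in for inputShapesDefined()
        bool shapesModified = true;    // stand-in for inputShapesModified()

        virtual ~DynamicNodeSketch() = default;
        virtual bool needPrepareParams() const { return shapesModified; }
        virtual void prepareParams() { /* rebuild the primitive for the current dims */ }

        void createPrimitive() {
            if (shapesDefined) {           // static case: build once, up front
                if (needPrepareParams())
                    prepareParams();
                shapesModified = false;    // i.e. updateLastInputDims()
            }
            // dynamic case: nothing to build yet; prepareParams() runs again
            // before each execute() whose input dims differ from the last run
        }
    };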
@@ -408,64 +422,66 @@ void MKLDNNConcatNode::initOptimalPrimitiveDescriptor() {
     }
 
     auto config = selected_pd->getConfig();
-    if (isConfigDefined(config))
-        return;
-
-    for (size_t i = 0; i < config.outConfs.size(); i++) {
-        if (config.outConfs[i].desc->isDefined())
-            continue;
-
-        int num = getChildEdgeAt(i)->getOutputNum();
-        if (num >= 0) {
-            auto childConf = getChildEdgeAt(i)->getChild()->getSelectedPrimitiveDescriptor()->getConfig().inConfs[num];
-            childConf.desc = childConf.desc->cloneWithNewPrecision(config.outConfs[i].desc->getPrecision());
-            if (getChildEdgeAt(i)->getChild()->getSelectedPrimitiveDescriptor()) {
-                if (!childConf.desc->isDefined() && childConf.inPlace >= 0)
-                    getChildEdgeAt(i)->getChild()->initOptimalPrimitiveDescriptor();
-                if (childConf.desc->isDefined() && childConf.desc->isCompatible(*config.outConfs[i].desc)) {
-                    config.outConfs[i].desc = childConf.desc;
-                    continue;
-                }
-            }
-        }
-
-        // reset undefined offsets
-        config.outConfs[i].desc = config.outConfs[i].desc->as<BlockedMemoryDesc>()->cloneWithDefaultStridesAndOffset();
-    }
-
-    auto firstOutBlockingDesc = config.outConfs[0].desc->as<BlockedMemoryDesc>();
-    size_t offset = 0;
-    for (size_t i = 0; i < config.inConfs.size(); i++) {
-        auto oldDesc = config.inConfs[i].desc;
-        auto inpBlockingDesc = oldDesc->as<BlockedMemoryDesc>();
-        config.inConfs[i].desc = std::make_shared<CpuBlockedMemoryDesc>(inpBlockingDesc->getPrecision(),
-                                                                        inpBlockingDesc->getShape(),
-                                                                        inpBlockingDesc->getBlockDims(),
-                                                                        inpBlockingDesc->getOrder(),
-                                                                        firstOutBlockingDesc->getOffsetPadding() + offset,
-                                                                        firstOutBlockingDesc->getOffsetPaddingToData(),
-                                                                        firstOutBlockingDesc->getStrides());
-        size_t axisSize = 1;
-
-        auto firstInpBlockingDesc = config.inConfs[0].desc->as<BlockedMemoryDesc>();
-        if (firstInpBlockingDesc->hasLayoutType(LayoutType::nspc)) {
-            // This is more general and works for any "direct" Layout (such as nchw or nhwc), but it doesn't work for blocked
-            size_t realAxis = inverseOrder(firstInpBlockingDesc->getOrder(), axis);
-            for (size_t j = realAxis; j < inpBlockingDesc->getBlockDims().size(); j++) {
-                size_t jj = firstInpBlockingDesc->getOrder()[j];
-                axisSize *= inpBlockingDesc->getBlockDims()[jj];
-            }
-        } else {
-            // This works for nchw and nchw8c/nchw16c
-            for (size_t j = axis; j < inpBlockingDesc->getBlockDims().size(); j++) {
-                axisSize *= inpBlockingDesc->getBlockDims()[j];
-            }
-        }
-        offset += axisSize;
-    }
-    initDescriptor(config);
+    if (!isDynamicNode() && !isConfigDefined(config)) {
+        for (size_t i = 0; i < config.outConfs.size(); i++) {
+            if (config.outConfs[i].desc->isDefined())
+                continue;
+
+            int num = getChildEdgeAt(i)->getOutputNum();
+            if (num >= 0) {
+                auto childConf = getChildEdgeAt(i)->getChild()->getSelectedPrimitiveDescriptor()->getConfig().inConfs[num];
+                childConf.desc = childConf.desc->cloneWithNewPrecision(config.outConfs[i].desc->getPrecision());
+                if (getChildEdgeAt(i)->getChild()->getSelectedPrimitiveDescriptor()) {
+                    if (!childConf.desc->isDefined() && childConf.inPlace >= 0)
+                        getChildEdgeAt(i)->getChild()->initOptimalPrimitiveDescriptor();
+                    if (childConf.desc->isDefined() && childConf.desc->isCompatible(*config.outConfs[i].desc)) {
+                        config.outConfs[i].desc = childConf.desc;
+                        continue;
+                    }
+                }
+            }
+
+            // reset undefined offsets
+            config.outConfs[i].desc = config.outConfs[i].desc->as<BlockedMemoryDesc>()->cloneWithDefaultStridesAndOffset();
+        }
+
+        auto firstOutBlockingDesc = config.outConfs[0].desc->as<BlockedMemoryDesc>();
+        size_t offset = 0;
+        for (size_t i = 0; i < config.inConfs.size(); i++) {
+            auto oldDesc = config.inConfs[i].desc;
+            auto inpBlockingDesc = oldDesc->as<BlockedMemoryDesc>();
+            config.inConfs[i].desc = std::make_shared<CpuBlockedMemoryDesc>(inpBlockingDesc->getPrecision(),
+                                                                            inpBlockingDesc->getShape(),
+                                                                            inpBlockingDesc->getBlockDims(),
+                                                                            inpBlockingDesc->getOrder(),
+                                                                            firstOutBlockingDesc->getOffsetPadding() + offset,
+                                                                            firstOutBlockingDesc->getOffsetPaddingToData(),
+                                                                            firstOutBlockingDesc->getStrides());
+            size_t axisSize = 1;
+
+            auto firstInpBlockingDesc = config.inConfs[0].desc->as<BlockedMemoryDesc>();
+            if (firstInpBlockingDesc->hasLayoutType(LayoutType::nspc)) {
+                // This is more general and works for any "direct" Layout (such as nchw or nhwc), but it doesn't work for blocked
+                size_t realAxis = inverseOrder(firstInpBlockingDesc->getOrder(), axis);
+                for (size_t j = realAxis; j < inpBlockingDesc->getBlockDims().size(); j++) {
+                    size_t jj = firstInpBlockingDesc->getOrder()[j];
+                    axisSize *= inpBlockingDesc->getBlockDims()[jj];
+                }
+            } else {
+                // This works for nchw and nchw8c/nchw16c
+                for (size_t j = axis; j < inpBlockingDesc->getBlockDims().size(); j++) {
+                    axisSize *= inpBlockingDesc->getBlockDims()[j];
+                }
+            }
+            offset += axisSize;
+        }
+        initDescriptor(config);
+    }
+
+    // check if selected Tensor descriptor has nspc layout and concat axis is C
+    canOptimizeNspc = axis == channelAxis && getSelectedPrimitiveDescriptor()->getConfig().outConfs.front().desc->hasLayoutType(LayoutType::nspc);
 }
 
 void MKLDNNConcatNode::execute(mkldnn::stream strm) {


@@ -23,6 +23,7 @@ public:
     void selectOptimalPrimitiveDescriptor() override;
     bool created() const override;
     void execute(mkldnn::stream strm) override;
+    void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }
 
     bool isOptimized() const;
 
@@ -31,6 +32,9 @@ public:
         return !isOptimized();
     }
 
+    bool needPrepareParams() const override;
+    void prepareParams() override;
+
 private:
     size_t axis = 0;
     bool canBeInPlace = false;
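The one-line `executeDynamicImpl` that just forwards to `execute` works because, by the time a dynamic execution is dispatched, `prepareParams()` has already rebuilt the primitive for the shapes of the current request. A hedged sketch of the assumed per-request call order (the real driver lives in the node base class; the free function below is hypothetical):

    // Illustrative only: assumed per-request driver for a dynamic node.
    template <typename Node>
    void executeDynamic(Node& node, mkldnn::stream strm) {
        // shapes for this request were already inferred and memory reallocated
        if (node.needPrepareParams())
            node.prepareParams();            // rebuild the concat primitive
        node.executeDynamicImpl(strm);       // for Concat: just execute(strm)
    }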


@@ -2,68 +2,82 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "shared_test_classes/base/ov_subgraph.hpp"
 #include "ngraph_functions/builders.hpp"
 #include "test_utils/cpu_test_utils.hpp"
 
-using namespace InferenceEngine;
+using namespace ov::test;
 using namespace CPUTestUtils;
 
 namespace CPULayerTestsDefinitions {
 
 typedef std::tuple<
         size_t,                           // Concat axis
-        std::vector<std::vector<size_t>>, // Input shapes
-        InferenceEngine::Precision,       // Network precision
-        std::string,                      // Device name
+        std::vector<InputShape>,          // Input shapes
+        ElementType,                      // Network precision
         CPUSpecificParams
 > concatCPUTestParams;
 
 class ConcatLayerCPUTest : public testing::WithParamInterface<concatCPUTestParams>,
-                           virtual public LayerTestsUtils::LayerTestsCommon, public CPUTestsBase {
+                           virtual public SubgraphBaseTest, public CPUTestsBase {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<concatCPUTestParams> obj) {
         int axis;
-        std::vector<std::vector<size_t>> inputShapes;
-        InferenceEngine::Precision netPrecision;
-        std::string targetName;
+        std::vector<InputShape> inputShapes;
+        ElementType netPrecision;
         CPUSpecificParams cpuParams;
-        std::tie(axis, inputShapes, netPrecision, targetName, cpuParams) = obj.param;
+        std::tie(axis, inputShapes, netPrecision, cpuParams) = obj.param;
 
         std::ostringstream result;
-        result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
+        result << "IS=";
+        for (const auto& shape : inputShapes) {
+            result << CommonTestUtils::partialShape2str({shape.first}) << "_";
+        }
+        result << "TS=";
+        for (const auto& shape : inputShapes) {
+            result << "(";
+            if (!shape.second.empty()) {
+                for (const auto& itr : shape.second) {
+                    result << CommonTestUtils::vec2str(itr);
+                }
+            }
+            result << ")_";
+        }
         result << "axis=" << axis << "_";
-        result << "netPRC=" << netPrecision.name() << "_";
-        result << "trgDev=" << targetName << "_";
+        result << "netPRC=" << netPrecision << "_";
         result << CPUTestsBase::getTestCaseName(cpuParams);
         return result.str();
     }
 
 protected:
     void SetUp() override {
+        targetDevice = CommonTestUtils::DEVICE_CPU;
+
         int axis;
-        std::vector<std::vector<size_t>> inputShape;
-        InferenceEngine::Precision netPrecision;
+        std::vector<InputShape> inputShape;
+        ElementType netPrecision;
         CPUSpecificParams cpuParams;
-        std::tie(axis, inputShape, netPrecision, targetDevice, cpuParams) = this->GetParam();
-        inPrc = outPrc = netPrecision;
+        std::tie(axis, inputShape, netPrecision, cpuParams) = this->GetParam();
         std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
-        selectedType += std::string("_") + inPrc.name();
+        selectedType += std::string("_") + InferenceEngine::details::convertPrecision(netPrecision).name();
 
-        auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-        auto params = ngraph::builder::makeParams(ngPrc, inputShape);
+        init_input_shapes(inputShape);
+
+        auto params = ngraph::builder::makeDynamicParams(netPrecision, inputDynamicShapes);
         auto paramOuts = ngraph::helpers::convert2OutputVector(
                 ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
         auto concat = std::make_shared<ngraph::opset1::Concat>(paramOuts, axis);
 
-        function = makeNgraphFunction(ngPrc, params, concat, "concat");
+        function = makeNgraphFunction(netPrecision, params, concat, "ConcatCPU");
     }
 };
 
 TEST_P(ConcatLayerCPUTest, CompareWithRefs) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
 
-    Run();
-    CheckPluginRelatedResults(executableNetwork, "Concatenation");
+    run();
+    // CheckPluginRelatedResults(executableNetwork, "Concatenation");
 }
 
 namespace {
 
@@ -89,151 +103,510 @@ const auto blocked16_4D_ref = CPUSpecificParams{{nChw16c}, {nChw16c}, {}, "ref"}
 const auto blocked16_5D_ref = CPUSpecificParams{{nCdhw16c}, {nCdhw16c}, {}, "ref"};
 
 // List of precisions natively supported by mkldnn.
-const std::vector<Precision> netPrecisions = {
-        Precision::I8,
-        Precision::I32,
-        Precision::FP32,
-        Precision::BF16
+const std::vector<ElementType> netPrecisions = {
+        ElementType::i8,
+        ElementType::i32,
+        ElementType::f32,
+        ElementType::bf16
 };
INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block8_static, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(1, 2, 3),
::testing::Values(static_shapes_to_test_representation({{2, 16, 3, 5}, {2, 16, 3, 5}})),
::testing::ValuesIn(netPrecisions),
::testing::Values(planar_4D_ref, planarChannels_4D, blocked8_4D_ref)),
ConcatLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block16_static, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(1, 2, 3),
::testing::Values(static_shapes_to_test_representation({{3, 32, 3, 5}, {3, 32, 3, 5}})),
::testing::ValuesIn(netPrecisions),
::testing::Values(blocked16_4D_ref)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes4D_Block_axis1 = {
{
// {{dynamic shape}, {{static shape case1}, {static shape case2}, ...}} (format illustrated after this list)
{{-1, 32, -1, -1}, {{2, 32, 5, 7}, {1, 32, 10, 2}, {3, 32, 1, 8}}}, // input 0
{{-1, 16, -1, -1}, {{2, 16, 5, 7}, {1, 16, 10, 2}, {3, 16, 1, 8}}}, // input 1
{{-1, 64, -1, -1}, {{2, 64, 5, 7}, {1, 64, 10, 2}, {3, 64, 1, 8}}} // input 2
},
{
{{{1, 5}, 32, {1, 10}, {2, 8}}, {{2, 32, 5, 7}, {1, 32, 10, 2}, {3, 32, 1, 8}}},
{{{1, 3}, 16, {1, 10}, {2, 8}}, {{2, 16, 5, 7}, {1, 16, 10, 2}, {3, 16, 1, 8}}},
{{{1, 3}, 64, {1, 10}, {2, 8}}, {{2, 64, 5, 7}, {1, 64, 10, 2}, {3, 64, 1, 8}}}
},
};
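Each entry above is an `ov::test::InputShape`: a partial shape handed to the plugin at compile time, paired with the concrete shapes fed across successive infer requests, so one test case exercises several reshapes of a single compiled model. The first input of the first case, spelled out (a sketch assuming the pair-like `InputShape` layout):

    // partial shape (-1 = dynamic dim) plus the static shapes run through it, in order
    ov::test::InputShape in0 = {
        {-1, 32, -1, -1},                               // known up front: rank 4, 32 channels
        {{2, 32, 5, 7}, {1, 32, 10, 2}, {3, 32, 1, 8}}  // per-request shapes
    };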
INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block_dynamic_axis_1, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(1),
::testing::ValuesIn(inputShapes4D_Block_axis1),
::testing::ValuesIn(netPrecisions),
::testing::Values(blocked8_4D_ref, blocked16_4D_ref)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes4D_axis1 = {
{
{{-1, -1, -1, -1}, {{2, 32, 5, 7}, {1, 18, 10, 2}, {3, 8, 1, 8}}},
{{-1, -1, -1, -1}, {{2, 16, 5, 7}, {1, 5, 10, 2}, {3, 3, 1, 8}}},
{{-1, -1, -1, -1}, {{2, 64, 5, 7}, {1, 45, 10, 2}, {3, 1, 1, 8}}}
},
{
{{{1, 3}, {8, 32}, {1, 10}, {2, 8}}, {{2, 32, 5, 7}, {1, 18, 10, 2}, {3, 8, 1, 8}}},
{{{1, 3}, {3, 16}, {1, 10}, {2, 8}}, {{2, 16, 5, 7}, {1, 5, 10, 2}, {3, 3, 1, 8}}},
{{{1, 3}, {1, 64}, {1, 10}, {2, 8}}, {{2, 64, 5, 7}, {1, 45, 10, 2}, {3, 1, 1, 8}}}
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_dynamic_axis_1, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(1),
::testing::ValuesIn(inputShapes4D_axis1),
::testing::ValuesIn(netPrecisions),
::testing::Values(planar_4D_ref, planarChannels_4D)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes4D_Block_axis2 = {
{
{{-1, 16, -1, -1}, {{2, 16, 5, 7}, {1, 16, 16, 2}, {3, 16, 2, 8}}},
{{-1, 16, -1, -1}, {{2, 16, 1, 7}, {1, 16, 3, 2}, {3, 16, 11, 8}}},
{{-1, 16, -1, -1}, {{2, 16, 10, 7}, {1, 16, 5, 2}, {3, 16, 1, 8}}},
},
{
{{{1, 3}, 16, {2, 16}, {2, 8}}, {{2, 16, 5, 7}, {1, 16, 16, 2}, {3, 16, 2, 8}}},
{{{1, 3}, 16, {1, 11}, {2, 8}}, {{2, 16, 1, 7}, {1, 16, 3, 2}, {3, 16, 11, 8}}},
{{{1, 3}, 16, {1, 10}, {2, 8}}, {{2, 16, 10, 7}, {1, 16, 5, 2}, {3, 16, 1, 8}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block_dynamic_axis_2, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(2),
::testing::ValuesIn(inputShapes4D_Block_axis2),
::testing::ValuesIn(netPrecisions),
::testing::Values(blocked8_4D_ref, blocked16_4D_ref)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes4D_axis2 = {
{
{{-1, -1, -1, -1}, {{2, 16, 5, 7}, {1, 16, 16, 2}, {3, 16, 2, 8}}},
{{-1, -1, -1, -1}, {{2, 16, 1, 7}, {1, 16, 3, 2}, {3, 16, 11, 8}}},
{{-1, -1, -1, -1}, {{2, 16, 10, 7}, {1, 16, 5, 2}, {3, 16, 1, 8}}},
},
{
{{{1, 3}, {1, 16}, {2, 16}, {2, 8}}, {{2, 16, 5, 7}, {1, 16, 16, 2}, {3, 16, 2, 8}}},
{{{1, 3}, {1, 16}, {1, 11}, {2, 8}}, {{2, 16, 1, 7}, {1, 16, 3, 2}, {3, 16, 11, 8}}},
{{{1, 3}, {1, 16}, {1, 10}, {2, 8}}, {{2, 16, 10, 7}, {1, 16, 5, 2}, {3, 16, 1, 8}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_dynamic_axis_2, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(2),
::testing::ValuesIn(inputShapes4D_axis2),
::testing::ValuesIn(netPrecisions),
::testing::Values(planar_4D_ref, planarChannels_4D)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes4D_Block_axis3 = {
{
{{-1, 32, -1, -1}, {{2, 32, 4, 5}, {1, 32, 1, 16}, {3, 32, 7, 2}, }},
{{-1, 32, -1, -1}, {{2, 32, 4, 1}, {1, 32, 1, 3}, {3, 32, 7, 11}}},
{{-1, 32, -1, -1}, {{2, 32, 4, 10}, {1, 32, 1, 5}, {3, 32, 7, 1}}},
},
{
{{{1, 3}, 32, {1, 7}, {2, 16}}, {{2, 32, 4, 5}, {1, 32, 1, 16}, {3, 32, 7, 2}}},
{{{1, 3}, 32, {1, 7}, {1, 11}}, {{2, 32, 4, 1}, {1, 32, 1, 3}, {3, 32, 7, 11}}},
{{{1, 3}, 32, {1, 7}, {1, 10}}, {{2, 32, 4, 10}, {1, 32, 1, 5}, {3, 32, 7, 1}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block_dynamic_axis_3, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(3),
::testing::ValuesIn(inputShapes4D_Block_axis3),
::testing::ValuesIn(netPrecisions),
::testing::Values(blocked8_4D_ref, blocked16_4D_ref)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes4D_axis3 = {
{
{{-1, -1, -1, -1}, {{2, 32, 4, 5}, {1, 32, 1, 16}, {3, 32, 7, 2}}},
{{-1, -1, -1, -1}, {{2, 32, 4, 1}, {1, 32, 1, 3}, {3, 32, 7, 11}}},
{{-1, -1, -1, -1}, {{2, 32, 4, 10}, {1, 32, 1, 5}, {3, 32, 7, 1}}},
},
{
{{{1, 3}, {1, 32}, {1, 7}, {2, 16}}, {{2, 32, 4, 5}, {1, 32, 1, 16}, {3, 32, 7, 2}}},
{{{1, 3}, {1, 32}, {1, 7}, {1, 11}}, {{2, 32, 4, 1}, {1, 32, 1, 3}, {3, 32, 7, 11}}},
{{{1, 3}, {1, 32}, {1, 7}, {1, 10}}, {{2, 32, 4, 10}, {1, 32, 1, 5}, {3, 32, 7, 1}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_dynamic_axis_3, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(3),
::testing::ValuesIn(inputShapes4D_axis3),
::testing::ValuesIn(netPrecisions),
::testing::Values(planar_4D_ref, planarChannels_4D)),
ConcatLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block8_static, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(2, 3, 4),
::testing::Values(static_shapes_to_test_representation({{2, 16, 3, 5, 7}, {2, 16, 3, 5, 7}})),
::testing::ValuesIn(netPrecisions),
::testing::Values(planar_5D_ref, planarChannels_5D, blocked8_5D_ref)),
ConcatLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block16_static, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(2, 3, 4),
::testing::Values(static_shapes_to_test_representation({{2, 32, 3, 5, 7}, {2, 32, 3, 5, 7}})),
::testing::ValuesIn(netPrecisions),
::testing::Values(blocked16_5D_ref)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes5D_Block_axis1 = {
{
{{-1, 32, -1, -1, -1}, {{2, 32, 5, 7, 6}, {1, 32, 10, 2, 8}, {3, 32, 1, 8, 10}}},
{{-1, 16, -1, -1, -1}, {{2, 16, 5, 7, 6}, {1, 16, 10, 2, 8}, {3, 16, 1, 8, 10}}},
{{-1, 64, -1, -1, -1}, {{2, 64, 5, 7, 6}, {1, 64, 10, 2, 8}, {3, 64, 1, 8, 10}}},
},
{
{{{1, 3}, 32, {1, 10}, {2, 8}, {6, 10}}, {{2, 32, 5, 7, 6}, {1, 32, 10, 2, 8}, {3, 32, 1, 8, 10}}},
{{{1, 3}, 16, {1, 10}, {2, 8}, {6, 10}}, {{2, 16, 5, 7, 6}, {1, 16, 10, 2, 8}, {3, 16, 1, 8, 10}}},
{{{1, 3}, 64, {1, 10}, {2, 8}, {6, 10}}, {{2, 64, 5, 7, 6}, {1, 64, 10, 2, 8}, {3, 64, 1, 8, 10}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block_dynamic_axis_1, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(1),
::testing::ValuesIn(inputShapes5D_Block_axis1),
::testing::ValuesIn(netPrecisions),
::testing::Values(blocked8_5D_ref, blocked16_5D_ref)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes5D_axis1 = {
{
{{-1, -1, -1, -1, -1}, {{2, 5, 5, 7, 6}, {1, 3, 10, 2, 8}, {3, 4, 1, 8, 10}}},
{{-1, -1, -1, -1, -1}, {{2, 16, 5, 7, 6}, {1, 20, 10, 2, 8}, {3, 5, 1, 8, 10}, }},
{{-1, -1, -1, -1, -1}, {{2, 1, 5, 7, 6}, {1, 17, 10, 2, 8}, {3, 5, 1, 8, 10}}},
},
{
{{{1, 3}, {3, 5}, {1, 10}, {2, 8}, {6, 10}}, {{2, 5, 5, 7, 6}, {1, 3, 10, 2, 8}, {3, 4, 1, 8, 10}}},
{{{1, 3}, {5, 20}, {1, 10}, {2, 8}, {4, 10}}, {{2, 16, 5, 7, 6}, {1, 20, 10, 2, 8}, {3, 5, 1, 8, 10}, }},
{{{1, 3}, {1, 17}, {1, 10}, {2, 8}, {6, 10}}, {{2, 1, 5, 7, 6}, {1, 17, 10, 2, 8}, {3, 5, 1, 8, 10}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_dynamic_axis_1, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(1),
::testing::ValuesIn(inputShapes5D_axis1),
::testing::ValuesIn(netPrecisions),
::testing::Values(planar_5D_ref, planarChannels_5D)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes5D_Block_axis2 = {
{
{{-1, 16, -1, -1, -1}, {{2, 16, 5, 8, 7}, {1, 16, 16, 1, 2}, {3, 16, 2, 5, 8}, }},
{{-1, 16, -1, -1, -1}, {{2, 16, 1, 8, 7}, {1, 16, 3, 1, 2}, {3, 16, 11, 5, 8}}},
{{-1, 16, -1, -1, -1}, {{2, 16, 10, 8, 7}, {1, 16, 5, 1, 2}, {3, 16, 1, 5, 8}}},
},
{
{{{1, 3}, 16, {2, 16}, {1, 8}, {2, 8}}, {{2, 16, 5, 8, 7}, {1, 16, 16, 1, 2}, {3, 16, 2, 5, 8}, }},
{{{1, 5}, 16, {1, 11}, {1, 8}, {1, 8}}, {{2, 16, 1, 8, 7}, {1, 16, 3, 1, 2}, {3, 16, 11, 5, 8}}},
{{{1, 6}, 16, {1, 10}, {1, 8}, {2, 10}}, {{2, 16, 10, 8, 7}, {1, 16, 5, 1, 2}, {3, 16, 1, 5, 8}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block_dynamic_axis_2, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(2),
::testing::ValuesIn(inputShapes5D_Block_axis2),
::testing::ValuesIn(netPrecisions),
::testing::Values(blocked8_5D_ref, blocked16_5D_ref)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes5D_axis2 = {
{
{{-1, -1, -1, -1, -1}, {{2, 4, 5, 8, 7}, {1, 20, 16, 1, 2}, {3, 8, 2, 5, 8}}},
{{-1, -1, -1, -1, -1}, {{2, 4, 1, 8, 7}, {1, 20, 3, 1, 2}, {3, 8, 11, 5, 8}}},
{{-1, -1, -1, -1, -1}, {{2, 4, 10, 8, 7}, {1, 20, 5, 1, 2}, {3, 8, 1, 5, 8}}},
},
{
{{{1, 3}, {4, 20}, {1, 16}, {1, 8}, {2, 8}}, {{2, 4, 5, 8, 7}, {1, 20, 16, 1, 2}, {3, 8, 2, 5, 8}}},
{{{1, 3}, {4, 20}, {1, 11}, {1, 10}, {1, 15}}, {{2, 4, 1, 8, 7}, {1, 20, 3, 1, 2}, {3, 8, 11, 5, 8}}},
{{{1, 3}, {1, 20}, {1, 15}, {1, 10}, {2, 8}}, {{2, 4, 10, 8, 7}, {1, 20, 5, 1, 2}, {3, 8, 1, 5, 8}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_dynamic_axis_2, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(2),
::testing::ValuesIn(inputShapes5D_axis2),
::testing::ValuesIn(netPrecisions),
::testing::Values(planar_5D_ref, planarChannels_5D)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes5D_Block_axis3 = {
{
{{-1, 32, -1, -1, -1}, {{2, 32, 4, 5, 7}, {1, 32, 1, 16, 3}, {3, 32, 7, 2, 4}}},
{{-1, 32, -1, -1, -1}, {{2, 32, 4, 1, 7}, {1, 32, 1, 3, 3}, {3, 32, 7, 11, 4}}},
{{-1, 32, -1, -1, -1}, {{2, 32, 4, 10, 7}, {1, 32, 1, 5, 3}, {3, 32, 7, 1, 4}}},
},
{
{{{1, 3}, 32, {1, 7}, {2, 16}, {3, 7}}, {{2, 32, 4, 5, 7}, {1, 32, 1, 16, 3}, {3, 32, 7, 2, 4}, }},
{{{1, 5}, 32, {1, 7}, {1, 11}, {3, 7}}, {{2, 32, 4, 1, 7}, {1, 32, 1, 3, 3}, {3, 32, 7, 11, 4}}},
{{{1, 6}, 32, {1, 15}, {1, 10}, {1, 20}}, {{2, 32, 4, 10, 7}, {1, 32, 1, 5, 3}, {3, 32, 7, 1, 4}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block_dynamic_axis_3, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(3),
::testing::ValuesIn(inputShapes5D_Block_axis3),
::testing::ValuesIn(netPrecisions),
::testing::Values(blocked8_5D_ref, blocked16_5D_ref)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes5D_axis3 = {
{
{{-1, -1, -1, -1, -1}, {{2, 32, 4, 5, 7}, {1, 11, 1, 16, 3}, {3, 7, 7, 2, 4}}},
{{-1, -1, -1, -1, -1}, {{2, 32, 4, 1, 7}, {1, 11, 1, 3, 3}, {3, 7, 7, 11, 4}}},
{{-1, -1, -1, -1, -1}, {{2, 32, 4, 10, 7}, {1, 11, 1, 5, 3}, {3, 7, 7, 1, 4}}},
},
{
{{{1, 7}, {7, 32}, {1, 7}, {1, 16}, {3, 14}}, {{2, 32, 4, 5, 7}, {1, 11, 1, 16, 3}, {3, 7, 7, 2, 4}, }},
{{{1, 7}, {7, 32}, {1, 10}, {1, 11}, {3, 7}}, {{2, 32, 4, 1, 7}, {1, 11, 1, 3, 3}, {3, 7, 7, 11, 4}}},
{{{1, 7}, {1, 32}, {1, 10}, {1, 10}, {1, 10}}, {{2, 32, 4, 10, 7}, {1, 11, 1, 5, 3}, {3, 7, 7, 1, 4}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_dynamic_axis_3, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(3),
::testing::ValuesIn(inputShapes5D_axis3),
::testing::ValuesIn(netPrecisions),
::testing::Values(planar_5D_ref, planarChannels_5D)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes5D_Block_axis4 = {
{
{{-1, 32, -1, -1, -1}, {{2, 32, 4, 5, 5}, {1, 32, 1, 1, 16}, {3, 32, 7, 9, 2}, }},
{{-1, 32, -1, -1, -1}, {{2, 32, 4, 5, 1}, {1, 32, 1, 1, 3}, {3, 32, 7, 9, 11}}},
{{-1, 32, -1, -1, -1}, {{2, 32, 4, 5, 10}, {1, 32, 1, 1, 5}, {3, 32, 7, 9, 1}}},
},
{
{{{1, 15}, 32, {1, 10}, {1, 10}, {1, 16}}, {{2, 32, 4, 5, 5}, {1, 32, 1, 1, 16}, {3, 32, 7, 9, 2}, }},
{{{1, 15}, 32, {1, 10}, {1, 10}, {1, 11}}, {{2, 32, 4, 5, 1}, {1, 32, 1, 1, 3}, {3, 32, 7, 9, 11}}},
{{{1, 15}, 32, {1, 10}, {1, 10}, {1, 11}}, {{2, 32, 4, 5, 10}, {1, 32, 1, 1, 5}, {3, 32, 7, 9, 1}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block_dynamic_axis_4, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(4),
::testing::ValuesIn(inputShapes5D_Block_axis4),
::testing::ValuesIn(netPrecisions),
::testing::Values(blocked8_5D_ref, blocked16_5D_ref)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes5D_axis4 = {
{
{{-1, -1, -1, -1, -1}, {{2, 1, 4, 5, 5}, {1, 4, 1, 1, 16}, {3, 14, 7, 9, 2}}},
{{-1, -1, -1, -1, -1}, {{2, 1, 4, 5, 1}, {1, 4, 1, 1, 3}, {3, 14, 7, 9, 11}}},
{{-1, -1, -1, -1, -1}, {{2, 1, 4, 5, 10}, {1, 4, 1, 1, 5}, {3, 14, 7, 9, 1}}},
},
{
{{{1, 3}, {1, 14}, {1, 7}, {1, 10}, {2, 16}}, {{2, 1, 4, 5, 5}, {1, 4, 1, 1, 16}, {3, 14, 7, 9, 2}}},
{{{1, 3}, {1, 14}, {1, 7}, {1, 9}, {1, 11}}, {{2, 1, 4, 5, 1}, {1, 4, 1, 1, 3}, {3, 14, 7, 9, 11}}},
{{{1, 3}, {1, 14}, {1, 7}, {1, 9}, {1, 10}}, {{2, 1, 4, 5, 10}, {1, 4, 1, 1, 5}, {3, 14, 7, 9, 1}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_dynamic_axis_4, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(4),
::testing::ValuesIn(inputShapes5D_axis4),
::testing::ValuesIn(netPrecisions),
::testing::Values(planar_5D_ref, planarChannels_5D)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes_byBatch = {
static_shapes_to_test_representation({{5, 2, 2, 2}, {2, 2, 2, 2}}),
static_shapes_to_test_representation({{1, 3, 5}, {3, 3, 5}}),
static_shapes_to_test_representation({{4, 3, 2}, {1, 3, 2}}),
// 5D
{
{{-1, -1, -1, -1, -1}, {{10, 32, 4, 5, 5}, {4, 7, 1, 1, 3}, {3, 20, 7, 9, 1}, }},
{{-1, -1, -1, -1, -1}, {{5, 32, 4, 5, 5}, {7, 7, 1, 1, 3}, {3, 20, 7, 9, 1}}},
{{-1, -1, -1, -1, -1}, {{1, 32, 4, 5, 5}, {1, 7, 1, 1, 3}, {6, 20, 7, 9, 1}}},
},
{
{{{3, 10}, {7, 32}, {1, 9}, {1, 10}, {1, 5}}, {{10, 32, 4, 5, 5}, {4, 7, 1, 1, 3}, {3, 20, 7, 9, 1}, }},
{{{3, 7}, {7, 32}, {1, 7}, {1, 9}, {1, 5}}, {{5, 32, 4, 5, 5}, {7, 7, 1, 1, 3}, {3, 20, 7, 9, 1}}},
{{{1, 6}, {7, 32}, {1, 7}, {1, 9}, {1, 5}}, {{1, 32, 4, 5, 5}, {1, 7, 1, 1, 3}, {6, 20, 7, 9, 1}}},
},
// 4D
{
{{-1, -1, -1, -1}, {{10, 32, 4, 5}, {4, 7, 1, 1}, {3, 20, 7, 9}, }},
{{-1, -1, -1, -1}, {{5, 32, 4, 5}, {7, 7, 1, 1}, {3, 20, 7, 9}}},
{{-1, -1, -1, -1}, {{1, 32, 4, 5}, {1, 7, 1, 1}, {6, 20, 7, 9}}},
},
{
{{{1, 10}, {1, 32}, {1, 7}, {1, 9}}, {{10, 32, 4, 5}, {4, 7, 1, 1}, {3, 20, 7, 9}, }},
{{{3, 7}, {7, 32}, {1, 7}, {1, 9}}, {{5, 32, 4, 5}, {7, 7, 1, 1}, {3, 20, 7, 9}}},
{{{1, 6}, {7, 32}, {1, 7}, {1, 9}}, {{1, 32, 4, 5}, {1, 7, 1, 1}, {6, 20, 7, 9}}},
}
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat_byBatch, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(0),
::testing::ValuesIn(inputShapes_byBatch),
::testing::ValuesIn(netPrecisions),
::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes3D_axis1 = {
static_shapes_to_test_representation({{2, 4, 5}, {2, 4, 5}}),
{
{{-1, -1, -1}, {{2, 5, 12}, {1, 16, 1}, {5, 2, 6}, }},
{{-1, -1, -1}, {{2, 1, 12}, {1, 3, 1}, {5, 11, 6}}},
{{-1, -1, -1}, {{2, 10, 12}, {1, 5, 1}, {5, 1, 6}}},
},
{
{{{1, 5}, {2, 16}, {1, 12}}, {{2, 5, 12}, {1, 16, 1}, {5, 2, 6}, }},
{{{1, 5}, {1, 11}, {1, 21}}, {{2, 1, 12}, {1, 3, 1}, {5, 11, 6}}},
{{{1, 5}, {1, 10}, {1, 12}}, {{2, 10, 12}, {1, 5, 1}, {5, 1, 6}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat_3D_axis1, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(1),
::testing::ValuesIn(inputShapes3D_axis1),
::testing::ValuesIn(netPrecisions),
::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes3D_axis2 = {
static_shapes_to_test_representation({{2, 4, 5}, {2, 4, 5}}),
{
{{-1, -1, -1}, {{4, 4, 5}, {3, 2, 16}, {1, 1, 2}}},
{{-1, -1, -1}, {{4, 4, 1}, {3, 2, 3}, {1, 1, 11}}},
{{-1, -1, -1}, {{4, 4, 10}, {3, 2, 5}, {1, 1, 1}}},
},
{
{{{1, 4}, {1, 4}, {2, 16}}, {{4, 4, 5}, {3, 2, 16}, {1, 1, 2}, }},
{{{1, 4}, {1, 4}, {1, 11}}, {{4, 4, 1}, {3, 2, 3}, {1, 1, 11}}},
{{{1, 4}, {1, 4}, {1, 10}}, {{4, 4, 10}, {3, 2, 5}, {1, 1, 1}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat_3D_axis2, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(2),
::testing::ValuesIn(inputShapes3D_axis2),
::testing::ValuesIn(netPrecisions),
::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes2D_axis1 = {
static_shapes_to_test_representation({{3, 2}, {3, 10}}),
{
{{-1, -1}, {{19, 5}, {1, 16}, {8, 2}, }},
{{-1, -1}, {{19, 1}, {1, 3}, {8, 11}}},
{{-1, -1}, {{19, 10}, {1, 5}, {8, 1}}},
},
{
{{{1, 19}, {2, 16}}, {{19, 5}, {1, 16}, {8, 2}, }},
{{{1, 19}, {1, 11}}, {{19, 1}, {1, 3}, {8, 11}}},
{{{1, 19}, {1, 10}}, {{19, 10}, {1, 5}, {8, 1}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat_2D_axis1, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(1),
::testing::ValuesIn(inputShapes2D_axis1),
::testing::ValuesIn(netPrecisions),
::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes1D = {
static_shapes_to_test_representation({ov::Shape{5}, ov::Shape{5}}),
static_shapes_to_test_representation({ov::Shape{2}, ov::Shape{2}}),
static_shapes_to_test_representation({ov::Shape{1}, ov::Shape{1}}),
static_shapes_to_test_representation({ov::Shape{3}, ov::Shape{3}}),
{
{{-1}, {{19}, {8}, {5}}},
{{-1}, {{19}, {8}, {5}}},
{{-1}, {{19}, {8}, {5}}},
},
{
{{{1, 20}}, {{19}, {8}, {5}}},
{{{1, 20}}, {{19}, {8}, {5}}},
{{{1, 20}}, {{19}, {8}, {5}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat_1D, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(0),
::testing::ValuesIn(inputShapes1D),
::testing::ValuesIn(netPrecisions),
::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})),
ConcatLayerCPUTest::getTestCaseName);
// ============================================== inPlace cases ============================================
 INSTANTIATE_TEST_SUITE_P(concat_Concat4D_CPU_Block8inPlace, ConcatLayerCPUTest,
         ::testing::Combine(
                 ::testing::Values(0, 1),
-                ::testing::Values(std::vector<std::vector<size_t>>{{1, 8, 3, 5},
-                                                                   {1, 8, 3, 5}}),
+                ::testing::Values(static_shapes_to_test_representation({{1, 8, 3, 5}, {1, 8, 3, 5}})),
                 ::testing::ValuesIn(netPrecisions),
-                ::testing::Values(CommonTestUtils::DEVICE_CPU),
                 ::testing::Values(planar_4D, planarChannels_4D, blocked8_4D)),
         ConcatLayerCPUTest::getTestCaseName);
 
-INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block8, ConcatLayerCPUTest,
-        ::testing::Combine(
-                ::testing::Values(1, 2, 3),
-                ::testing::Values(std::vector<std::vector<size_t>>{{2, 16, 3, 5},
-                                                                   {2, 16, 3, 5}}),
-                ::testing::ValuesIn(netPrecisions),
-                ::testing::Values(CommonTestUtils::DEVICE_CPU),
-                ::testing::Values(planar_4D_ref, planarChannels_4D, blocked8_4D_ref)),
-        ConcatLayerCPUTest::getTestCaseName);
-
 INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block16inPlace, ConcatLayerCPUTest,
         ::testing::Combine(
                 ::testing::Values(0, 1),
-                ::testing::Values(std::vector<std::vector<size_t>>{{1, 32, 3, 5},
-                                                                   {1, 32, 3, 5}}),
+                ::testing::Values(static_shapes_to_test_representation({{1, 32, 3, 5}, {1, 32, 3, 5}})),
                 ::testing::ValuesIn(netPrecisions),
-                ::testing::Values(CommonTestUtils::DEVICE_CPU),
                 ::testing::Values(blocked16_4D)),
         ConcatLayerCPUTest::getTestCaseName);
 
-INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block16, ConcatLayerCPUTest,
-        ::testing::Combine(
-                ::testing::Values(1, 2, 3),
-                ::testing::Values(std::vector<std::vector<size_t>>{{3, 32, 3, 5},
-                                                                   {3, 32, 3, 5}}),
-                ::testing::ValuesIn(netPrecisions),
-                ::testing::Values(CommonTestUtils::DEVICE_CPU),
-                ::testing::Values(blocked16_4D_ref)),
-        ConcatLayerCPUTest::getTestCaseName);
-
 INSTANTIATE_TEST_SUITE_P(concat_Concat5D_CPU_Block8inPlace, ConcatLayerCPUTest,
         ::testing::Combine(
                 ::testing::Values(0, 1),
-                ::testing::Values(std::vector<std::vector<size_t>>{{1, 16, 3, 5, 7},
-                                                                   {1, 16, 3, 5, 7}}),
+                ::testing::Values(static_shapes_to_test_representation({{1, 16, 3, 5, 7}, {1, 16, 3, 5, 7}})),
                 ::testing::ValuesIn(netPrecisions),
-                ::testing::Values(CommonTestUtils::DEVICE_CPU),
                 ::testing::Values(planar_5D, planarChannels_5D, blocked8_5D)),
         ConcatLayerCPUTest::getTestCaseName);
 
-INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block8, ConcatLayerCPUTest,
-        ::testing::Combine(
-                ::testing::Values(2, 3, 4),
-                ::testing::Values(std::vector<std::vector<size_t>>{{2, 16, 3, 5, 7},
-                                                                   {2, 16, 3, 5, 7}}),
-                ::testing::ValuesIn(netPrecisions),
-                ::testing::Values(CommonTestUtils::DEVICE_CPU),
-                ::testing::Values(planar_5D_ref, planarChannels_5D, blocked8_5D_ref)),
-        ConcatLayerCPUTest::getTestCaseName);
-
 INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block16inPlace, ConcatLayerCPUTest,
         ::testing::Combine(
                 ::testing::Values(0, 1),
-                ::testing::Values(std::vector<std::vector<size_t>>{{1, 32, 3, 5, 7},
-                                                                   {1, 32, 3, 5, 7}}),
+                ::testing::Values(static_shapes_to_test_representation({{1, 32, 3, 5, 7}, {1, 32, 3, 5, 7}})),
                 ::testing::ValuesIn(netPrecisions),
-                ::testing::Values(CommonTestUtils::DEVICE_CPU),
                 ::testing::Values(blocked16_5D)),
         ConcatLayerCPUTest::getTestCaseName);
 
-INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block16, ConcatLayerCPUTest,
-        ::testing::Combine(
-                ::testing::Values(2, 3, 4),
-                ::testing::Values(std::vector<std::vector<size_t>>{{2, 32, 3, 5, 7},
-                                                                   {2, 32, 3, 5, 7}}),
-                ::testing::ValuesIn(netPrecisions),
-                ::testing::Values(CommonTestUtils::DEVICE_CPU),
-                ::testing::Values(blocked16_5D_ref)),
-        ConcatLayerCPUTest::getTestCaseName);
-
 INSTANTIATE_TEST_SUITE_P(smoke_Concat_inPlace, ConcatLayerCPUTest,
         ::testing::Combine(
                 ::testing::Values(0, 1, 2),
-                ::testing::Values(std::vector<std::vector<size_t>>{{1, 1, 1, 10},
-                                                                   {1, 1, 1, 10}},
-                                  std::vector<std::vector<size_t>>{{1, 1, 5},
-                                                                   {1, 1, 5}}),
+                ::testing::ValuesIn(std::vector<std::vector<InputShape>>{
+                        static_shapes_to_test_representation({{1, 1, 1, 10}, {1, 1, 1, 10}}),
+                        static_shapes_to_test_representation({{1, 1, 5}, {1, 1, 5}})}),
                 ::testing::ValuesIn(netPrecisions),
-                ::testing::Values(CommonTestUtils::DEVICE_CPU),
                 ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})),
         ConcatLayerCPUTest::getTestCaseName);
 
-INSTANTIATE_TEST_SUITE_P(smoke_Concat_byBatch, ConcatLayerCPUTest,
-        ::testing::Combine(
-                ::testing::Values(0),
-                ::testing::Values(std::vector<std::vector<size_t>>{{5, 2, 2, 2},
-                                                                   {2, 2, 2, 2}},
-                                  std::vector<std::vector<size_t>>{{1, 3, 5},
-                                                                   {3, 3, 5}},
-                                  std::vector<std::vector<size_t>>{{4, 3, 2},
-                                                                   {1, 3, 2}}),
-                ::testing::ValuesIn(netPrecisions),
-                ::testing::Values(CommonTestUtils::DEVICE_CPU),
-                ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})),
-        ConcatLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_Concat_3D, ConcatLayerCPUTest,
-        ::testing::Combine(
-                ::testing::Values(1, 2),
-                ::testing::Values(std::vector<std::vector<size_t>>{{2, 4, 5},
-                                                                   {2, 4, 5}}),
-                ::testing::ValuesIn(netPrecisions),
-                ::testing::Values(CommonTestUtils::DEVICE_CPU),
-                ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})),
-        ConcatLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_Concat_2D, ConcatLayerCPUTest,
-        ::testing::Combine(
-                ::testing::Values(1),
-                ::testing::Values(std::vector<std::vector<size_t>>{{3, 2},
-                                                                   {3, 10}}),
-                ::testing::ValuesIn(netPrecisions),
-                ::testing::Values(CommonTestUtils::DEVICE_CPU),
-                ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})),
-        ConcatLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_Concat_1D, ConcatLayerCPUTest,
-        ::testing::Combine(
-                ::testing::Values(0),
-                ::testing::Values(std::vector<std::vector<size_t>>{{5},
-                                                                   {2},
-                                                                   {1},
-                                                                   {3}}),
-                ::testing::ValuesIn(netPrecisions),
-                ::testing::Values(CommonTestUtils::DEVICE_CPU),
-                ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})),
-        ConcatLayerCPUTest::getTestCaseName);
-
 } // namespace
 } // namespace CPULayerTestsDefinitions