[CPU] Concat dynamism support (#8405)

This commit is contained in:
Maxim Andronov 2021-11-16 14:32:03 +03:00 committed by GitHub
parent 5512c36358
commit b9c5a477b4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 606 additions and 213 deletions

View File

@ -33,11 +33,6 @@ namespace {
bool MKLDNNConcatNode::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
try {
if (isDynamicNgraphNode(op)) {
errorMessage = "Doesn't support op with dynamic shapes";
return false;
}
const auto concatOp = ngraph::as_type_ptr<const ngraph::op::v0::Concat>(op);
if (!concatOp) {
errorMessage = "Node is not an instance of the Concat operation.";
@ -66,14 +61,14 @@ MKLDNNConcatNode::MKLDNNConcatNode(const std::shared_ptr<ngraph::Node>& op, cons
}
void MKLDNNConcatNode::getSupportedDescriptors() {
auto& firstParentDims = getInputShapeAtPort(0).getStaticDims();
const auto& firstParentDims = getInputShapeAtPort(0).getDims();
for (size_t i = 1; i < getParentEdges().size(); i++) {
auto& dims = getInputShapeAtPort(i).getStaticDims();
const auto& dims = getInputShapeAtPort(i).getDims();
bool incorrectDims = false;
for (size_t j = 0; j < firstParentDims.size(); j++) {
if (j == axis)
continue;
if (dims.size() != firstParentDims.size() || firstParentDims[j] != dims[j]) {
if (dims.size() != firstParentDims.size() || !dimsEqualWeak(firstParentDims[j], dims[j])) {
incorrectDims = true;
break;
}
@ -84,10 +79,13 @@ void MKLDNNConcatNode::getSupportedDescriptors() {
}
// we need the first dims before axis to be 1 to avoid the reorder in the edge between the first parent and this concat
// TODO [DS]: inplace
if (!isDynamicNode()) {
const auto& childDims = outputShapes[0].getStaticDims();
if (std::all_of(childDims.begin(), childDims.begin() + axis, [](size_t dim) { return dim == 1; }))
canBeInPlace = true;
}
}
void MKLDNNConcatNode::initSupportedPrimitiveDescriptors() {
if (!supportedPrimitiveDescriptors.empty())
@ -116,14 +114,14 @@ void MKLDNNConcatNode::initSupportedPrimitiveDescriptors() {
// to check whether blocked layouts are applicable, the channels size should be evenly divisible by the block size to avoid the slow oneDNN ref implementation
if (dstShape.getRank() > channelAxis) {
for (auto item : { std::make_pair(8lu, LayoutType::nCsp8c), std::make_pair(16lu, LayoutType::nCsp16c)}) {
const VectorDims &blkDims = dstShape.getStaticDims();
if (blkDims[channelAxis] % item.first)
const VectorDims &blkDims = dstShape.getDims();
if (blkDims[channelAxis] == Shape::UNDEFINED_DIM || blkDims[channelAxis] % item.first != 0)
continue;
bool blocked = true;
for (size_t i = 0; i < getParentEdges().size(); i++) {
auto& srcDims = getInputShapeAtPort(i).getStaticDims();
if (srcDims[channelAxis] % item.first) {
auto& srcDims = getInputShapeAtPort(i).getDims();
if (srcDims[channelAxis] == Shape::UNDEFINED_DIM || srcDims[channelAxis] % item.first != 0) {
blocked = false;
break;
}
@ -153,7 +151,13 @@ void MKLDNNConcatNode::initSupportedPrimitiveDescriptors() {
for (size_t i = 0; i < getParentEdges().size(); ++i) {
config.inConfs[i].inPlace = -1;
config.inConfs[i].constant = false;
config.inConfs[i].desc = itr->second->createDesc(inputPrecision, getInputShapeAtPort(i)).cloneWithUndefStridesAndOffset();
auto desc = itr->second->createSharedDesc(inputPrecision, getInputShapeAtPort(i));
// TODO [DS]: inplace
if (isDynamicNode()) {
config.inConfs[i].desc = desc;
} else {
config.inConfs[i].desc = desc->cloneWithUndefStridesAndOffset();
}
}
supportedPrimitiveDescriptors.emplace_back(config, impl_desc_type::ref);
if (itr->first != LayoutType::nspc) {
@ -167,11 +171,12 @@ void MKLDNNConcatNode::initSupportedPrimitiveDescriptors() {
return;
}
}
// TODO [DS]: inplace
if (!canBeInPlace)
return;
// Optimized inplace case
for (auto refPdIndex : pdIndexesToReuse) {
const auto& refConfig = supportedPrimitiveDescriptors[refPdIndex].getConfig();
auto config = refConfig;
@ -259,7 +264,7 @@ void MKLDNNConcatNode::selectOptimalPrimitiveDescriptor() {
}
size_t maxCount = 0;
auto outDims = getOutputShapeAtPort(0).getStaticDims();
const auto &outDims = getOutputShapeAtPort(0).getDims();
LayoutType convertTo = LayoutType::ncsp;
for (auto &it : formatFrequency) {
if (it.second > maxCount) {
@ -276,13 +281,13 @@ void MKLDNNConcatNode::selectOptimalPrimitiveDescriptor() {
for (auto& item : { std::make_pair(8lu, LayoutType::nCsp8c), std::make_pair(16lu, LayoutType::nCsp16c) }) {
if (convertTo == item.second) {
if (outDims[1] % item.first != 0) {
if (outDims[channelAxis] == Shape::UNDEFINED_DIM || outDims[1] % item.first != 0) {
convertTo = LayoutType::ncsp;
break;
}
for (size_t i = 0; i < getParentEdges().size(); i++) {
auto& inpDims = getInputShapeAtPort(i).getStaticDims();
if (inpDims[1] % item.first != 0) {
const auto& inpDims = getInputShapeAtPort(i).getDims();
if (inpDims[channelAxis] == Shape::UNDEFINED_DIM || inpDims[1] % item.first != 0) {
convertTo = LayoutType::ncsp;
break;
}
@ -330,26 +335,27 @@ bool MKLDNNConcatNode::isOptimized() const {
return getSelectedPrimitiveDescriptor() && getSelectedPrimitiveDescriptor()->getConfig().inConfs[0].inPlace >= 0;
}
void MKLDNNConcatNode::createPrimitive() {
if (prim || isOptimized())
bool MKLDNNConcatNode::needPrepareParams() const {
if (canOptimizeNspc) {
return false;
}
return inputShapesModified();
}
void MKLDNNConcatNode::prepareParams() {
if (canOptimizeNspc || isOptimized())
return;
auto& dstMemPtr = getChildEdgeAt(0)->getMemoryPtr();
const auto& dstMemPtr = getChildEdgesAtPort(0)[0]->getMemoryPtr();
if (!dstMemPtr || !dstMemPtr->GetPrimitivePtr())
IE_THROW() << "Destination memory didn't allocate.";
if (getSelectedPrimitiveDescriptor() == nullptr)
IE_THROW() << "Preferable primitive descriptor is not set.";
//check if selected Tensor descriptor has nspc layout and concat axis is C
if (axis == channelAxis && getChildEdgeAt(0)->getMemory().getDesc().hasLayoutType(LayoutType::nspc)) {
canOptimizeNspc = true;
return;
}
std::vector<memory::desc> srcs_d;
for (size_t i = 0; i < getParentEdges().size(); i++) {
auto& srcMemPtr = getParentEdgeAt(i)->getMemoryPtr();
const auto& srcMemPtr = getParentEdgesAtPort(i)[0]->getMemoryPtr();
if (!srcMemPtr || !srcMemPtr->GetPrimitivePtr()) {
auto parent = getParentEdgeAt(i)->getParent();
IE_THROW() << "Source memory from " << parent->getName() << " didn't allocate for node "
@ -357,7 +363,7 @@ void MKLDNNConcatNode::createPrimitive() {
}
auto desc = srcMemPtr->GetDescWithType<DnnlMemoryDesc>()->getDnnlDesc();
auto& dims = getInputShapeAtPort(i).getStaticDims();
const auto& dims = srcMemPtr->getStaticDims();
for (size_t j = 0; j < dims.size(); j++) {
desc.data.dims[j] = dims[j];
}
@ -365,8 +371,8 @@ void MKLDNNConcatNode::createPrimitive() {
srcs_d.emplace_back(desc);
}
auto desc = getChildEdgeAt(0)->getMemory().GetDescWithType<DnnlMemoryDesc>()->getDnnlDesc();
auto& dims = getOutputShapeAtPort(0).getStaticDims();
auto desc = dstMemPtr->GetDescWithType<DnnlMemoryDesc>()->getDnnlDesc();
const auto& dims = dstMemPtr->getStaticDims();
for (size_t i = 0; i < dims.size(); i++) {
desc.data.dims[i] = dims[i];
desc.data.padded_dims[i] = dims[i];
@ -376,6 +382,14 @@ void MKLDNNConcatNode::createPrimitive() {
prim.reset(new concat(primitive_desc));
}
void MKLDNNConcatNode::createPrimitive() {
if (inputShapesDefined()) {
if (needPrepareParams())
prepareParams();
updateLastInputDims();
}
}
size_t MKLDNNConcatNode::inverseOrder(const SizeVector& order, size_t axis) {
for (size_t i = 0; i < order.size(); i++) {
if (axis == order[i]) {
@ -408,9 +422,7 @@ void MKLDNNConcatNode::initOptimalPrimitiveDescriptor() {
}
auto config = selected_pd->getConfig();
if (isConfigDefined(config))
return;
if (!isDynamicNode() && !isConfigDefined(config)) {
for (size_t i = 0; i < config.outConfs.size(); i++) {
if (config.outConfs[i].desc->isDefined())
continue;
@ -468,6 +480,10 @@ void MKLDNNConcatNode::initOptimalPrimitiveDescriptor() {
initDescriptor(config);
}
// check if selected Tensor descriptor has nspc layout and concat axis is C
canOptimizeNspc = axis == channelAxis && getSelectedPrimitiveDescriptor()->getConfig().outConfs.front().desc->hasLayoutType(LayoutType::nspc);
}
void MKLDNNConcatNode::execute(mkldnn::stream strm) {
if (isOptimized()) {
return;

View File

@ -23,6 +23,7 @@ public:
void selectOptimalPrimitiveDescriptor() override;
bool created() const override;
void execute(mkldnn::stream strm) override;
void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }
bool isOptimized() const;
@ -31,6 +32,9 @@ public:
return !isOptimized();
}
bool needPrepareParams() const override;
void prepareParams() override;
private:
size_t axis = 0;
bool canBeInPlace = false;

View File

@ -2,68 +2,82 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "ngraph_functions/builders.hpp"
#include "test_utils/cpu_test_utils.hpp"
using namespace InferenceEngine;
using namespace ov::test;
using namespace CPUTestUtils;
namespace CPULayerTestsDefinitions {
typedef std::tuple<
size_t, // Concat axis
std::vector<std::vector<size_t>>, // Input shapes
InferenceEngine::Precision, // Network precision
std::string, // Device name
std::vector<InputShape>, // Input shapes
ElementType, // Network precision
CPUSpecificParams
> concatCPUTestParams;
class ConcatLayerCPUTest : public testing::WithParamInterface<concatCPUTestParams>,
virtual public LayerTestsUtils::LayerTestsCommon, public CPUTestsBase {
virtual public SubgraphBaseTest, public CPUTestsBase {
public:
static std::string getTestCaseName(testing::TestParamInfo<concatCPUTestParams> obj) {
int axis;
std::vector<std::vector<size_t>> inputShapes;
InferenceEngine::Precision netPrecision;
std::string targetName;
std::vector<InputShape> inputShapes;
ElementType netPrecision;
CPUSpecificParams cpuParams;
std::tie(axis, inputShapes, netPrecision, targetName, cpuParams) = obj.param;
std::tie(axis, inputShapes, netPrecision, cpuParams) = obj.param;
std::ostringstream result;
result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
result << "IS=";
for (const auto& shape : inputShapes) {
result << CommonTestUtils::partialShape2str({shape.first}) << "_";
}
result << "TS=";
for (const auto& shape : inputShapes) {
result << "(";
if (!shape.second.empty()) {
for (const auto& itr : shape.second) {
result << CommonTestUtils::vec2str(itr);
}
}
result << ")_";
}
result << "axis=" << axis << "_";
result << "netPRC=" << netPrecision.name() << "_";
result << "trgDev=" << targetName << "_";
result << "netPRC=" << netPrecision << "_";
result << CPUTestsBase::getTestCaseName(cpuParams);
return result.str();
}
protected:
void SetUp() override {
targetDevice = CommonTestUtils::DEVICE_CPU;
int axis;
std::vector<std::vector<size_t>> inputShape;
InferenceEngine::Precision netPrecision;
std::vector<InputShape> inputShape;
ElementType netPrecision;
CPUSpecificParams cpuParams;
std::tie(axis, inputShape, netPrecision, targetDevice, cpuParams) = this->GetParam();
inPrc = outPrc = netPrecision;
std::tie(axis, inputShape, netPrecision, cpuParams) = this->GetParam();
std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
selectedType += std::string("_") + inPrc.name();
selectedType += std::string("_") + InferenceEngine::details::convertPrecision(netPrecision).name();
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
auto params = ngraph::builder::makeParams(ngPrc, inputShape);
init_input_shapes(inputShape);
auto params = ngraph::builder::makeDynamicParams(netPrecision, inputDynamicShapes);
auto paramOuts = ngraph::helpers::convert2OutputVector(
ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
auto concat = std::make_shared<ngraph::opset1::Concat>(paramOuts, axis);
function = makeNgraphFunction(ngPrc, params, concat, "concat");
function = makeNgraphFunction(netPrecision, params, concat, "ConcatCPU");
}
};
TEST_P(ConcatLayerCPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Run();
CheckPluginRelatedResults(executableNetwork, "Concatenation");
run();
// CheckPluginRelatedResults(executableNetwork, "Concatenation");
}
namespace {
@ -89,151 +103,510 @@ const auto blocked16_4D_ref = CPUSpecificParams{{nChw16c}, {nChw16c}, {}, "ref"}
const auto blocked16_5D_ref = CPUSpecificParams{{nCdhw16c}, {nCdhw16c}, {}, "ref"};
// List of precisions natively supported by mkldnn.
const std::vector<Precision> netPrecisions = {
Precision::I8,
Precision::I32,
Precision::FP32,
Precision::BF16
const std::vector<ElementType> netPrecisions = {
ElementType::i8,
ElementType::i32,
ElementType::f32,
ElementType::bf16
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block8_static, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(1, 2, 3),
::testing::Values(static_shapes_to_test_representation({{2, 16, 3, 5}, {2, 16, 3, 5}})),
::testing::ValuesIn(netPrecisions),
::testing::Values(planar_4D_ref, planarChannels_4D, blocked8_4D_ref)),
ConcatLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block16_static, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(1, 2, 3),
::testing::Values(static_shapes_to_test_representation({{3, 32, 3, 5}, {3, 32, 3, 5}})),
::testing::ValuesIn(netPrecisions),
::testing::Values(blocked16_4D_ref)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes4D_Block_axis1 = {
{
// {{dynamic shape}, {{static shape case1}, {static shape case2}, ...}}
{{-1, 32, -1, -1}, {{2, 32, 5, 7}, {1, 32, 10, 2}, {3, 32, 1, 8}}}, // input 0
{{-1, 16, -1, -1}, {{2, 16, 5, 7}, {1, 16, 10, 2}, {3, 16, 1, 8}}}, // input 1
{{-1, 64, -1, -1}, {{2, 64, 5, 7}, {1, 64, 10, 2}, {3, 64, 1, 8}}} // input 2
},
{
{{{1, 5}, 32, {1, 10}, {2, 8}}, {{2, 32, 5, 7}, {1, 32, 10, 2}, {3, 32, 1, 8}}},
{{{1, 3}, 16, {1, 10}, {2, 8}}, {{2, 16, 5, 7}, {1, 16, 10, 2}, {3, 16, 1, 8}}},
{{{1, 3}, 64, {1, 10}, {2, 8}}, {{2, 64, 5, 7}, {1, 64, 10, 2}, {3, 64, 1, 8}}}
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block_dynamic_axis_1, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(1),
::testing::ValuesIn(inputShapes4D_Block_axis1),
::testing::ValuesIn(netPrecisions),
::testing::Values(blocked8_4D_ref, blocked16_4D_ref)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes4D_axis1 = {
{
{{-1, -1, -1, -1}, {{2, 32, 5, 7}, {1, 18, 10, 2}, {3, 8, 1, 8}}},
{{-1, -1, -1, -1}, {{2, 16, 5, 7}, {1, 5, 10, 2}, {3, 3, 1, 8}}},
{{-1, -1, -1, -1}, {{2, 64, 5, 7}, {1, 45, 10, 2}, {3, 1, 1, 8}}}
},
{
{{{1, 3}, {8, 32}, {1, 10}, {2, 8}}, {{2, 32, 5, 7}, {1, 18, 10, 2}, {3, 8, 1, 8}}},
{{{1, 3}, {3, 16}, {1, 10}, {2, 8}}, {{2, 16, 5, 7}, {1, 5, 10, 2}, {3, 3, 1, 8}}},
{{{1, 3}, {1, 64}, {1, 10}, {2, 8}}, {{2, 64, 5, 7}, {1, 45, 10, 2}, {3, 1, 1, 8}}}
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_dynamic_axis_1, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(1),
::testing::ValuesIn(inputShapes4D_axis1),
::testing::ValuesIn(netPrecisions),
::testing::Values(planar_4D_ref, planarChannels_4D)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes4D_Block_axis2 = {
{
{{-1, 16, -1, -1}, {{2, 16, 5, 7}, {1, 16, 16, 2}, {3, 16, 2, 8}}},
{{-1, 16, -1, -1}, {{2, 16, 1, 7}, {1, 16, 3, 2}, {3, 16, 11, 8}}},
{{-1, 16, -1, -1}, {{2, 16, 10, 7}, {1, 16, 5, 2}, {3, 16, 1, 8}}},
},
{
{{{1, 3}, 16, {2, 16}, {2, 8}}, {{2, 16, 5, 7}, {1, 16, 16, 2}, {3, 16, 2, 8}}},
{{{1, 3}, 16, {1, 11}, {2, 8}}, {{2, 16, 1, 7}, {1, 16, 3, 2}, {3, 16, 11, 8}}},
{{{1, 3}, 16, {1, 10}, {2, 8}}, {{2, 16, 10, 7}, {1, 16, 5, 2}, {3, 16, 1, 8}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block_dynamic_axis_2, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(2),
::testing::ValuesIn(inputShapes4D_Block_axis2),
::testing::ValuesIn(netPrecisions),
::testing::Values(blocked8_4D_ref, blocked16_4D_ref)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes4D_axis2 = {
{
{{-1, -1, -1, -1}, {{2, 16, 5, 7}, {1, 16, 16, 2}, {3, 16, 2, 8}}},
{{-1, -1, -1, -1}, {{2, 16, 1, 7}, {1, 16, 3, 2}, {3, 16, 11, 8}}},
{{-1, -1, -1, -1}, {{2, 16, 10, 7}, {1, 16, 5, 2}, {3, 16, 1, 8}}},
},
{
{{{1, 3}, {1, 16}, {2, 16}, {2, 8}}, {{2, 16, 5, 7}, {1, 16, 16, 2}, {3, 16, 2, 8}}},
{{{1, 3}, {1, 16}, {1, 11}, {2, 8}}, {{2, 16, 1, 7}, {1, 16, 3, 2}, {3, 16, 11, 8}}},
{{{1, 3}, {1, 16}, {1, 10}, {2, 8}}, {{2, 16, 10, 7}, {1, 16, 5, 2}, {3, 16, 1, 8}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_dynamic_axis_2, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(2),
::testing::ValuesIn(inputShapes4D_axis2),
::testing::ValuesIn(netPrecisions),
::testing::Values(planar_4D_ref, planarChannels_4D)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes4D_Block_axis3 = {
{
{{-1, 32, -1, -1}, {{2, 32, 4, 5}, {1, 32, 1, 16}, {3, 32, 7, 2}, }},
{{-1, 32, -1, -1}, {{2, 32, 4, 1}, {1, 32, 1, 3}, {3, 32, 7, 11}}},
{{-1, 32, -1, -1}, {{2, 32, 4, 10}, {1, 32, 1, 5}, {3, 32, 7, 1}}},
},
{
{{{1, 3}, 32, {1, 7}, {2, 16}}, {{2, 32, 4, 5}, {1, 32, 1, 16}, {3, 32, 7, 2}}},
{{{1, 3}, 32, {1, 7}, {1, 11}}, {{2, 32, 4, 1}, {1, 32, 1, 3}, {3, 32, 7, 11}}},
{{{1, 3}, 32, {1, 7}, {1, 10}}, {{2, 32, 4, 10}, {1, 32, 1, 5}, {3, 32, 7, 1}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block_dynamic_axis_3, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(3),
::testing::ValuesIn(inputShapes4D_Block_axis3),
::testing::ValuesIn(netPrecisions),
::testing::Values(blocked8_4D_ref, blocked16_4D_ref)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes4D_axis3 = {
{
{{-1, -1, -1, -1}, {{2, 32, 4, 5}, {1, 32, 1, 16}, {3, 32, 7, 2}}},
{{-1, -1, -1, -1}, {{2, 32, 4, 1}, {1, 32, 1, 3}, {3, 32, 7, 11}}},
{{-1, -1, -1, -1}, {{2, 32, 4, 10}, {1, 32, 1, 5}, {3, 32, 7, 1}}},
},
{
{{{1, 3}, {1, 32}, {1, 7}, {2, 16}}, {{2, 32, 4, 5}, {1, 32, 1, 16}, {3, 32, 7, 2}}},
{{{1, 3}, {1, 32}, {1, 7}, {1, 11}}, {{2, 32, 4, 1}, {1, 32, 1, 3}, {3, 32, 7, 11}}},
{{{1, 3}, {1, 32}, {1, 7}, {1, 10}}, {{2, 32, 4, 10}, {1, 32, 1, 5}, {3, 32, 7, 1}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_dynamic_axis_3, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(3),
::testing::ValuesIn(inputShapes4D_axis3),
::testing::ValuesIn(netPrecisions),
::testing::Values(planar_4D_ref, planarChannels_4D)),
ConcatLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block8_static, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(2, 3, 4),
::testing::Values(static_shapes_to_test_representation({{2, 16, 3, 5, 7}, {2, 16, 3, 5, 7}})),
::testing::ValuesIn(netPrecisions),
::testing::Values(planar_5D_ref, planarChannels_5D, blocked8_5D_ref)),
ConcatLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block16_static, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(2, 3, 4),
::testing::Values(static_shapes_to_test_representation({{2, 32, 3, 5, 7}, {2, 32, 3, 5, 7}})),
::testing::ValuesIn(netPrecisions),
::testing::Values(blocked16_5D_ref)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes5D_Block_axis1 = {
{
{{-1, 32, -1, -1, -1}, {{2, 32, 5, 7, 6}, {1, 32, 10, 2, 8}, {3, 32, 1, 8, 10}}},
{{-1, 16, -1, -1, -1}, {{2, 16, 5, 7, 6}, {1, 16, 10, 2, 8}, {3, 16, 1, 8, 10}}},
{{-1, 64, -1, -1, -1}, {{2, 64, 5, 7, 6}, {1, 64, 10, 2, 8}, {3, 64, 1, 8, 10}}},
},
{
{{{1, 3}, 32, {1, 10}, {2, 8}, {6, 10}}, {{2, 32, 5, 7, 6}, {1, 32, 10, 2, 8}, {3, 32, 1, 8, 10}}},
{{{1, 3}, 16, {1, 10}, {2, 8}, {6, 10}}, {{2, 16, 5, 7, 6}, {1, 16, 10, 2, 8}, {3, 16, 1, 8, 10}}},
{{{1, 3}, 64, {1, 10}, {2, 8}, {6, 10}}, {{2, 64, 5, 7, 6}, {1, 64, 10, 2, 8}, {3, 64, 1, 8, 10}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block_dynamic_axis_1, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(1),
::testing::ValuesIn(inputShapes5D_Block_axis1),
::testing::ValuesIn(netPrecisions),
::testing::Values(blocked8_5D_ref, blocked16_5D_ref)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes5D_axis1 = {
{
{{-1, -1, -1, -1, -1}, {{2, 5, 5, 7, 6}, {1, 3, 10, 2, 8}, {3, 4, 1, 8, 10}}},
{{-1, -1, -1, -1, -1}, {{2, 16, 5, 7, 6}, {1, 20, 10, 2, 8}, {3, 5, 1, 8, 10}, }},
{{-1, -1, -1, -1, -1}, {{2, 1, 5, 7, 6}, {1, 17, 10, 2, 8}, {3, 5, 1, 8, 10}}},
},
{
{{{1, 3}, {3, 5}, {1, 10}, {2, 8}, {6, 10}}, {{2, 5, 5, 7, 6}, {1, 3, 10, 2, 8}, {3, 4, 1, 8, 10}}},
{{{1, 3}, {5, 20}, {1, 10}, {2, 8}, {4, 10}}, {{2, 16, 5, 7, 6}, {1, 20, 10, 2, 8}, {3, 5, 1, 8, 10}, }},
{{{1, 3}, {1, 17}, {1, 10}, {2, 8}, {6, 10}}, {{2, 1, 5, 7, 6}, {1, 17, 10, 2, 8}, {3, 5, 1, 8, 10}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_dynamic_axis_1, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(1),
::testing::ValuesIn(inputShapes5D_axis1),
::testing::ValuesIn(netPrecisions),
::testing::Values(planar_5D_ref, planarChannels_5D)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes5D_Block_axis2 = {
{
{{-1, 16, -1, -1, -1}, {{2, 16, 5, 8, 7}, {1, 16, 16, 1, 2}, {3, 16, 2, 5, 8}, }},
{{-1, 16, -1, -1, -1}, {{2, 16, 1, 8, 7}, {1, 16, 3, 1, 2}, {3, 16, 11, 5, 8}}},
{{-1, 16, -1, -1, -1}, {{2, 16, 10, 8, 7}, {1, 16, 5, 1, 2}, {3, 16, 1, 5, 8}}},
},
{
{{{1, 3}, 16, {2, 16}, {1, 8}, {2, 8}}, {{2, 16, 5, 8, 7}, {1, 16, 16, 1, 2}, {3, 16, 2, 5, 8}, }},
{{{1, 5}, 16, {1, 11}, {1, 8}, {1, 8}}, {{2, 16, 1, 8, 7}, {1, 16, 3, 1, 2}, {3, 16, 11, 5, 8}}},
{{{1, 6}, 16, {1, 10}, {1, 8}, {2, 10}}, {{2, 16, 10, 8, 7}, {1, 16, 5, 1, 2}, {3, 16, 1, 5, 8}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block_dynamic_axis_2, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(2),
::testing::ValuesIn(inputShapes5D_Block_axis2),
::testing::ValuesIn(netPrecisions),
::testing::Values(blocked8_5D_ref, blocked16_5D_ref)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes5D_axis2 = {
{
{{-1, -1, -1, -1, -1}, {{2, 4, 5, 8, 7}, {1, 20, 16, 1, 2}, {3, 8, 2, 5, 8}}},
{{-1, -1, -1, -1, -1}, {{2, 4, 1, 8, 7}, {1, 20, 3, 1, 2}, {3, 8, 11, 5, 8}}},
{{-1, -1, -1, -1, -1}, {{2, 4, 10, 8, 7}, {1, 20, 5, 1, 2}, {3, 8, 1, 5, 8}}},
},
{
{{{1, 3}, {4, 20}, {1, 16}, {1, 8}, {2, 8}}, {{2, 4, 5, 8, 7}, {1, 20, 16, 1, 2}, {3, 8, 2, 5, 8}}},
{{{1, 3}, {4, 20}, {1, 11}, {1, 10}, {1, 15}}, {{2, 4, 1, 8, 7}, {1, 20, 3, 1, 2}, {3, 8, 11, 5, 8}}},
{{{1, 3}, {1, 20}, {1, 15}, {1, 10}, {2, 8}}, {{2, 4, 10, 8, 7}, {1, 20, 5, 1, 2}, {3, 8, 1, 5, 8}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_dynamic_axis_2, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(2),
::testing::ValuesIn(inputShapes5D_axis2),
::testing::ValuesIn(netPrecisions),
::testing::Values(planar_5D_ref, planarChannels_5D)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes5D_Block_axis3 = {
{
{{-1, 32, -1, -1, -1}, {{2, 32, 4, 5, 7}, {1, 32, 1, 16, 3}, {3, 32, 7, 2, 4}}},
{{-1, 32, -1, -1, -1}, {{2, 32, 4, 1, 7}, {1, 32, 1, 3, 3}, {3, 32, 7, 11, 4}}},
{{-1, 32, -1, -1, -1}, {{2, 32, 4, 10, 7}, {1, 32, 1, 5, 3}, {3, 32, 7, 1, 4}}},
},
{
{{{1, 3}, 32, {1, 7}, {2, 16}, {3, 7}}, {{2, 32, 4, 5, 7}, {1, 32, 1, 16, 3}, {3, 32, 7, 2, 4}, }},
{{{1, 5}, 32, {1, 7}, {1, 11}, {3, 7}}, {{2, 32, 4, 1, 7}, {1, 32, 1, 3, 3}, {3, 32, 7, 11, 4}}},
{{{1, 6}, 32, {1, 15}, {1, 10}, {1, 20}}, {{2, 32, 4, 10, 7}, {1, 32, 1, 5, 3}, {3, 32, 7, 1, 4}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block_dynamic_axis_3, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(3),
::testing::ValuesIn(inputShapes5D_Block_axis3),
::testing::ValuesIn(netPrecisions),
::testing::Values(blocked8_5D_ref, blocked16_5D_ref)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes5D_axis3 = {
{
{{-1, -1, -1, -1, -1}, {{2, 32, 4, 5, 7}, {1, 11, 1, 16, 3}, {3, 7, 7, 2, 4}}},
{{-1, -1, -1, -1, -1}, {{2, 32, 4, 1, 7}, {1, 11, 1, 3, 3}, {3, 7, 7, 11, 4}}},
{{-1, -1, -1, -1, -1}, {{2, 32, 4, 10, 7}, {1, 11, 1, 5, 3}, {3, 7, 7, 1, 4}}},
},
{
{{{1, 7}, {7, 32}, {1, 7}, {1, 16}, {3, 14}}, {{2, 32, 4, 5, 7}, {1, 11, 1, 16, 3}, {3, 7, 7, 2, 4}, }},
{{{1, 7}, {7, 32}, {1, 10}, {1, 11}, {3, 7}}, {{2, 32, 4, 1, 7}, {1, 11, 1, 3, 3}, {3, 7, 7, 11, 4}}},
{{{1, 7}, {1, 32}, {1, 10}, {1, 10}, {1, 10}}, {{2, 32, 4, 10, 7}, {1, 11, 1, 5, 3}, {3, 7, 7, 1, 4}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_dynamic_axis_3, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(3),
::testing::ValuesIn(inputShapes5D_axis3),
::testing::ValuesIn(netPrecisions),
::testing::Values(planar_5D_ref, planarChannels_5D)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes5D_Block_axis4 = {
{
{{-1, 32, -1, -1, -1}, {{2, 32, 4, 5, 5}, {1, 32, 1, 1, 16}, {3, 32, 7, 9, 2}, }},
{{-1, 32, -1, -1, -1}, {{2, 32, 4, 5, 1}, {1, 32, 1, 1, 3}, {3, 32, 7, 9, 11}}},
{{-1, 32, -1, -1, -1}, {{2, 32, 4, 5, 10}, {1, 32, 1, 1, 5}, {3, 32, 7, 9, 1}}},
},
{
{{{1, 15}, 32, {1, 10}, {1, 10}, {1, 16}}, {{2, 32, 4, 5, 5}, {1, 32, 1, 1, 16}, {3, 32, 7, 9, 2}, }},
{{{1, 15}, 32, {1, 10}, {1, 10}, {1, 11}}, {{2, 32, 4, 5, 1}, {1, 32, 1, 1, 3}, {3, 32, 7, 9, 11}}},
{{{1, 15}, 32, {1, 10}, {1, 10}, {1, 11}}, {{2, 32, 4, 5, 10}, {1, 32, 1, 1, 5}, {3, 32, 7, 9, 1}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block_dynamic_axis_4, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(4),
::testing::ValuesIn(inputShapes5D_Block_axis4),
::testing::ValuesIn(netPrecisions),
::testing::Values(blocked8_5D_ref, blocked16_5D_ref)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes5D_axis4 = {
{
{{-1, -1, -1, -1, -1}, {{2, 1, 4, 5, 5}, {1, 4, 1, 1, 16}, {3, 14, 7, 9, 2}}},
{{-1, -1, -1, -1, -1}, {{2, 1, 4, 5, 1}, {1, 4, 1, 1, 3}, {3, 14, 7, 9, 11}}},
{{-1, -1, -1, -1, -1}, {{2, 1, 4, 5, 10}, {1, 4, 1, 1, 5}, {3, 14, 7, 9, 1}}},
},
{
{{{1, 3}, {1, 14}, {1, 7}, {1, 10}, {2, 16}}, {{2, 1, 4, 5, 5}, {1, 4, 1, 1, 16}, {3, 14, 7, 9, 2}}},
{{{1, 3}, {1, 14}, {1, 7}, {1, 9}, {1, 11}}, {{2, 1, 4, 5, 1}, {1, 4, 1, 1, 3}, {3, 14, 7, 9, 11}}},
{{{1, 3}, {1, 14}, {1, 7}, {1, 9}, {1, 10}}, {{2, 1, 4, 5, 10}, {1, 4, 1, 1, 5}, {3, 14, 7, 9, 1}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_dynamic_axis_4, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(4),
::testing::ValuesIn(inputShapes5D_axis4),
::testing::ValuesIn(netPrecisions),
::testing::Values(planar_5D_ref, planarChannels_5D)),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes_byBatch = {
static_shapes_to_test_representation({{5, 2, 2, 2}, {2, 2, 2, 2}}),
static_shapes_to_test_representation({{1, 3, 5}, {3, 3, 5}}),
static_shapes_to_test_representation({{4, 3, 2}, {1, 3, 2}}),
// 5D
{
{{-1, -1, -1, -1, -1}, {{10, 32, 4, 5, 5}, {4, 7, 1, 1, 3}, {3, 20, 7, 9, 1}, }},
{{-1, -1, -1, -1, -1}, {{5, 32, 4, 5, 5}, {7, 7, 1, 1, 3}, {3, 20, 7, 9, 1}}},
{{-1, -1, -1, -1, -1}, {{1, 32, 4, 5, 5}, {1, 7, 1, 1, 3}, {6, 20, 7, 9, 1}}},
},
{
{{{3, 10}, {7, 32}, {1, 9}, {1, 10}, {1, 5}}, {{10, 32, 4, 5, 5}, {4, 7, 1, 1, 3}, {3, 20, 7, 9, 1}, }},
{{{3, 7}, {7, 32}, {1, 7}, {1, 9}, {1, 5}}, {{5, 32, 4, 5, 5}, {7, 7, 1, 1, 3}, {3, 20, 7, 9, 1}}},
{{{1, 6}, {7, 32}, {1, 7}, {1, 9}, {1, 5}}, {{1, 32, 4, 5, 5}, {1, 7, 1, 1, 3}, {6, 20, 7, 9, 1}}},
},
// 4D
{
{{-1, -1, -1, -1}, {{10, 32, 4, 5}, {4, 7, 1, 1}, {3, 20, 7, 9}, }},
{{-1, -1, -1, -1}, {{5, 32, 4, 5}, {7, 7, 1, 1}, {3, 20, 7, 9}}},
{{-1, -1, -1, -1}, {{1, 32, 4, 5}, {1, 7, 1, 1}, {6, 20, 7, 9}}},
},
{
{{{1, 10}, {1, 32}, {1, 7}, {1, 9}}, {{10, 32, 4, 5}, {4, 7, 1, 1}, {3, 20, 7, 9}, }},
{{{3, 7}, {7, 32}, {1, 7}, {1, 9}}, {{5, 32, 4, 5}, {7, 7, 1, 1}, {3, 20, 7, 9}}},
{{{1, 6}, {7, 32}, {1, 7}, {1, 9}}, {{1, 32, 4, 5}, {1, 7, 1, 1}, {6, 20, 7, 9}}},
}
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat_byBatch, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(0),
::testing::ValuesIn(inputShapes_byBatch),
::testing::ValuesIn(netPrecisions),
::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes3D_axis1 = {
static_shapes_to_test_representation({{2, 4, 5}, {2, 4, 5}}),
{
{{-1, -1, -1}, {{2, 5, 12}, {1, 16, 1}, {5, 2, 6}, }},
{{-1, -1, -1}, {{2, 1, 12}, {1, 3, 1}, {5, 11, 6}}},
{{-1, -1, -1}, {{2, 10, 12}, {1, 5, 1}, {5, 1, 6}}},
},
{
{{{1, 5}, {2, 16}, {1, 12}}, {{2, 5, 12}, {1, 16, 1}, {5, 2, 6}, }},
{{{1, 5}, {1, 11}, {1, 21}}, {{2, 1, 12}, {1, 3, 1}, {5, 11, 6}}},
{{{1, 5}, {1, 10}, {1, 12}}, {{2, 10, 12}, {1, 5, 1}, {5, 1, 6}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat_3D_axis1, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(1),
::testing::ValuesIn(inputShapes3D_axis1),
::testing::ValuesIn(netPrecisions),
::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes3D_axis2 = {
static_shapes_to_test_representation({{2, 4, 5}, {2, 4, 5}}),
{
{{-1, -1, -1}, {{4, 4, 5}, {3, 2, 16}, {1, 1, 2}}},
{{-1, -1, -1}, {{4, 4, 1}, {3, 2, 3}, {1, 1, 11}}},
{{-1, -1, -1}, {{4, 4, 10}, {3, 2, 5}, {1, 1, 1}}},
},
{
{{{1, 4}, {1, 4}, {2, 16}}, {{4, 4, 5}, {3, 2, 16}, {1, 1, 2}, }},
{{{1, 4}, {1, 4}, {1, 11}}, {{4, 4, 1}, {3, 2, 3}, {1, 1, 11}}},
{{{1, 4}, {1, 4}, {1, 10}}, {{4, 4, 10}, {3, 2, 5}, {1, 1, 1}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat_3D_axis2, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(2),
::testing::ValuesIn(inputShapes3D_axis2),
::testing::ValuesIn(netPrecisions),
::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes2D_axis1 = {
static_shapes_to_test_representation({{3, 2}, {3, 10}}),
{
{{-1, -1}, {{19, 5}, {1, 16}, {8, 2}, }},
{{-1, -1}, {{19, 1}, {1, 3}, {8, 11}}},
{{-1, -1}, {{19, 10}, {1, 5}, {8, 1}}},
},
{
{{{1, 19}, {2, 16}}, {{19, 5}, {1, 16}, {8, 2}, }},
{{{1, 19}, {1, 11}}, {{19, 1}, {1, 3}, {8, 11}}},
{{{1, 19}, {1, 10}}, {{19, 10}, {1, 5}, {8, 1}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat_2D_axis1, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(1),
::testing::ValuesIn(inputShapes2D_axis1),
::testing::ValuesIn(netPrecisions),
::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})),
ConcatLayerCPUTest::getTestCaseName);
const std::vector<std::vector<InputShape>> inputShapes1D = {
static_shapes_to_test_representation({ov::Shape{5}, ov::Shape{5}}),
static_shapes_to_test_representation({ov::Shape{2}, ov::Shape{2}}),
static_shapes_to_test_representation({ov::Shape{1}, ov::Shape{1}}),
static_shapes_to_test_representation({ov::Shape{3}, ov::Shape{3}}),
{
{{-1}, {{19}, {8}, {5}}},
{{-1}, {{19}, {8}, {5}}},
{{-1}, {{19}, {8}, {5}}},
},
{
{{{1, 20}}, {{19}, {8}, {5}}},
{{{1, 20}}, {{19}, {8}, {5}}},
{{{1, 20}}, {{19}, {8}, {5}}},
},
};
INSTANTIATE_TEST_SUITE_P(smoke_Concat_1D, ConcatLayerCPUTest,
::testing::Combine(
::testing::Values(0),
::testing::ValuesIn(inputShapes1D),
::testing::ValuesIn(netPrecisions),
::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})),
ConcatLayerCPUTest::getTestCaseName);
// ============================================== inPlace cases ============================================
// In-place 4D concat over batch/channel axes with 8-channel-friendly layouts.
// Shapes are static only: in-place concat is not supported for dynamic nodes yet.
// Fix: dropped the leftover pre-dynamic-shapes arguments (raw size_t shape vector and
// CommonTestUtils::DEVICE_CPU) — with them the Combine tuple no longer matched the
// (axis, inputShapes, precision, CPUSpecificParams) ParamType used by the other suites.
INSTANTIATE_TEST_SUITE_P(concat_Concat4D_CPU_Block8inPlace, ConcatLayerCPUTest,
                        ::testing::Combine(
                                ::testing::Values(0, 1),
                                ::testing::Values(static_shapes_to_test_representation({{1, 8, 3, 5}, {1, 8, 3, 5}})),
                                ::testing::ValuesIn(netPrecisions),
                                ::testing::Values(planar_4D, planarChannels_4D, blocked8_4D)),
                        ConcatLayerCPUTest::getTestCaseName);
// 4D concat over non-batch axes, 8-channel blocked layouts ("ref" variants).
// Fix: migrated to the dynamic-shapes API — shapes converted to InputShape via
// static_shapes_to_test_representation and the obsolete DEVICE_CPU argument removed,
// so the Combine tuple matches the (axis, inputShapes, precision, CPUSpecificParams) ParamType.
INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block8, ConcatLayerCPUTest,
                        ::testing::Combine(
                                ::testing::Values(1, 2, 3),
                                ::testing::Values(static_shapes_to_test_representation({{2, 16, 3, 5}, {2, 16, 3, 5}})),
                                ::testing::ValuesIn(netPrecisions),
                                ::testing::Values(planar_4D_ref, planarChannels_4D, blocked8_4D_ref)),
                        ConcatLayerCPUTest::getTestCaseName);
// In-place 4D concat, 16-channel blocked layout; static shapes only (no in-place for dynamic).
// Fix: removed the stale raw size_t shape vector and DEVICE_CPU arguments left over from
// the pre-dynamic-shapes parameter list.
INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block16inPlace, ConcatLayerCPUTest,
                        ::testing::Combine(
                                ::testing::Values(0, 1),
                                ::testing::Values(static_shapes_to_test_representation({{1, 32, 3, 5}, {1, 32, 3, 5}})),
                                ::testing::ValuesIn(netPrecisions),
                                ::testing::Values(blocked16_4D)),
                        ConcatLayerCPUTest::getTestCaseName);
// 4D concat over non-batch axes, 16-channel blocked "ref" layout.
// Fix: migrated shapes to InputShape and dropped the obsolete DEVICE_CPU argument
// so the tuple matches the migrated ParamType.
INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block16, ConcatLayerCPUTest,
                        ::testing::Combine(
                                ::testing::Values(1, 2, 3),
                                ::testing::Values(static_shapes_to_test_representation({{3, 32, 3, 5}, {3, 32, 3, 5}})),
                                ::testing::ValuesIn(netPrecisions),
                                ::testing::Values(blocked16_4D_ref)),
                        ConcatLayerCPUTest::getTestCaseName);
// In-place 5D concat, 8-channel-friendly layouts; static shapes only.
// Fix: removed the stale raw size_t shape vector and DEVICE_CPU arguments.
INSTANTIATE_TEST_SUITE_P(concat_Concat5D_CPU_Block8inPlace, ConcatLayerCPUTest,
                        ::testing::Combine(
                                ::testing::Values(0, 1),
                                ::testing::Values(static_shapes_to_test_representation({{1, 16, 3, 5, 7}, {1, 16, 3, 5, 7}})),
                                ::testing::ValuesIn(netPrecisions),
                                ::testing::Values(planar_5D, planarChannels_5D, blocked8_5D)),
                        ConcatLayerCPUTest::getTestCaseName);
// 5D concat over spatial axes, 8-channel "ref" layouts.
// Fix: migrated shapes to InputShape and dropped the obsolete DEVICE_CPU argument.
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block8, ConcatLayerCPUTest,
                        ::testing::Combine(
                                ::testing::Values(2, 3, 4),
                                ::testing::Values(static_shapes_to_test_representation({{2, 16, 3, 5, 7}, {2, 16, 3, 5, 7}})),
                                ::testing::ValuesIn(netPrecisions),
                                ::testing::Values(planar_5D_ref, planarChannels_5D, blocked8_5D_ref)),
                        ConcatLayerCPUTest::getTestCaseName);
// In-place 5D concat, 16-channel blocked layout; static shapes only.
// Fix: removed the stale raw size_t shape vector and DEVICE_CPU arguments.
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block16inPlace, ConcatLayerCPUTest,
                        ::testing::Combine(
                                ::testing::Values(0, 1),
                                ::testing::Values(static_shapes_to_test_representation({{1, 32, 3, 5, 7}, {1, 32, 3, 5, 7}})),
                                ::testing::ValuesIn(netPrecisions),
                                ::testing::Values(blocked16_5D)),
                        ConcatLayerCPUTest::getTestCaseName);
// 5D concat over spatial axes, 16-channel blocked "ref" layout.
// Fix: migrated shapes to InputShape and dropped the obsolete DEVICE_CPU argument.
INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block16, ConcatLayerCPUTest,
                        ::testing::Combine(
                                ::testing::Values(2, 3, 4),
                                ::testing::Values(static_shapes_to_test_representation({{2, 32, 3, 5, 7}, {2, 32, 3, 5, 7}})),
                                ::testing::ValuesIn(netPrecisions),
                                ::testing::Values(blocked16_5D_ref)),
                        ConcatLayerCPUTest::getTestCaseName);
// Mixed-rank in-place concat cases (4D and 3D) where all dims before the axis are 1.
// Fix: the Combine had six entries — both the old raw size_t shape vectors and the new
// InputShape sets, plus DEVICE_CPU; kept only the dynamic-shapes-API arguments so the
// tuple matches the (axis, inputShapes, precision, CPUSpecificParams) ParamType.
INSTANTIATE_TEST_SUITE_P(smoke_Concat_inPlace, ConcatLayerCPUTest,
                        ::testing::Combine(
                                ::testing::Values(0, 1, 2),
                                ::testing::ValuesIn(std::vector<std::vector<InputShape>>{
                                        static_shapes_to_test_representation({{1, 1, 1, 10}, {1, 1, 1, 10}}),
                                        static_shapes_to_test_representation({{1, 1, 5}, {1, 1, 5}})}),
                                ::testing::ValuesIn(netPrecisions),
                                ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})),
                        ConcatLayerCPUTest::getTestCaseName);
// Concat over the batch axis (axis 0) for several rank/shape combinations.
// Fix: migrated the raw size_t shape sets to vector<InputShape> and dropped the
// obsolete DEVICE_CPU argument so the tuple matches the migrated ParamType.
INSTANTIATE_TEST_SUITE_P(smoke_Concat_byBatch, ConcatLayerCPUTest,
                        ::testing::Combine(
                                ::testing::Values(0),
                                ::testing::ValuesIn(std::vector<std::vector<InputShape>>{
                                        static_shapes_to_test_representation({{5, 2, 2, 2}, {2, 2, 2, 2}}),
                                        static_shapes_to_test_representation({{1, 3, 5}, {3, 3, 5}}),
                                        static_shapes_to_test_representation({{4, 3, 2}, {1, 3, 2}})}),
                                ::testing::ValuesIn(netPrecisions),
                                ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})),
                        ConcatLayerCPUTest::getTestCaseName);
// Static 3D concat over axes 1 and 2 on the "ref" implementation.
// Fix: migrated the raw size_t shape set to InputShape and dropped the obsolete
// DEVICE_CPU argument so the tuple matches the migrated ParamType.
INSTANTIATE_TEST_SUITE_P(smoke_Concat_3D, ConcatLayerCPUTest,
                        ::testing::Combine(
                                ::testing::Values(1, 2),
                                ::testing::Values(static_shapes_to_test_representation({{2, 4, 5}, {2, 4, 5}})),
                                ::testing::ValuesIn(netPrecisions),
                                ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})),
                        ConcatLayerCPUTest::getTestCaseName);
// smoke_Concat_2D removed: it still used the pre-dynamic-shapes argument list
// (raw size_t shapes + DEVICE_CPU), and its only case — static {3, 2} + {3, 10},
// axis 1, "ref" — is already covered by smoke_Concat_2D_axis1 through the first
// (static) entry of inputShapes2D_axis1.
// Second smoke_Concat_1D instantiation removed: INSTANTIATE_TEST_SUITE_P generates
// identifiers from the instantiation name, so reusing "smoke_Concat_1D" for the same
// test suite is a symbol redefinition. It also still used the pre-dynamic-shapes
// argument list, and its static {5}/{2}/{1}/{3} cases are already covered by the
// earlier smoke_Concat_1D instantiation via the static entries of inputShapes1D.
} // namespace
} // namespace CPULayerTestsDefinitions