[CPU] Added dynamism support for ShuffleChannels (#8601)

Alexandra Sidorova
2021-11-23 17:22:33 +03:00
committed by GitHub
parent 516d510045
commit c49620bb6a
6 changed files with 264 additions and 218 deletions
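For context: ShuffleChannels splits the dimension at axis into group x (C / group), swaps the two sub-dimensions, and merges them back; this is the same decompose-and-transpose trick the reworked executor below builds its permutation from. A minimal, self-contained sketch of these semantics for a flat NCHW buffer with axis = 1 (an illustrative helper, not the plugin code):

#include <cstddef>
#include <vector>

// Reference semantics of ShuffleChannels on a flat NCHW buffer, axis = 1:
// the channel dim is viewed as [group, groupSize], transposed and flattened,
// so input channel c lands at (c % groupSize) * group + c / groupSize.
std::vector<float> shuffleChannelsRef(const std::vector<float>& src,
                                      size_t N, size_t C, size_t spatial, size_t group) {
    const size_t groupSize = C / group;  // the op requires C % group == 0
    std::vector<float> dst(src.size());
    for (size_t n = 0; n < N; ++n)
        for (size_t c = 0; c < C; ++c) {
            const size_t cDst = (c % groupSize) * group + c / groupSize;
            for (size_t s = 0; s < spatial; ++s)
                dst[(n * C + cDst) * spatial + s] = src[(n * C + c) * spatial + s];
        }
    return dst;
}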

View File

@@ -25,31 +25,11 @@ using namespace mkldnn::impl::cpu::x64;
bool MKLDNNShuffleChannelsNode::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
try {
if (isDynamicNgraphNode(op)) {
errorMessage = "Doesn't support op with dynamic shapes";
return false;
}
const auto shuffleChannels = std::dynamic_pointer_cast<const ngraph::op::v0::ShuffleChannels>(op);
auto shuffleChannels = ov::as_type_ptr<const ngraph::op::v0::ShuffleChannels>(op);
if (!shuffleChannels) {
errorMessage = "Only opset1 ShuffleChannels operation is supported";
return false;
}
auto shapeSC = shuffleChannels->get_input_shape(0);
auto rankSC = shapeSC.size();
auto axisSC = shuffleChannels->get_axis();
auto groupSC = shuffleChannels->get_group();
if (axisSC < 0)
axisSC += rankSC;
if (axisSC < 0 || axisSC >= rankSC) {
errorMessage = "gets incorrect axis number, which should be in range of [-inputRank, inputRank).";
return false;
}
if (groupSC == 0 || shapeSC[axisSC] % groupSC) {
errorMessage = "gets incorrect group parameter('group' must evenly divide the channel dimension).";
return false;
}
} catch (...) {
return false;
}
@@ -57,25 +37,23 @@ bool MKLDNNShuffleChannelsNode::isSupportedOperation(const std::shared_ptr<const
}
MKLDNNShuffleChannelsNode::MKLDNNShuffleChannelsNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache)
: MKLDNNNode(op, eng, cache), permuteKernel_(nullptr), supportDynamicBatch_(false) {
: MKLDNNNode(op, eng, cache) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) {
IE_THROW(NotImplemented) << errorMessage;
}
const auto shuffleChannels = std::dynamic_pointer_cast<const ngraph::op::v0::ShuffleChannels>(op);
inShape_ = shuffleChannels->get_input_shape(0);
dataRank_ = inShape_.size();
axis_ = shuffleChannels->get_axis();
if (axis_ < 0)
axis_ += dataRank_;
group_ = shuffleChannels->get_group();
groupSize_ = inShape_[axis_] / group_;
if (inputShapes.size() != 1 || outputShapes.size() != 1)
THROW_SHCH_ERROR << "has incorrect number of input/output edges.";
supportDynamicBatch_ = (axis_ != 0);
}
auto shuffleChannels = ov::as_type_ptr<const ngraph::op::v0::ShuffleChannels>(op);
attrs.group = shuffleChannels->get_group();
attrs.axis = shuffleChannels->get_axis();
attrs.dataRank = getInputShapeAtPort(0).getRank();
if (attrs.axis < 0)
attrs.axis += attrs.dataRank;
supportDynamicBatch = (attrs.axis != 0);
}
void MKLDNNShuffleChannelsNode::getSupportedDescriptors() {
void MKLDNNShuffleChannelsNode::initSupportedPrimitiveDescriptors() {
@@ -104,24 +82,22 @@ void MKLDNNShuffleChannelsNode::initSupportedPrimitiveDescriptors() {
addSupportedPrimDesc({{firstCreatorType, precision}},
{{firstCreatorType, precision}},
impl_type, supportDynamicBatch_);
impl_type, supportDynamicBatch);
addSupportedPrimDesc({{secondCreatorType, precision}},
{{secondCreatorType, precision}},
impl_type, supportDynamicBatch_);
impl_type, supportDynamicBatch);
// canUseBlocked
if (axis_ != 1) {
if (attrs.axis != 1) {
addSupportedPrimDesc({{LayoutType::nCsp8c, precision}},
{{LayoutType::nCsp8c, precision}},
impl_type, supportDynamicBatch_);
impl_type, supportDynamicBatch);
addSupportedPrimDesc({{LayoutType::nCsp16c, precision}},
{{LayoutType::nCsp16c, precision}},
impl_type, supportDynamicBatch_);
impl_type, supportDynamicBatch);
}
}
void MKLDNNShuffleChannelsNode::createPrimitive() {
if (prim)
return;
auto &dstMemPtr = getChildEdgeAt(0)->getMemoryPtr();
auto &srcMemPtr = getParentEdgeAt(0)->getMemoryPtr();
if (!dstMemPtr || !dstMemPtr->GetPrimitivePtr())
@@ -131,43 +107,65 @@ void MKLDNNShuffleChannelsNode::createPrimitive() {
if (getSelectedPrimitiveDescriptor() == nullptr)
THROW_SHCH_ERROR << "has unidentified preferable primitive descriptor";
const bool isBlocked = getParentEdgeAt(0)->getMemory().getDesc().hasLayoutType(LayoutType::nCsp8c) ||
getParentEdgeAt(0)->getMemory().getDesc().hasLayoutType(LayoutType::nCsp16c);
const auto& memoryDesc = srcMemPtr->getDesc();
attrs.spatialRank = attrs.dataRank - attrs.axis - 1;
attrs.dataSize = memoryDesc.getPrecision().size();
attrs.layoutType = memoryDesc.hasLayoutType(LayoutType::nCsp16c) ? LayoutType::nCsp16c :
memoryDesc.hasLayoutType(LayoutType::nCsp8c) ? LayoutType::nCsp8c :
memoryDesc.hasLayoutType(LayoutType::nspc) ? LayoutType::nspc : LayoutType::ncsp;
int batchRank = axis_;
int spatialRank = dataRank_ - axis_ - 1;
if (inputShapesDefined()) {
if (needPrepareParams())
prepareParams();
updateLastInputDims();
}
}
void MKLDNNShuffleChannelsNode::prepareParams() {
auto& srcMemPtr = getParentEdgeAt(0)->getMemoryPtr();
execPtr = std::make_shared<ShuffleChannelsExecutor>(attrs, srcMemPtr->getStaticDims(), srcMemPtr->GetDescWithType<BlockedMemoryDesc>()->getBlockDims());
}
MKLDNNShuffleChannelsNode::ShuffleChannelsExecutor::ShuffleChannelsExecutor(const ShuffleChannelsAttributes& attrs,
const VectorDims& srcDims,
const VectorDims& srcBlockedDims) {
if (!one_of(attrs.layoutType, LayoutType::nCsp16c, LayoutType::nCsp8c, LayoutType::nspc, LayoutType::ncsp))
IE_THROW() << "ShuffleChannels executor supports only 'nCsp16c', 'nCsp8c', 'nspc' or 'ncsp' layouts.";
const bool isBlocked = MKLDNNPlugin::one_of(attrs.layoutType, LayoutType::nCsp16c, LayoutType::nCsp8c);
const bool isChannelsLast = attrs.layoutType == LayoutType::nspc;
// 2 for decomposed axis dim, 1 for composed spatial dim
int reshapedRank = batchRank + 2 + static_cast<int>(spatialRank != 0) + static_cast<int>(isBlocked && (spatialRank == 0));
const int batchRank = attrs.axis;
const int reshapedRank = batchRank + 2 + static_cast<int>(attrs.spatialRank != 0) + static_cast<int>(isBlocked && (attrs.spatialRank == 0));
PermuteParams params;
params.data_size = getSelectedPrimitiveDescriptor()->getConfig().inConfs[0].desc->getPrecision().size();
params.data_size = attrs.dataSize;
params.order.resize(reshapedRank, 0);
params.src_block_order.resize(reshapedRank);
params.dst_block_order.resize(reshapedRank);
params.dst_block_dims.resize(reshapedRank);
params.src_block_dims.resize(reshapedRank);
const size_t groupSize = srcDims[attrs.axis] / attrs.group;
size_t spatialShapeSize = 1;
if (spatialRank != 0) {
for (int i = batchRank + 1; i < dataRank_; i++) {
spatialShapeSize *= inShape_[i];
if (attrs.spatialRank != 0) {
for (int i = batchRank + 1; i < attrs.dataRank; i++) {
spatialShapeSize *= srcDims[i];
}
}
auto decomposeAndTranpose = [&](int axis) {
params.src_block_dims[axis] = group_;
params.src_block_dims[axis + 1] = groupSize_;
params.src_block_dims[axis] = attrs.group;
params.src_block_dims[axis + 1] = groupSize;
params.order[axis] = axis + 1;
params.order[axis + 1] = axis;
};
const int channelDim = 1;
if (isBlocked) {
const auto blkDesc = getParentEdgeAt(0)->getMemory().GetDescWithType<BlockedMemoryDesc>();
size_t blkSize = blkDesc->getBlockDims().back();
size_t CB = div_up(inShape_[1], blkSize);
SizeVector srcBlockedDims = blkDesc->getBlockDims();
if (axis_ > channelDim) { // axis on spatial
size_t blkSize = srcBlockedDims.back();
size_t CB = srcBlockedDims[1];
if (attrs.axis > channelDim) { // axis on spatial
for (int i = 0; i < batchRank; i++) {
params.order[i] = i;
params.src_block_dims[i] = srcBlockedDims[i];
@@ -178,36 +176,36 @@ void MKLDNNShuffleChannelsNode::createPrimitive() {
params.src_block_dims[batchRank + 2] = spatialShapeSize * blkSize;
} else { // axis on batch
decomposeAndTranpose(0);
size_t spatialShapeSize = CB * blkSize;
for (int i = 2; i < dataRank_; i++) {
spatialShapeSize *= inShape_[i];
spatialShapeSize = CB * blkSize;
for (int i = 2; i < attrs.dataRank; i++) {
spatialShapeSize *= srcDims[i];
}
params.order[2] = 2;
params.src_block_dims[2] = spatialShapeSize;
}
} else if (getParentEdgeAt(0)->getMemory().getDesc().hasLayoutType(LayoutType::nspc)) {
if (axis_ == channelDim) { // axis on channel
} else if (isChannelsLast) {
if (attrs.axis == channelDim) { // axis on channel
params.order[0] = 0;
params.src_block_dims[0] = inShape_[0];
params.src_block_dims[0] = srcDims[0];
params.order[1] = 1;
params.src_block_dims[1] = spatialShapeSize;
decomposeAndTranpose(2);
} else if (axis_ > channelDim) { // axis on spatial
} else if (attrs.axis > channelDim) { // axis on spatial
for (int i = 0; i < batchRank; i++) {
if (i == 0) {
params.order[i] = i;
params.src_block_dims[i] = inShape_[i];
params.src_block_dims[i] = srcDims[i];
} else if (i == 1) {
params.order[reshapedRank - 1] = reshapedRank - 1;
params.src_block_dims[params.order[reshapedRank - 1]] = inShape_[i];
params.src_block_dims[params.order[reshapedRank - 1]] = srcDims[i];
} else if (i > 1) {
params.order[i - 1] = i - 1;
params.src_block_dims[i - 1] = inShape_[i];
params.src_block_dims[i - 1] = srcDims[i];
}
}
decomposeAndTranpose(batchRank - 1);
if (spatialRank != 0) {
if (attrs.spatialRank != 0) {
params.order[batchRank + 1] = batchRank + 1;
params.src_block_dims[batchRank + 1] = spatialShapeSize;
}
@@ -218,12 +216,12 @@ void MKLDNNShuffleChannelsNode::createPrimitive() {
}
} else {
for (int i = 0; i < batchRank; i++) {
params.src_block_dims[i] = inShape_[i];
params.src_block_dims[i] = srcDims[i];
params.order[i] = i;
}
decomposeAndTranpose(batchRank);
if (spatialRank != 0) {
if (attrs.spatialRank != 0) {
params.order[batchRank + 2] = batchRank + 2;
params.src_block_dims[batchRank + 2] = spatialShapeSize;
}
@@ -234,20 +232,30 @@ void MKLDNNShuffleChannelsNode::createPrimitive() {
for (size_t i = 0; i < reshapedRank; i++)
params.dst_block_dims[i] = params.src_block_dims[params.order[i]];
permuteKernel_ = std::unique_ptr<PermuteKernel>(new PermuteKernel(params));
permuteKernel = std::unique_ptr<PermuteKernel>(new PermuteKernel(params));
}
void MKLDNNShuffleChannelsNode::ShuffleChannelsExecutor::exec(const uint8_t* srcData, uint8_t* dstData, const int MB) {
if (!permuteKernel)
IE_THROW() << "Could not execute. Kernel for Transpose node was not compiled.";
if (MB > 0)
permuteKernel->execute(srcData, dstData, MB);
else
permuteKernel->execute(srcData, dstData);
}
void MKLDNNShuffleChannelsNode::execute(mkldnn::stream strm) {
auto srcData = reinterpret_cast<const uint8_t*>(this->getParentEdgeAt(0)->getMemoryPtr()->GetPtr());
auto dstData = reinterpret_cast<uint8_t*>(this->getChildEdgeAt(0)->getMemoryPtr()->GetPtr());
if (permuteKernel_) {
if (supportDynamicBatch_)
permuteKernel_->execute(srcData, dstData, batchToProcess());
else
permuteKernel_->execute(srcData, dstData);
} else {
THROW_SHCH_ERROR << "does not initialize permute kernel to execute.";
}
if (!execPtr)
THROW_SHCH_ERROR << "doesn't have a compiled executor.";
int MB = -1;
if (supportDynamicBatch)
MB = isDynamicNode() ? getParentEdgeAt(0)->getMemoryPtr()->getStaticDims()[0] : batchToProcess();
const uint8_t* srcData = reinterpret_cast<const uint8_t*>(getParentEdgeAt(0)->getMemoryPtr()->GetPtr());
uint8_t* dstData = reinterpret_cast<uint8_t*>(getChildEdgeAt(0)->getMemoryPtr()->GetPtr());
execPtr->exec(srcData, dstData, MB);
}
bool MKLDNNShuffleChannelsNode::created() const {
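Note how the executor never moves data channel by channel: it expresses the shuffle as a single permutation over a reshaped view and hands it to PermuteKernel. A simplified sketch of how the plain (ncsp) branch assembles that view, under the same conventions as the diff (dims before the axis are kept, the axis dim is decomposed into [group, groupSize] and swapped, trailing dims collapse into one); the blocked and channels-last branches differ as shown above:

#include <cstddef>
#include <vector>

// Sketch of the plain-layout permutation build, mirroring decomposeAndTranpose;
// axis is assumed to be already normalized to a non-negative value.
struct PermuteSketch {
    std::vector<size_t> srcDims;  // reshaped source shape
    std::vector<size_t> order;    // dstDims[i] = srcDims[order[i]]
};

PermuteSketch buildPlainPermute(const std::vector<size_t>& shape, int axis, size_t group) {
    const int rank = static_cast<int>(shape.size());
    const size_t groupSize = shape[axis] / group;
    size_t spatial = 1;
    for (int i = axis + 1; i < rank; ++i)
        spatial *= shape[i];

    PermuteSketch p;
    for (int i = 0; i < axis; ++i) {       // batch dims stay in place
        p.srcDims.push_back(shape[i]);
        p.order.push_back(i);
    }
    p.srcDims.push_back(group);            // decomposed axis: [group, groupSize] ...
    p.srcDims.push_back(groupSize);
    p.order.push_back(axis + 1);           // ... read back transposed
    p.order.push_back(axis);
    if (axis + 1 < rank) {                 // composed spatial dim, if any
        p.srcDims.push_back(spatial);
        p.order.push_back(axis + 2);
    }
    return p;
}
// e.g. shape {2, 12, 5, 5}, axis 1, group 3 -> srcDims {2, 3, 4, 25}, order {0, 2, 1, 3}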

View File

@@ -18,23 +18,40 @@ public:
MKLDNNShuffleChannelsNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache);
~MKLDNNShuffleChannelsNode() override = default;
void getSupportedDescriptors() override;
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override;
void createPrimitive() override;
void execute(mkldnn::stream strm) override;
bool created() const override;
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
void prepareParams() override;
protected:
void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); };
private:
ngraph::Shape inShape_;
int dataRank_;
int axis_;
size_t group_;
size_t groupSize_;
struct ShuffleChannelsAttributes {
LayoutType layoutType;
int dataRank = 0;
int axis = 0;
int spatialRank = 0;
size_t group = 0lu;
size_t dataSize = 1lu;
} attrs;
std::unique_ptr<PermuteKernel> permuteKernel_;
bool supportDynamicBatch_;
struct ShuffleChannelsExecutor final {
ShuffleChannelsExecutor(const ShuffleChannelsAttributes& attrs, const VectorDims& srcDims, const VectorDims& srcBlockedDims);
void exec(const uint8_t* srcData, uint8_t* dstData, const int MB);
~ShuffleChannelsExecutor() = default;
private:
std::unique_ptr<PermuteKernel> permuteKernel = nullptr;
};
using executorPtr = std::shared_ptr<ShuffleChannelsExecutor>;
executorPtr execPtr = nullptr;
bool supportDynamicBatch = false;
};
} // namespace MKLDNNPlugin
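The header shows the dynamic-shape pattern this PR applies: shape-independent settings are collected once into an attributes struct at construction time, while everything derived from concrete dims lives in an executor that prepareParams() rebuilds whenever input shapes change and execute() merely invokes. A stripped-down sketch of that flow (hypothetical names, not the plugin classes):

#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

using Dims = std::vector<size_t>;

// Shape-independent attributes, filled once in the node constructor.
struct Attrs {
    int axis = 0;
    size_t group = 0;
};

// Shape-dependent state, rebuilt whenever the concrete input dims change.
class Executor {
public:
    Executor(const Attrs& attrs, const Dims& srcDims) {
        (void)attrs; (void)srcDims;  // a real executor precomputes the permutation here
    }
    void exec(const uint8_t* src, uint8_t* dst) {
        (void)src; (void)dst;        // ... and runs the precomputed kernel here
    }
};

class Node {
public:
    // Invoked on every shape change for dynamic inputs (once for static ones).
    void prepareParams(const Dims& srcDims) {
        execPtr = std::make_shared<Executor>(attrs, srcDims);
    }
    void execute(const uint8_t* src, uint8_t* dst) {
        if (execPtr)
            execPtr->exec(src, dst);
    }
private:
    Attrs attrs;
    std::shared_ptr<Executor> execPtr;
};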

View File

@@ -58,7 +58,6 @@ public:
protected:
void SetUp() override {
InputShape shapes;
ElementType inType;
DepthToSpace::DepthToSpaceMode mode;
std::size_t blockSize;
CPUSpecificParams cpuParams;
@@ -68,15 +67,13 @@ protected:
if (selectedType.empty()) {
selectedType = getPrimitiveType();
}
selectedType = selectedType + "_" + InferenceEngine::details::convertPrecision(inType).name();
selectedType = makeSelectedTypeStr(selectedType, inType);
targetDevice = CommonTestUtils::DEVICE_CPU;
init_input_shapes({shapes});
auto params = ngraph::builder::makeDynamicParams(inType, inputDynamicShapes);
auto d2s = ngraph::builder::makeDepthToSpace(params[0], mode, blockSize);
d2s->get_rt_info() = getCPUInfo();
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(d2s)};
function = std::make_shared<ngraph::Function>(results, params, "DepthToSpaceCPU");
function = makeNgraphFunction(inType, params, d2s, "DepthToSpace");
}
};
@@ -84,8 +81,7 @@ TEST_P(DepthToSpaceLayerCPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
run();
// TODO: need to uncomment when this method will be updated
// CheckPluginRelatedResults(executableNetwork, "DepthToSpace");
CheckPluginRelatedResults(executableNetwork, "DepthToSpace");
}
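The refactor above swaps hand-built ngraph::Function assembly for the makeNgraphFunction helper. Judging only from the inline code it replaces, the helper plausibly wraps the last node in a Result and builds the function; a sketch under that assumption (the real test-utils helper also takes the element type and may do extra bookkeeping such as precision handling):

#include <memory>
#include <string>
#include <ngraph/function.hpp>
#include <ngraph/opsets/opset1.hpp>

// What makeNgraphFunction could reduce to, inferred from the inline
// construction it replaces above; the actual helper may differ.
std::shared_ptr<ngraph::Function> makeNgraphFunctionSketch(
        const ngraph::ParameterVector& params,
        const std::shared_ptr<ngraph::Node>& lastNode,
        const std::string& name) {
    ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(lastNode)};
    return std::make_shared<ngraph::Function>(results, params, name);
}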
namespace {

View File

@@ -56,26 +56,20 @@ public:
protected:
void SetUp() override {
InputShape shapes;
ElementType elementType;
std::vector<int64_t> padsBegin, padsEnd;
ngraph::helpers::PadMode padMode;
float argPadValue;
CPUSpecificParams cpuParams;
std::tie(shapes, elementType, padsBegin, padsEnd, argPadValue, padMode, cpuParams) = this->GetParam();
std::tie(shapes, inType, padsBegin, padsEnd, argPadValue, padMode, cpuParams) = this->GetParam();
std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
if (selectedType.empty()) {
selectedType = getPrimitiveType();
}
selectedType = selectedType + "_" + InferenceEngine::details::convertPrecision(inType).name();
selectedType = makeSelectedTypeStr("ref", inType);
targetDevice = CommonTestUtils::DEVICE_CPU;
init_input_shapes({shapes});
auto params = ngraph::builder::makeDynamicParams(elementType, inputDynamicShapes);
auto params = ngraph::builder::makeDynamicParams(inType, inputDynamicShapes);
auto pad = ngraph::builder::makePad(params[0], padsBegin, padsEnd, argPadValue, padMode);
pad->get_rt_info() = getCPUInfo();
ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(pad)};
function = std::make_shared<ngraph::Function>(results, params, "pad");
function = makeNgraphFunction(inType, params, pad, "Pad");
}
};
@@ -84,8 +78,7 @@ TEST_P(PadLayerCPUTest, CompareWithRefs) {
run();
// TODO: need to uncomment when this method will be updated
//CheckPluginRelatedResults(executableNetwork, "Pad");
CheckPluginRelatedResults(executableNetwork, "Pad");
}
namespace {

View File

@@ -2,74 +2,80 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <shared_test_classes/single_layer/shuffle_channels.hpp>
#include "shared_test_classes/single_layer/shuffle_channels.hpp"
#include "test_utils/cpu_test_utils.hpp"
#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
using namespace InferenceEngine;
using namespace CPUTestUtils;
using namespace ngraph::opset3;
using namespace ov::test;
namespace CPULayerTestsDefinitions {
typedef std::tuple<
LayerTestsDefinitions::shuffleChannelsLayerTestParamsSet,
CPUSpecificParams> ShuffleChannelsLayerCPUTestParamsSet;
using ShuffleChannelsLayerCPUTestParamsSet = std::tuple<
InputShape, // Input shape
ElementType, // Input element type
LayerTestsDefinitions::shuffleChannelsSpecificParams,
CPUSpecificParams>;
class ShuffleChannelsLayerCPUTest : public testing::WithParamInterface<ShuffleChannelsLayerCPUTestParamsSet>,
virtual public LayerTestsUtils::LayerTestsCommon, public CPUTestsBase {
virtual public SubgraphBaseTest, public CPUTestsBase {
public:
static std::string getTestCaseName(testing::TestParamInfo<ShuffleChannelsLayerCPUTestParamsSet> obj) {
LayerTestsDefinitions::shuffleChannelsLayerTestParamsSet basicParamsSet;
InputShape shapes;
ElementType inType;
LayerTestsDefinitions::shuffleChannelsSpecificParams shuffleChannelsParams;
CPUSpecificParams cpuParams;
std::tie(basicParamsSet, cpuParams) = obj.param;
std::tie(shapes, inType, shuffleChannelsParams, cpuParams) = obj.param;
int axis, group;
std::tie(axis, group) = shuffleChannelsParams;
std::ostringstream result;
result << LayerTestsDefinitions::ShuffleChannelsLayerTest::getTestCaseName(
testing::TestParamInfo<LayerTestsDefinitions::shuffleChannelsLayerTestParamsSet>(basicParamsSet, 0));
std::ostringstream results;
results << "IS=" << CommonTestUtils::partialShape2str({shapes.first}) << "_";
results << "TS=";
for (const auto& item : shapes.second) {
results << CommonTestUtils::vec2str(item) << "_";
}
results << "Prc=" << inType << "_";
results << "Axis=" << std::to_string(axis) << "_";
results << "Group=" << std::to_string(group) << "_";
results << CPUTestsBase::getTestCaseName(cpuParams);
result << CPUTestsBase::getTestCaseName(cpuParams);
return result.str();
return results.str();
}
protected:
void SetUp() override {
LayerTestsDefinitions::shuffleChannelsLayerTestParamsSet basicParamsSet;
InputShape shapes;
ElementType inType;
LayerTestsDefinitions::shuffleChannelsSpecificParams shuffleChannelsParams;
int axis, group;
CPUSpecificParams cpuParams;
std::tie(basicParamsSet, cpuParams) = this->GetParam();
std::tie(shapes, inType, shuffleChannelsParams, cpuParams) = this->GetParam();
std::tie(axis, group) = shuffleChannelsParams;
std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
LayerTestsDefinitions::shuffleChannelsSpecificParams shuffleChannelsParams;
std::vector<size_t> inputShape;
Precision netPrecision;
std::tie(shuffleChannelsParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = basicParamsSet;
int axis, group;
std::tie(axis, group) = shuffleChannelsParams;
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
auto paramOuts = ngraph::helpers::convert2OutputVector(
ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
auto shuffleChannels = std::dynamic_pointer_cast<ngraph::opset3::ShuffleChannels>(
ngraph::builder::makeShuffleChannels(paramOuts[0], axis, group));
shuffleChannels->get_rt_info() = getCPUInfo();
ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(shuffleChannels)};
function = std::make_shared<ngraph::Function>(results, params, "shuffleChannels");
if (selectedType.empty()) {
selectedType = getPrimitiveType();
}
selectedType.push_back('_');
selectedType += netPrecision.name();
selectedType = makeSelectedTypeStr(selectedType, inType);
targetDevice = CommonTestUtils::DEVICE_CPU;
init_input_shapes({shapes});
auto params = ngraph::builder::makeDynamicParams(inType, inputDynamicShapes);
auto shuffleChannels = std::dynamic_pointer_cast<ngraph::opset3::ShuffleChannels>(
ngraph::builder::makeShuffleChannels(params[0], axis, group));
function = makeNgraphFunction(inType, params, shuffleChannels, "ShuffleChannels");
}
};
TEST_P(ShuffleChannelsLayerCPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Run();
run();
CheckPluginRelatedResults(executableNetwork, "ShuffleChannels");
}
@@ -135,87 +141,117 @@ std::vector<CPUSpecificParams> filterCPUInfoForDevice5DBlock() {
}
/* ========== */
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::BF16,
InferenceEngine::Precision::I8
const std::vector<ElementType> inputElementType = {
ElementType::f32,
ElementType::bf16,
ElementType::i8
};
const auto shuffleChannelsParams4D = ::testing::Combine(
::testing::ValuesIn(std::vector<int>{-4, -2, 0, 1, 2, 3}),
::testing::ValuesIn(std::vector<int>{1, 2, 4, 8})
::testing::ValuesIn(std::vector<int>{-4, -2, 0, 1, 3}),
::testing::ValuesIn(std::vector<int>{1, 2, 4})
);
const auto shuffleChannelsParams5D = ::testing::Combine(
::testing::ValuesIn(std::vector<int>{-5, -1, 0, 1, 2, 3, 4}),
::testing::ValuesIn(std::vector<int>{1, 2, 3, 6})
::testing::ValuesIn(std::vector<int>{-5, -3, -1, 0, 1, 3}),
::testing::ValuesIn(std::vector<int>{1, 2, 3})
);
INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannels4D, ShuffleChannelsLayerCPUTest,
::testing::Combine(
::testing::Combine(
shuffleChannelsParams4D,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t >({16, 24, 32, 40})),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice4D())),
ShuffleChannelsLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannels5D, ShuffleChannelsLayerCPUTest,
::testing::Combine(
::testing::Combine(
shuffleChannelsParams5D,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t >({12, 18, 12, 18, 24})),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice5D())),
ShuffleChannelsLayerCPUTest::getTestCaseName);
const auto shuffleChannelsParams4DBlock = ::testing::Combine(
::testing::ValuesIn(std::vector<int>{-4, -2, -1, 0, 2, 3}),
::testing::ValuesIn(std::vector<int>{1, 2, 4, 8})
::testing::ValuesIn(std::vector<int>{1, 2, 4})
);
const auto shuffleChannelsParams5DBlock = ::testing::Combine(
::testing::ValuesIn(std::vector<int>{-5, -2, -1, 0, 2, 3, 4}),
::testing::ValuesIn(std::vector<int>{1, 2, 3, 6})
::testing::ValuesIn(std::vector<int>{1, 2, 3})
);
INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannels4DBlock, ShuffleChannelsLayerCPUTest,
::testing::Combine(
::testing::Combine(
shuffleChannelsParams4DBlock,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t >({40, 32, 24, 16})),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice4DBlock())),
ShuffleChannelsLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannels5DBlock, ShuffleChannelsLayerCPUTest,
::testing::Combine(
::testing::Combine(
shuffleChannelsParams5DBlock,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t >({18, 12, 18, 12, 30})),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice5DBlock())),
ShuffleChannelsLayerCPUTest::getTestCaseName);
const std::vector<InputShape> inputShapesDynamic4D = {
{{-1, -1, -1, -1},
{{8, 4, 4, 4}, {8, 16, 8, 4}}},
{{-1, 8, -1, -1},
{{8, 8, 8, 8}, {8, 8, 4, 16}}},
{{{4, 32}, {4, 32}, {4, 32}, {4, 32}},
{{4, 12, 8, 8}, {8, 32, 12, 4}}},
};
const std::vector<InputShape> inputShapesDynamic5D = {
{{-1, -1, -1, -1, -1},
{{6, 6, 6, 6, 6}, {12, 6, 12, 12, 12}}},
{{-1, 18, -1, -1, -1},
{{6, 18, 12, 6, 12}, {6, 18, 6, 6, 6}}},
{{{6, 24}, {6, 24}, {6, 24}, {6, 24}, {6, 24}},
{{24, 12, 6, 6, 6}, {12, 24, 6, 12, 12}}},
};
INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannelsStatic4D, ShuffleChannelsLayerCPUTest,
::testing::Combine(
::testing::ValuesIn(static_shapes_to_test_representation({{16, 24, 32, 40}})),
::testing::ValuesIn(inputElementType),
shuffleChannelsParams4D,
::testing::ValuesIn(filterCPUInfoForDevice4D())),
ShuffleChannelsLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannelsDynamic4D, ShuffleChannelsLayerCPUTest,
::testing::Combine(
::testing::ValuesIn(inputShapesDynamic4D),
::testing::ValuesIn(inputElementType),
shuffleChannelsParams4D,
::testing::ValuesIn(filterCPUInfoForDevice4D())),
ShuffleChannelsLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannelsStatic5D, ShuffleChannelsLayerCPUTest,
::testing::Combine(
::testing::ValuesIn(static_shapes_to_test_representation({{6, 24, 12, 12, 6}})),
::testing::ValuesIn(inputElementType),
shuffleChannelsParams5D,
::testing::ValuesIn(filterCPUInfoForDevice5D())),
ShuffleChannelsLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannelsDynamic5D, ShuffleChannelsLayerCPUTest,
::testing::Combine(
::testing::ValuesIn(inputShapesDynamic5D),
::testing::ValuesIn(inputElementType),
shuffleChannelsParams5D,
::testing::ValuesIn(filterCPUInfoForDevice5D())),
ShuffleChannelsLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannelsStatic4DBlock, ShuffleChannelsLayerCPUTest,
::testing::Combine(
::testing::ValuesIn(static_shapes_to_test_representation({{40, 32, 24, 16}})),
::testing::ValuesIn(inputElementType),
shuffleChannelsParams4DBlock,
::testing::ValuesIn(filterCPUInfoForDevice4DBlock())),
ShuffleChannelsLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannelsDynamic4DBlock, ShuffleChannelsLayerCPUTest,
::testing::Combine(
::testing::ValuesIn(inputShapesDynamic4D),
::testing::ValuesIn(inputElementType),
shuffleChannelsParams4DBlock,
::testing::ValuesIn(filterCPUInfoForDevice4DBlock())),
ShuffleChannelsLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannelsStatic5DBlock, ShuffleChannelsLayerCPUTest,
::testing::Combine(
::testing::ValuesIn(static_shapes_to_test_representation({{18, 12, 18, 12, 30}})),
::testing::ValuesIn(inputElementType),
shuffleChannelsParams5DBlock,
::testing::ValuesIn(filterCPUInfoForDevice5DBlock())),
ShuffleChannelsLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannelsDynamic5DBlock, ShuffleChannelsLayerCPUTest,
::testing::Combine(
::testing::ValuesIn(inputShapesDynamic5D),
::testing::ValuesIn(inputElementType),
shuffleChannelsParams5DBlock,
::testing::ValuesIn(filterCPUInfoForDevice5DBlock())),
ShuffleChannelsLayerCPUTest::getTestCaseName);
} // namespace
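Each InputShape entry above pairs a partial shape, which the model is compiled with, with the concrete target shapes fed at inference time; that pairing is how these suites exercise the new dynamic path alongside the static one. A hypothetical, simplified mirror of that structure:

#include <cstddef>
#include <cstdint>
#include <vector>

// Simplified stand-in for the InputShape pairs used above: a partial shape
// (-1 marks a dynamic dimension) plus the static shapes each inference runs with.
struct InputShapeSketch {
    std::vector<int64_t> partialShape;
    std::vector<std::vector<size_t>> targetShapes;
};

// Second dynamic 4D case from the suite: channels fixed at 8, the rest dynamic.
const InputShapeSketch dyn4D{
    {-1, 8, -1, -1},
    {{8, 8, 8, 8}, {8, 8, 4, 16}}
};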

View File

@@ -66,21 +66,18 @@ protected:
void SetUp() override {
InputShape shapes;
StridedSliceParams ssParams;
ElementType elementType;
CPUSpecificParams cpuParams;
std::tie(shapes, ssParams, elementType, cpuParams) = this->GetParam();
std::tie(shapes, ssParams, inType, cpuParams) = this->GetParam();
std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
selectedType = selectedType + "_" + InferenceEngine::details::convertPrecision(inType).name();
selectedType = makeSelectedTypeStr("ref", inType);
targetDevice = CommonTestUtils::DEVICE_CPU;
init_input_shapes({shapes});
auto params = ngraph::builder::makeDynamicParams(elementType, inputDynamicShapes);
auto ss = ngraph::builder::makeStridedSlice(params[0], ssParams.begin, ssParams.end, ssParams.strides, elementType, ssParams.beginMask,
auto params = ngraph::builder::makeDynamicParams(inType, inputDynamicShapes);
auto ss = ngraph::builder::makeStridedSlice(params[0], ssParams.begin, ssParams.end, ssParams.strides, inType, ssParams.beginMask,
ssParams.endMask, ssParams.newAxisMask, ssParams.shrinkAxisMask, ssParams.ellipsisAxisMask);
ss->get_rt_info() = getCPUInfo();
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(ss)};
function = std::make_shared<ngraph::Function>(results, params, "StridedSlice");
function = makeNgraphFunction(inType, params, ss, "StridedSlice");
}
};
@@ -88,8 +85,7 @@ TEST_P(StridedSliceLayerCPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
run();
// TODO: need to uncomment when this method will be updated
// CheckPluginRelatedResults(executableNetwork, "StridedSlice");
CheckPluginRelatedResults(executableNetwork, "StridedSlice");
}
namespace {