[CPU] Slice-8 operation implementation. (#7521)

This commit is contained in:
Nikolay Shchegolev
2021-11-29 17:28:12 +03:00
committed by GitHub
parent 2022b70d8b
commit a113c823a7
11 changed files with 879 additions and 197 deletions

View File

@@ -81,6 +81,7 @@ const InferenceEngine::details::caseless_unordered_map<std::string, Type> type_t
{ "ConvolutionBackpropData", Deconvolution },
{ "GroupConvolutionBackpropData", Deconvolution },
{ "StridedSlice", StridedSlice },
{ "Slice", StridedSlice },
{ "Tile", Tile },
{ "ROIAlign", ROIAlign },
{ "ROIPooling", ROIPooling },

View File

@@ -38,6 +38,7 @@
#include <transformations/op_conversions/convert_broadcast_to_tiles.hpp>
#include <transformations/op_conversions/convert_depth_to_space.hpp>
#include <transformations/op_conversions/convert_shuffle_channels3.hpp>
#include <transformations/op_conversions/convert_slice_to_strided_slice.hpp>
#include <transformations/op_conversions/convert_space_to_depth.hpp>
#include <transformations/op_conversions/convert_gelu.hpp>
#include <transformations/op_conversions/convert_gather_downgrade.hpp>
@@ -371,6 +372,7 @@ static void TransformationUpToCPUSpecificOpSet(std::shared_ptr<ngraph::Function>
pass_config->disable<ngraph::pass::ConvertReduceMeanToPooling>();
pass_config->disable<ngraph::pass::ConvertReduceMaxToPooling>();
pass_config->disable<ngraph::pass::ConvertReduceSumToPooling>();
pass_config->disable<ngraph::pass::SliceToStridedSlice>();
pass_config->enable<ngraph::pass::NormalizeL2Decomposition>();
pass_config->enable<ngraph::pass::ConvertInterpolate1ToInterpolate4>();

View File

@@ -4,23 +4,14 @@
#include "mkldnn_strided_slice_node.h"
#include <mkldnn_types.h>
#include <mkldnn_extension_utils.h>
#include "ie_parallel.hpp"
#include "caseless.hpp"
#include "common/cpu_memcpy.h"
#include "common/blocked_desc_creator.h"
#include "utils/general_utils.h"
#include "mkldnn_input_node.h"
#include <string>
#include <tuple>
#include <algorithm>
#include "caseless.hpp"
#include <ngraph/opsets/opset1.hpp>
#define THROW_ERROR IE_THROW() << "StridedSlice layer with name '" << getName() << "' "
#include <string>
#define THROW_ERROR IE_THROW() << NameFromType(getType()) << " node with name '" << getName() << "' "
using namespace mkldnn;
using namespace MKLDNNPlugin;
@@ -35,19 +26,20 @@ static inline size_t parallel_init(size_t start, size_t nDims, const VectorDims&
return start;
}
bool MKLDNNStridedSliceNode::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
bool MKLDNNStridedSliceNode::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try {
auto ss = ov::as_type_ptr<const ngraph::opset1::StridedSlice>(op);
if (!ss) {
errorMessage = "Only opset1 StridedSlice operation is supported";
if (!ov::is_type<ov::op::v1::StridedSlice>(op) &&
!ov::is_type<ov::op::v8::Slice>(op)) {
errorMessage = "Only StridedSlice from opset1 and Slice from opset8 operations are supported.";
return false;
}
if (ss->get_input_node_shared_ptr(BEGIN_ID)->get_type_info() != ov::op::v0::Constant::get_type_info_static() ||
ss->get_input_node_shared_ptr(END_ID)->get_type_info() != ov::op::v0::Constant::get_type_info_static() ||
(ss->get_input_size() == 4 && ss->get_input_node_shared_ptr(STRIDE_ID)->get_type_info() != ov::op::v0::Constant::get_type_info_static())) {
// TODO: Support begin, end, stride inputs for dynamic shapes.
errorMessage = "Only Constant 'begin', 'end' and 'stride' inputs are supported.";
if (!ov::is_type<ov::op::v0::Constant>(op->get_input_node_ptr(BEGIN_ID)) ||
!ov::is_type<ov::op::v0::Constant>(op->get_input_node_shared_ptr(END_ID)) ||
(op->get_input_size() > STRIDE_ID && !ov::is_type<ov::op::v0::Constant>(op->get_input_node_ptr(STRIDE_ID))) ||
(op->get_input_size() > AXES_ID && !ov::is_type<ov::op::v0::Constant>(op->get_input_node_ptr(AXES_ID)))) {
// TODO: Support begin, end, stride, axis inputs for dynamic shapes.
errorMessage = "Only Constant 'begin', 'end', 'stride' and 'axis' inputs are supported.";
return false;
}
} catch (...) {
@@ -56,53 +48,31 @@ bool MKLDNNStridedSliceNode::isSupportedOperation(const std::shared_ptr<const ng
return true;
}
MKLDNNStridedSliceNode::MKLDNNStridedSliceNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache) :
MKLDNNStridedSliceNode::MKLDNNStridedSliceNode(const std::shared_ptr<ov::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache) :
MKLDNNNode(op, eng, cache) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) {
IE_THROW(NotImplemented) << errorMessage;
}
auto ss = ov::as_type_ptr<const ngraph::opset1::StridedSlice>(op);
if (inputShapes.size() != 3 && inputShapes.size() != 4) {
isStridedSliceOp = ov::is_type<ov::op::v1::StridedSlice>(op);
if ((isStridedSliceOp && (inputShapes.size() < 3 || inputShapes.size() > 4)) ||
(!isStridedSliceOp && (inputShapes.size() < 4 || inputShapes.size() > 5))) {
THROW_ERROR << "has incorrect number of input edges";
}
if (outputShapes.size() != 1) {
THROW_ERROR << "has incorrect number of output edges";
}
const size_t inputRank = getInputShapeAtPort(DATA_ID).getRank();
const size_t outputRank = getOutputShapeAtPort(0).getRank();
const size_t nDims = std::max(inputRank, outputRank);
auto createMask = [&](const std::vector<int64_t> &origMask, const int bit = 0, bool needReverse = false) {
std::vector<int> mask(origMask.begin(), origMask.end());
if (needReverse) {
for (size_t i = 0; i < mask.size(); i++)
mask[i] = 1 - mask[i];
}
for (size_t i = mask.size(); i < nDims; ++i) mask.push_back(bit);
return mask;
};
attrs.beginMask = createMask(ss->get_begin_mask(), 1, true);
attrs.endMask = createMask(ss->get_end_mask(), 1, true);
attrs.newAxisMask = createMask(ss->get_new_axis_mask());
attrs.shrinkAxisMask = createMask(ss->get_shrink_axis_mask());
auto origEllipsisMask = ss->get_ellipsis_mask();
for (const auto &o : origEllipsisMask) {
attrs.ellipsisMask.push_back(o);
}
if (attrs.ellipsisMask.size() == 0) {
for (size_t i = attrs.ellipsisMask.size(); i < nDims; ++i) attrs.ellipsisMask.push_back(0);
for (size_t i = 0lu; i < op->get_input_size(); i++) {
isConstantInput[i] = ov::is_type<ov::op::v0::Constant>(op->inputs()[i].get_node());
}
attrs.beginDims = getInputShapeAtPort(BEGIN_ID).getStaticDims();
attrs.endDims = getInputShapeAtPort(END_ID).getStaticDims();
if (attrs.beginDims.size() != 1)
THROW_ERROR << " should have begin vector with 1 dimension";
if (attrs.beginDims.size() != 1)
THROW_ERROR << "should have begin vector with 1 dimension";
if (attrs.endDims.size() != 1)
THROW_ERROR << "should have end vector with 1 dimension";
if (attrs.beginDims[0] != attrs.endDims[0])
THROW_ERROR << "should have begin vector with size equal to end vector size";
@@ -115,6 +85,59 @@ MKLDNNStridedSliceNode::MKLDNNStridedSliceNode(const std::shared_ptr<ngraph::Nod
if (attrs.beginDims[0] != attrs.strideDims[0])
THROW_ERROR << "should have stride vector with size equal to begin vector size";
}
if (inputShapes.size() > AXES_ID) {
isAxesSpecified = true;
attrs.axesDims = inputShapes[AXES_ID].getStaticDims();
if (attrs.axesDims.size() != 1)
THROW_ERROR << "should have axes vector with 1 dimension.";
if (attrs.beginDims[0] != attrs.axesDims[0])
THROW_ERROR << "should have axes vector with size equal to begin vector size.";
}
if (isStridedSliceOp) {
auto ss = ov::as_type_ptr<const ov::op::v1::StridedSlice>(op);
const size_t inputRank = getInputShapeAtPort(DATA_ID).getRank();
const size_t outputRank = getOutputShapeAtPort(0).getRank();
const size_t nDims = std::max(inputRank, outputRank);
auto createMask = [&](const std::vector<int64_t> &origMask, const int bit = 0, bool needReverse = false) {
std::vector<int> mask(origMask.begin(), origMask.end());
if (needReverse) {
for (size_t i = 0; i < mask.size(); i++)
mask[i] = 1 - mask[i];
}
for (size_t i = mask.size(); i < nDims; ++i) mask.push_back(bit);
return mask;
};
attrs.beginMask = createMask(ss->get_begin_mask(), 1, true);
attrs.endMask = createMask(ss->get_end_mask(), 1, true);
attrs.newAxisMask = createMask(ss->get_new_axis_mask());
attrs.shrinkAxisMask = createMask(ss->get_shrink_axis_mask());
auto origEllipsisMask = ss->get_ellipsis_mask();
for (const auto &o : origEllipsisMask) {
attrs.ellipsisMask.push_back(o);
}
if (attrs.ellipsisMask.size() == 0) {
for (size_t i = attrs.ellipsisMask.size(); i < nDims; ++i) attrs.ellipsisMask.push_back(0);
}
} else {
const size_t length = outputShapes[0].getRank();
if (inputShapes.size() > AXES_ID) {
attrs.beginMask = std::vector<int>(length, 0);
attrs.endMask = std::vector<int>(length, 0);
} else {
attrs.beginMask = std::vector<int>(length, 1);
attrs.endMask = std::vector<int>(length, 1);
}
attrs.newAxisMask = std::vector<int>(length, 0);
attrs.shrinkAxisMask = std::vector<int>(length, 0);
attrs.ellipsisMask = std::vector<int>(length, 0);
}
}
void MKLDNNStridedSliceNode::getSupportedDescriptors() {
@@ -124,16 +147,20 @@ void MKLDNNStridedSliceNode::getSupportedDescriptors() {
int ellipsisMaskCounter = 0;
int ellipsisPos1 = -1;
for (size_t i = 0; i < attrs.ellipsisMask.size(); i++) {
ellipsisMaskCounter += attrs.ellipsisMask[i];
ellipsisPos1 = attrs.ellipsisMask[i] == 1 && ellipsisPos1 == -1 ? i : ellipsisPos1;
}
if (ellipsisMaskCounter > 1)
THROW_ERROR << "has incorrect 'Ellipsis_mask'. Only one non-zero bit is allowed";
if (isStridedSliceOp) {
for (size_t i = 0; i < attrs.ellipsisMask.size(); i++) {
ellipsisMaskCounter += attrs.ellipsisMask[i];
ellipsisPos1 = attrs.ellipsisMask[i] == 1 && ellipsisPos1 == -1 ? i : ellipsisPos1;
}
if (ellipsisMaskCounter > 1)
THROW_ERROR << "has incorrect 'Ellipsis_mask'. Only one non-zero bit is allowed";
int newAxis = std::accumulate(attrs.newAxisMask.begin(), attrs.newAxisMask.end(), 0);
int shrinkAxis = std::accumulate(attrs.shrinkAxisMask.begin(), attrs.shrinkAxisMask.end(), 0);
attrs.equalDims = newAxis == 0 && shrinkAxis == 0;
int newAxis = std::accumulate(attrs.newAxisMask.begin(), attrs.newAxisMask.end(), 0);
int shrinkAxis = std::accumulate(attrs.shrinkAxisMask.begin(), attrs.shrinkAxisMask.end(), 0);
attrs.equalDims = newAxis == 0 && shrinkAxis == 0;
} else {
attrs.equalDims = true;
}
auto fillingInParameters = [&](std::vector<int> &parameter, const size_t type, const size_t size, const int value) {
const auto constNode = std::dynamic_pointer_cast<MKLDNNInputNode>(getParentEdgesAtPort(type)[0]->getParent());
@@ -146,7 +173,7 @@ void MKLDNNStridedSliceNode::getSupportedDescriptors() {
const int *ptr = static_cast<const int*>(blob->GetPtr());
parameter.assign(ptr, ptr + size);
if (ellipsisMaskCounter == 0 && size < nDims) {
if (type != AXES_ID && ellipsisMaskCounter == 0 && size < nDims) {
for (size_t i = size; i < nDims; i++) parameter.push_back(value);
}
};
@@ -157,6 +184,25 @@ void MKLDNNStridedSliceNode::getSupportedDescriptors() {
fillingInParameters(attrs.end, END_ID, attrs.endDims[0], 0);
if (attrs.strideDims.size())
fillingInParameters(attrs.stride, STRIDE_ID, attrs.strideDims[0], 1);
if (attrs.axesDims.size()) {
fillingInParameters(attrs.axes, AXES_ID, attrs.axesDims[0], 0);
std::vector<int> beginTmp(outputRank, 0);
std::vector<int> endTmp(outputRank, -1);
std::vector<int> strideTmp(outputRank, 1);
size_t i = 0lu;
for (auto& a : attrs.axes) {
if (a < 0)
a += outputRank;
beginTmp[a] = attrs.begin[i];
endTmp[a] = attrs.end[i];
strideTmp[a] = attrs.stride[i++];
attrs.beginMask[a] = 1;
attrs.endMask[a] = 1;
}
attrs.begin = beginTmp;
attrs.end = endTmp;
attrs.stride = strideTmp;
}
if (inputRank > 3 && attrs.equalDims && ellipsisMaskCounter == 1)
addHiddenDims(inputRank, ellipsisPos1);
@@ -194,15 +240,11 @@ void MKLDNNStridedSliceNode::initSupportedPrimitiveDescriptors() {
if (!supportedPrimitiveDescriptors.empty())
return;
InferenceEngine::Precision dataPrecision = getOriginalInputPrecisionAtPort(DATA_ID);
InferenceEngine::Precision beginPrecision = getOriginalInputPrecisionAtPort(BEGIN_ID);
InferenceEngine::Precision endPrecision = getOriginalInputPrecisionAtPort(END_ID);
InferenceEngine::Precision stridePrecision;
if (isStrideSpecified)
stridePrecision = getOriginalInputPrecisionAtPort(STRIDE_ID);
const InferenceEngine::Precision dataPrecision = getOriginalInputPrecisionAtPort(DATA_ID);
const InferenceEngine::Precision iPrecision = Precision::I32;
attrs.dataSize = dataPrecision.size();
size_t nDims = getInputShapeAtPort(DATA_ID).getRank();
const size_t nDims = getInputShapeAtPort(DATA_ID).getRank();
NodeConfig config;
config.dynBatchSupport = false;
@@ -210,12 +252,16 @@ void MKLDNNStridedSliceNode::initSupportedPrimitiveDescriptors() {
config.inConfs[DATA_ID].inPlace = -1;
config.inConfs[BEGIN_ID].inPlace = -1;
config.inConfs[END_ID].inPlace = -1;
config.inConfs[DATA_ID].constant = false;
config.inConfs[BEGIN_ID].constant = true;
config.inConfs[END_ID].constant = true;
config.inConfs[DATA_ID].constant = isConstantInput[DATA_ID];
config.inConfs[BEGIN_ID].constant = isConstantInput[BEGIN_ID];
config.inConfs[END_ID].constant = isConstantInput[END_ID];
if (isStrideSpecified) {
config.inConfs[STRIDE_ID].inPlace = -1;
config.inConfs[STRIDE_ID].constant = true;
config.inConfs[STRIDE_ID].constant = isConstantInput[STRIDE_ID];
}
if (isAxesSpecified) {
config.inConfs[AXES_ID].inPlace = -1;
config.inConfs[AXES_ID].constant = isConstantInput[AXES_ID];
}
config.outConfs.resize(1);
@@ -241,11 +287,13 @@ void MKLDNNStridedSliceNode::initSupportedPrimitiveDescriptors() {
auto range = BlockedDescCreator::makeFilteredRange(creators, nDims, supportedTypes);
for (auto itr = range.first; itr != range.second; ++itr) {
config.inConfs[0].desc = itr->second->createSharedDesc(dataPrecision, getInputShapeAtPort(DATA_ID));
config.inConfs[BEGIN_ID].desc = creators.at(LayoutType::ncsp)->createSharedDesc(beginPrecision, getInputShapeAtPort(BEGIN_ID));
config.inConfs[END_ID].desc = creators.at(LayoutType::ncsp)->createSharedDesc(endPrecision, getInputShapeAtPort(END_ID));
config.inConfs[DATA_ID].desc = itr->second->createSharedDesc(dataPrecision, getInputShapeAtPort(DATA_ID));
config.inConfs[BEGIN_ID].desc = creators.at(LayoutType::ncsp)->createSharedDesc(iPrecision, getInputShapeAtPort(BEGIN_ID));
config.inConfs[END_ID].desc = creators.at(LayoutType::ncsp)->createSharedDesc(iPrecision, getInputShapeAtPort(END_ID));
if (isStrideSpecified)
config.inConfs[STRIDE_ID].desc = creators.at(LayoutType::ncsp)->createSharedDesc(stridePrecision, getInputShapeAtPort(STRIDE_ID));
config.inConfs[STRIDE_ID].desc = creators.at(LayoutType::ncsp)->createSharedDesc(iPrecision, getInputShapeAtPort(STRIDE_ID));
if (isAxesSpecified)
config.inConfs[AXES_ID].desc = creators.at(LayoutType::ncsp)->createSharedDesc(iPrecision, getInputShapeAtPort(AXES_ID));
config.outConfs[0].desc = itr->second->createSharedDesc(dataPrecision, getOutputShapeAtPort(DATA_ID));
supportedPrimitiveDescriptors.emplace_back(config, impl_desc_type::ref);
@@ -254,7 +302,7 @@ void MKLDNNStridedSliceNode::initSupportedPrimitiveDescriptors() {
void MKLDNNStridedSliceNode::createPrimitive() {
auto& dstMemPtr = getChildEdgeAt(0)->getMemoryPtr();
auto& srcMemPtr = getParentEdgeAt(0)->getMemoryPtr();
auto& srcMemPtr = getParentEdgeAt(DATA_ID)->getMemoryPtr();
if (!dstMemPtr || !dstMemPtr->GetPrimitivePtr())
THROW_ERROR << "has not allocated destination memory.";
if (!srcMemPtr || !srcMemPtr->GetPrimitivePtr())
@@ -308,9 +356,11 @@ void MKLDNNStridedSliceNode::orderParametersByLayouts(const MKLDNNMemoryPtr& src
sortByOrder(attrs.stride);
sortByOrder(attrs.beginMask);
sortByOrder(attrs.endMask);
sortByOrder(attrs.ellipsisMask);
sortByOrder(attrs.newAxisMask);
sortByOrder(attrs.shrinkAxisMask);
if (isStridedSliceOp) {
sortByOrder(attrs.ellipsisMask);
sortByOrder(attrs.newAxisMask);
sortByOrder(attrs.shrinkAxisMask);
}
}
}

View File

@@ -4,7 +4,6 @@
#pragma once
#include <ie_common.h>
#include <mkldnn_node.h>
#include <string>
#include <vector>
@@ -13,9 +12,9 @@ namespace MKLDNNPlugin {
class MKLDNNStridedSliceNode : public MKLDNNNode {
public:
MKLDNNStridedSliceNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache);
MKLDNNStridedSliceNode(const std::shared_ptr<ov::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache);
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
void getSupportedDescriptors() override;
void initSupportedPrimitiveDescriptors() override;
void createPrimitive() override;
@@ -25,9 +24,8 @@ public:
return false;
}
void prepareParams() override;
protected:
void prepareParams() override;
void executeDynamicImpl(mkldnn::stream strm) override;
private:
@@ -38,6 +36,7 @@ private:
std::vector<int> begin;
std::vector<int> end;
std::vector<int> stride;
std::vector<int> axes;
std::vector<int> beginMask;
std::vector<int> endMask;
@@ -48,6 +47,7 @@ private:
VectorDims beginDims;
VectorDims endDims;
VectorDims strideDims;
VectorDims axesDims;
bool equalDims = false;
size_t dataSize = 1lu;
@@ -84,12 +84,17 @@ private:
using executorPtr = std::shared_ptr<StridedSliceExecutor>;
executorPtr execPtr = nullptr;
bool isStridedSliceOp = true;
bool isStrideSpecified = false;
bool isAxesSpecified = false;
static constexpr size_t DATA_ID = 0;
static constexpr size_t BEGIN_ID = 1;
static constexpr size_t END_ID = 2;
static constexpr size_t STRIDE_ID = 3;
static constexpr size_t AXES_ID = 4;
bool isConstantInput[AXES_ID + 1] = {false};
};
} // namespace MKLDNNPlugin

View File

@@ -8,75 +8,120 @@
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
using namespace ov::test;
namespace {
const std::vector<InferenceEngine::Precision> inputPrecision = {
InferenceEngine::Precision::I8,
InferenceEngine::Precision::U8,
InferenceEngine::Precision::I16,
InferenceEngine::Precision::I32,
InferenceEngine::Precision::FP32
const std::vector<ElementType> inputPrecisions = {
ElementType::f32,
ElementType::bf16,
ElementType::i8
};
std::vector<SliceSpecificParams> test_cases = {
SliceSpecificParams{ { 16 }, { 4 }, { 12 }, { 1 }, { 0 } },
SliceSpecificParams{ { 16 }, { 0 }, { 8 }, { 2 }, { 0 } },
SliceSpecificParams{ { 20, 10, 5 }, { 0, 0}, { 10, 20}, { 1, 1 }, { 1, 0 } },
SliceSpecificParams{ { 1, 2, 12, 100 }, { 0, 1, 0, 1 }, { 1, 2, 5, 100 }, { 1, 1, 1, 10 }, {} },
SliceSpecificParams{ { 1, 12, 100 }, { 0, 9, 0 }, { 1, 11, 1 }, { 1, 1, 1 }, {} },
SliceSpecificParams{ { 1, 12, 100 }, { 0, 1, 0 }, { 10, -1, 10 }, { 1, 1, 1 }, {} },
SliceSpecificParams{ { 2, 12, 100 }, { 1, 12, 100 }, { 0, 7, 0 }, { -1, -1, -1 }, {} },
SliceSpecificParams{ { 2, 12, 100 }, { 1, 4, 99 }, { 0, 9, 0 }, { -1, 2, -1 }, {} },
SliceSpecificParams{ { 2, 12, 100 }, { -1, -1, -1 }, { 0, 4, 0 }, { -1, -2, -1 }, {} },
SliceSpecificParams{ { 2, 12, 100 }, { -1, -1, -1 }, { 0, 0, 4 }, { -1, -1, -1 }, {2, 0, 1} },
SliceSpecificParams{ { 2, 12, 100 }, { 0, 0, 4 }, { -5, -1, -1 }, { 1, 2, 1 }, {2, 0, 1} },
SliceSpecificParams{ { 2, 2, 2, 2 }, { 0, 0, 0, 0 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, {} },
SliceSpecificParams{ { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, {} },
SliceSpecificParams{ { 2, 2, 4, 3 }, { 0, 0, 0, 0 }, { 2, 2, 4, 3 }, { 1, 1, 2, 1 }, {} },
SliceSpecificParams{ { 2, 2, 4, 2 }, { 1, 0, 0, 1 }, { 2, 2, 4, 2 }, { 1, 1, 2, 1 }, {} },
SliceSpecificParams{ { 1, 2, 4, 2 }, { 0, 1, 0, 1 }, { 10, 2, 4, 2 }, { 1, 1, 2, 1 }, {} },
SliceSpecificParams{ { 10, 2, 4, 2 }, { 9, 1, 3, 0 }, { 0, 0, 0, 1 }, { -1, -1, -1, 1 }, {} },
SliceSpecificParams{ { 10, 2, 4, 2 }, { 19, 1, -1, 0 }, { -10, 0, 0, -1 }, { -1, -1, -1, 1 }, {} },
SliceSpecificParams{ { 3, 2, 4, 200 }, { 0, 1, -1, -1 }, { 3, 2, 0, 0 }, { 1, 1, -2, -1 }, {} },
SliceSpecificParams{ { 2, 4, 5, 5, 68 }, { 0, 1, 0, 0, 0 }, {
const std::vector<ElementType> inputPrecisionsOther = {
ElementType::i64,
ElementType::i32,
ElementType::i16,
ElementType::u8
};
std::vector<Slice8SpecificParams> staticParams = {
Slice8SpecificParams{ {{{}, {{ 16 }}}}, { 4 }, { 12 }, { 1 }, { 0 } },
Slice8SpecificParams{ {{{}, {{ 16 }}}}, { 0 }, { 8 }, { 2 }, { 0 } },
Slice8SpecificParams{ {{{}, {{ 20, 10, 5 }}}}, { 0, 0}, { 10, 20}, { 1, 1 }, { 1, 0 } },
Slice8SpecificParams{ {{{}, {{ 1, 2, 12, 100 }}}}, { 0, 1, 0, 1 }, { 1, 2, 5, 100 }, { 1, 1, 1, 10 }, {} },
Slice8SpecificParams{ {{{}, {{ 1, 12, 100 }}}}, { 0, 9, 0 }, { 1, 11, 1 }, { 1, 1, 1 }, { 0, 1, -1 } },
Slice8SpecificParams{ {{{}, {{ 1, 12, 100 }}}}, { 0, 1, 0 }, { 10, -1, 10 }, { 1, 1, 1 }, { -3, -2, -1} },
Slice8SpecificParams{ {{{}, {{ 2, 12, 100 }}}}, { 1, 12, 100 }, { 0, 7, 0 }, { -1, -1, -1 }, {} },
Slice8SpecificParams{ {{{}, {{ 2, 12, 100 }}}}, { 1, 4, 99 }, { 0, 9, 0 }, { -1, 2, -1 }, {} },
Slice8SpecificParams{ {{{}, {{ 2, 12, 100 }}}}, { -1, -1, -1 }, { 0, 4, 0 }, { -1, -2, -1 }, {} },
Slice8SpecificParams{ {{{}, {{ 2, 12, 100 }}}}, { -1, -1, -1 }, { 0, 0, 4 }, { -1, -1, -1 }, {2, 0, 1} },
Slice8SpecificParams{ {{{}, {{ 2, 12, 100 }}}}, { 0, 0, 4 }, { -5, -1, -1 }, { 1, 2, 1 }, {2, 0, 1} },
Slice8SpecificParams{ {{{}, {{ 2, 2, 2, 2 }}}}, { 0, 0, 0, 0 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, {} },
Slice8SpecificParams{ {{{}, {{ 2, 2, 2, 2 }}}}, { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, {} },
Slice8SpecificParams{ {{{}, {{ 2, 2, 4, 3 }}}}, { 0, 0, 0, 0 }, { 2, 2, 4, 3 }, { 1, 1, 2, 1 }, { -4, 1, -2, 3 } },
Slice8SpecificParams{ {{{}, {{ 2, 2, 4, 2 }}}}, { 1, 0, 0, 1 }, { 2, 2, 4, 2 }, { 1, 1, 2, 1 }, {} },
Slice8SpecificParams{ {{{}, {{ 1, 2, 4, 2 }}}}, { 0, 1, 0, 1 }, { 10, 2, 4, 2 }, { 1, 1, 2, 1 }, {} },
Slice8SpecificParams{ {{{}, {{ 1, 2, 4, 2 }}}}, { 1, 0, 1, 0 }, { 2, 4, 2, 10 }, { 1, 2, 1, 1 }, { -1, -2, -3, -4 } },
Slice8SpecificParams{ {{{}, {{ 10, 2, 4, 2 }}}}, { 9, 1, 3, 0 }, { 0, 0, 0, 1 }, { -1, -1, -1, 1 }, {} },
Slice8SpecificParams{ {{{}, {{ 10, 2, 4, 2 }}}}, { 19, 1, -1, 0 }, { -10, 0, 0, -1 }, { -1, -1, -1, 1 }, {} },
Slice8SpecificParams{ {{{}, {{ 3, 2, 4, 200 }}}}, { 0, 1, -1, -1 }, { 3, 2, 0, 0 }, { 1, 1, -2, -1 }, {} },
Slice8SpecificParams{ {{{}, {{ 2, 4, 5, 5, 68 }}}}, { 0, 1, 0, 0, 0 }, {
std::numeric_limits<std::int64_t>::max(),
std::numeric_limits<std::int64_t>::max(),
std::numeric_limits<std::int64_t>::max(),
std::numeric_limits<std::int64_t>::max(),
std::numeric_limits<std::int64_t>::max() }, { 1, 1, 1, 1, 16 }, {} },
SliceSpecificParams{ { 10, 12 }, { -1, 1 }, { -9999, 10 }, { -1, 1 }, {} },
SliceSpecificParams{ { 5, 5, 5, 5 }, { -1, 0, -1, 0 }, { -50, -1, -60, -1 }, { -1, 1, -1, 1 }, {} },
SliceSpecificParams{ { 1, 5, 32, 32 }, { 0, 2, 5, 4 }, { 1, 4, 28, 27 }, { 1, 1, 1, 1 }, { 0, 1, 2, 3 } },
SliceSpecificParams{ { 1, 5, 32, 20 }, { 0, 1, 0, 0 }, { 1, 3, 32, 20 }, { 1, 1, 1, 1 }, { 0, 1, 2, 3 } },
SliceSpecificParams{ { 2, 5, 32, 20 }, { 0, 0, 10, 0 }, { 1, 3, 20, 20 }, { 1, 1, 1, 1 }, { 0, 1, 2, 3 } },
SliceSpecificParams{ { 1, 5, 32, 32 }, { 0, 0, 20, 20 }, { 1, 5, 25, 26 }, { 1, 1, 1, 2 }, { 0, 1, 2, 3 } },
SliceSpecificParams{ { 2, 5, 32, 32 }, { 0, 0, 0, 20 }, { 1, 2, 30, 30 }, { 1, 1, 2, 1 }, { 0, 1, 2, 3 } },
SliceSpecificParams{ { 1, 5, 32, 20 }, { 0, 0, 2, 10 }, { 1, 3, 32, 20 }, { 1, 1, 1, 1 }, { 0, 1, 2, 3 } },
SliceSpecificParams{ { 2, 5, 32, 32 }, { 0, 1, 0, 10 }, { 1, 5, 32, 30 }, { 1, 1, 1, 1 }, { 0, 1, 2, 3 } },
SliceSpecificParams{ { 1, 5, 32, 20 }, { 0, 1, 2, 10 }, { 1, 5, 32, 18 }, { 1, 1, 1, 2 }, { 0, 1, 2, 3 } },
SliceSpecificParams{ { 2, 8, 32, 20 }, { 0, 0, 2, 10 }, { 1, 8, 32, 18 }, { 1, 2, 1, 2 }, { 0, 1, 2, 3 } },
SliceSpecificParams{ { 2, 8, 32, 20 }, { 0, -20, -15 }, { 2, -5, 3 }, { 1, 1, 1 }, { 0, 2, 1 } },
// Plugin Error: Slice has zero dimension which is not allowed
// SliceSpecificParams{ { 2, 8, 32, 20 }, { 0, 0, 10 }, { 0, 32, 18 }, { 1, 1, 1 }, { 0, 1, 2 } },
// SliceSpecificParams{ { 2, 8, 32, 20 }, { 0, 0, 10 }, { 1, 0, 20 }, { 1, 1, 1 }, { 0, 1, 2 } },
// SliceSpecificParams{ { 2, 8, 32, 20 }, { 0, 4, 10 }, { 2, 8, 0 }, { 1, 1, 1 }, { 0, 1, 2 } },
// SliceSpecificParams{ { 2, 8, 32, 20 }, { 0, 4, 10 }, { 2, 8, 0 }, { 1, 1, 1 }, { 0, 2, 1 } },
// SliceSpecificParams{ { 2, 8, 32, 20 }, { 0, 4, 10 }, { 2, 8, 0 }, { 1, 1, 1 }, { 0, -2, -1 } },
Slice8SpecificParams{ {{{}, {{ 10, 12 }}}}, { -1, 1 }, { -9999, 10 }, { -1, 1 }, {} },
Slice8SpecificParams{ {{{}, {{ 5, 5, 5, 5 }}}}, { -1, 0, -1, 0 }, { -50, -1, -60, -1 }, { -1, 1, -1, 1 }, {} },
Slice8SpecificParams{ {{{}, {{ 1, 5, 32, 32 }}}}, { 0, 2, 5, 4 }, { 1, 4, 28, 27 }, { 1, 1, 1, 1 }, { 0, 1, 2, 3 } },
Slice8SpecificParams{ {{{}, {{ 1, 5, 32, 20 }}}}, { 0, 1, 0, 0 }, { 1, 3, 32, 20 }, { 1, 1, 1, 1 }, { 0, 1, 2, 3 } },
Slice8SpecificParams{ {{{}, {{ 2, 5, 32, 20 }}}}, { 0, 0, 10, 0 }, { 1, 3, 20, 20 }, { 1, 1, 1, 1 }, { 0, 1, 2, 3 } },
Slice8SpecificParams{ {{{}, {{ 1, 5, 32, 32 }}}}, { 0, 0, 20, 20 }, { 1, 5, 25, 26 }, { 1, 1, 1, 2 }, { 0, 1, 2, 3 } },
Slice8SpecificParams{ {{{}, {{ 2, 5, 32, 32 }}}}, { 0, 0, 0, 20 }, { 1, 2, 30, 30 }, { 1, 1, 2, 1 }, { 0, 1, 2, 3 } },
Slice8SpecificParams{ {{{}, {{ 1, 5, 32, 20 }}}}, { 0, 0, 2, 10 }, { 1, 3, 32, 20 }, { 1, 1, 1, 1 }, { 0, 1, 2, 3 } },
Slice8SpecificParams{ {{{}, {{ 2, 5, 32, 32 }}}}, { 0, 1, 0, 10 }, { 1, 5, 32, 30 }, { 1, 1, 1, 1 }, { 0, 1, 2, 3 } },
Slice8SpecificParams{ {{{}, {{ 1, 5, 32, 20 }}}}, { 0, 1, 2, 10 }, { 1, 5, 32, 18 }, { 1, 1, 1, 2 }, { 0, 1, 2, 3 } },
Slice8SpecificParams{ {{{}, {{ 2, 8, 32, 20 }}}}, { 0, 0, 2, 10 }, { 1, 8, 32, 18 }, { 1, 2, 1, 2 }, { 0, 1, 2, 3 } },
Slice8SpecificParams{ {{{}, {{ 2, 8, 32, 20 }}}}, { 0, -20, -15 }, { 2, -5, 3 }, { 1, 1, 1 }, { 0, 2, 1 } }
};
INSTANTIATE_TEST_SUITE_P(
smoke_MKLDNN, SliceLayerTest,
INSTANTIATE_TEST_SUITE_P(smoke_Static, Slice8LayerTest,
::testing::Combine(
::testing::ValuesIn(test_cases),
::testing::ValuesIn(inputPrecision),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::ValuesIn(staticParams),
::testing::ValuesIn(inputPrecisions),
::testing::Values(ElementType::undefined),
::testing::Values(ElementType::undefined),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(std::map<std::string, std::string>())),
SliceLayerTest::getTestCaseName);
Slice8LayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_PrecisionTransformation, Slice8LayerTest,
::testing::Combine(
::testing::Values(staticParams[0]),
::testing::ValuesIn(inputPrecisionsOther),
::testing::Values(ElementType::undefined),
::testing::Values(ElementType::undefined),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(std::map<std::string, std::string>())),
Slice8LayerTest::getTestCaseName);
std::vector<Slice8SpecificParams> dynamicParams = {
Slice8SpecificParams{ {{{ -1 }, {{ 8 }, { 16 }}}}, { 4 }, { 12 }, { 1 }, { 0 } },
Slice8SpecificParams{ {{{ ov::Dimension(2, 20) }, {{ 5 }, { 15 }}}}, { 0 }, { 8 }, { 2 }, { 0 } },
Slice8SpecificParams{ {{{ -1, -1, -1 }, {{ 20, 10, 5 }, {5, 10, 20}}}}, { 0, 0}, { 10, 20}, { 1, 1 }, { 1, 0 } },
Slice8SpecificParams{ {{{ -1, -1, -1, -1 }, {{ 1, 2, 12, 100 }}}}, { 0, 1, 0, 1 }, { 1, 2, 5, 100 }, { 1, 1, 1, 10 }, {} },
Slice8SpecificParams{ {{{ -1, ov::Dimension(2, 20), -1 }, {{ 1, 12, 100 }, { 2, 12, 100 }}}}, { 0, 9, 0 }, { 1, 11, 1 }, { 1, 1, 1 }, {} },
Slice8SpecificParams{ {{{ ov::Dimension(1, 5), ov::Dimension(1, 5), ov::Dimension(1, 5), ov::Dimension(1, 5) },
{{ 2, 2, 2, 2 }, { 2, 2, 4, 3 }, { 2, 2, 4, 2 }, { 1, 2, 4, 2 }}}},
{ 0, 0, 0, 0 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, {} },
Slice8SpecificParams{ {{{ -1, ov::Dimension(1, 5), ov::Dimension(1, 5), -1 }, {{ 10, 2, 4, 2 }, { 10, 4, 2, 2 }}}},
{ 9, 1, 3, 0 }, { 0, 0, 0, 1 }, { -1, -1, -1, 1 }, {} },
Slice8SpecificParams{ {{{ -1, ov::Dimension(1, 5), -1, -1, ov::Dimension(30, 70) }, {{ 2, 4, 5, 5, 68 }, { 2, 3, 7, 7, 33 }}}},
{ 0, 1, 0, 0, 0 }, {
std::numeric_limits<std::int64_t>::max(),
std::numeric_limits<std::int64_t>::max(),
std::numeric_limits<std::int64_t>::max(),
std::numeric_limits<std::int64_t>::max(),
std::numeric_limits<std::int64_t>::max() }, { 1, 1, 1, 1, 16 }, {} },
Slice8SpecificParams{ {{{ov::Dimension(1, 5), ov::Dimension(1, 7), ov::Dimension(1, 35), ov::Dimension(1, 35)},
{{ 1, 5, 32, 32 }, { 2, 5, 32, 20 }, { 2, 5, 32, 32 }}}}, { 0, 2, 5, 4 }, { 1, 4, 28, 27 }, { 1, 1, 1, 1 }, { 0, 1, 2, 3 } }
};
INSTANTIATE_TEST_SUITE_P(smoke_Dynamic, Slice8LayerTest,
::testing::Combine(
::testing::ValuesIn(dynamicParams),
::testing::ValuesIn(inputPrecisions),
::testing::Values(ElementType::undefined),
::testing::Values(ElementType::undefined),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(std::map<std::string, std::string>())),
Slice8LayerTest::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,554 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph_functions/builders.hpp"
#include "test_utils/cpu_test_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
using namespace CPUTestUtils;
using namespace ov::test;
namespace CPULayerTestsDefinitions {
struct Slice8SpecificParams {
std::vector<int64_t> start;
std::vector<int64_t> stop;
std::vector<int64_t> step;
std::vector<int64_t> axes;
};
typedef std::tuple<
std::vector<InputShape>, // Parameters shapes
Slice8SpecificParams, // Slice-8 specific parameters
ElementType, // Network precision
CPUSpecificParams // CPU specific parameters
> Slice8LayerTestCPUParam;
class Slice8LayerCPUTest : public testing::WithParamInterface<Slice8LayerTestCPUParam>,
virtual public SubgraphBaseTest, public CPUTestsBase {
public:
static std::string getTestCaseName(testing::TestParamInfo<Slice8LayerTestCPUParam> obj) {
std::vector<InputShape> shapes;
Slice8SpecificParams
params;
ElementType netPrecision;
CPUSpecificParams cpuParams;
std::tie(shapes, params, netPrecision, cpuParams) = obj.param;
std::ostringstream result;
result << "IS=(";
for (const auto& shape : shapes) {
result << CommonTestUtils::partialShape2str({shape.first}) << "_";
}
result << ")_TS=(";
for (const auto& shape : shapes) {
for (const auto& item : shape.second) {
result << CommonTestUtils::vec2str(item) << "_";
}
}
result << "start=" << CommonTestUtils::vec2str(params.start) << "_";
result << "stop=" << CommonTestUtils::vec2str(params.stop) << "_";
result << "step=" << CommonTestUtils::vec2str(params.step) << "_";
result << "axes=" << CommonTestUtils::vec2str(params.axes) << "_";
result << "netPRC=" << netPrecision << "_";
result << CPUTestsBase::getTestCaseName(cpuParams);
return result.str();
}
protected:
void SetUp() override {
std::vector<InputShape> shapes;
Slice8SpecificParams sliceParams;
ElementType netPrecision;
CPUSpecificParams cpuParams;
std::tie(shapes, sliceParams, netPrecision, cpuParams) = this->GetParam();
std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
selectedType = makeSelectedTypeStr(selectedType, netPrecision);
targetDevice = CommonTestUtils::DEVICE_CPU;
init_input_shapes(shapes);
auto params = ngraph::builder::makeDynamicParams(netPrecision, inputDynamicShapes);
auto sliceOp = ngraph::builder::makeSlice(params[0], sliceParams.start, sliceParams.stop, sliceParams.step, sliceParams.axes, netPrecision);
function = makeNgraphFunction(netPrecision, params, sliceOp, "Slice8");
}
};
TEST_P(Slice8LayerCPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
run();
CheckPluginRelatedResults(executableNetwork, "Slice8");
}
namespace {
// CPU-specific configurations covering the memory layouts the plugin supports:
// channel-blocked (nChw16c/nChw8c and their 5D nCdhw* counterparts),
// channels-last (nhwc/ndhwc) and plain (nchw/ncdhw).
const auto cpuParams_nChw16c = CPUSpecificParams {{nChw16c}, {nChw16c}, {}, {}};
const auto cpuParams_nCdhw16c = CPUSpecificParams {{nCdhw16c}, {nCdhw16c}, {}, {}};
const auto cpuParams_nChw8c = CPUSpecificParams {{nChw8c}, {nChw8c}, {}, {}};
const auto cpuParams_nCdhw8c = CPUSpecificParams {{nCdhw8c}, {nCdhw8c}, {}, {}};
const auto cpuParams_nhwc = CPUSpecificParams {{nhwc}, {nhwc}, {}, {}};
const auto cpuParams_ndhwc = CPUSpecificParams {{ndhwc}, {ndhwc}, {}, {}};
const auto cpuParams_nchw = CPUSpecificParams {{nchw}, {nchw}, {}, {}};
const auto cpuParams_ncdhw = CPUSpecificParams {{ncdhw}, {ncdhw}, {}, {}};
// Network precisions every test case is repeated with.
const std::vector<ElementType> inputPrecisions = {
ElementType::f32,
ElementType::bf16,
ElementType::i8
};
// 2D dynamic input shapes: fully dynamic, partially dynamic (fixed dim 1)
// and bounded-range dims, each with several concrete shape instances.
const std::vector<std::vector<InputShape>> inputShapesDynamic2D = {
{
{ // Origin dynamic shape
{-1, -1},
{ // Dynamic shapes instances
{32, 20}, {16, 16}, {24, 16}
}
}
},
{
{ // Origin dynamic shape
{-1, 16},
{ // Dynamic shapes instances
{16, 16}, {20, 16}, {32, 16}
}
}
},
{
{ // Origin dynamic shape
{ {16, 32}, {16, 32} },
{ // Dynamic shapes instances
{16, 32}, {32, 16}, {24, 24}
}
}
}
};
// Slice-8 attribute sets for plain 2D inputs: {start, stop, step, axes}.
// An empty axes vector means the default axes (0, 1, ..., rank-1).
const std::vector<Slice8SpecificParams> paramsPlain2D = {
Slice8SpecificParams{ { 0, 10 }, { 16, 16 }, { 1, 1 }, { 0, 1 } },
Slice8SpecificParams{ { 2, 5 }, { 16, 8 }, { 1, 1 }, { } },
Slice8SpecificParams{ { 2, 5 }, { 16, 16 }, { 1, 2 }, { 0, 1 } },
Slice8SpecificParams{ { 0, 0 }, { 16, 16 }, { 1, 2 }, { 1, 0} },
Slice8SpecificParams{ { 0 }, { 16 }, { 2 }, { 0 } },
Slice8SpecificParams{ { 0 }, { 16 }, { 1 }, { 1 } }
};
// Plain-layout 2D suites: one with a fixed static shape, one over the
// dynamic-shape configurations above.
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Plain_Static_2D, Slice8LayerCPUTest,
::testing::Combine(
::testing::Values(static_shapes_to_test_representation({{32, 20}})),
::testing::ValuesIn(paramsPlain2D),
::testing::ValuesIn(inputPrecisions),
::testing::Values(emptyCPUSpec)),
Slice8LayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Plain_Dynamic_2D, Slice8LayerCPUTest,
::testing::Combine(
::testing::ValuesIn(inputShapesDynamic2D),
::testing::ValuesIn(paramsPlain2D),
::testing::ValuesIn(inputPrecisions),
::testing::Values(emptyCPUSpec)),
Slice8LayerCPUTest::getTestCaseName);
// Common (non-blocked) 4D slice attribute sets: {start, stop, step, axes}.
// Cases include permuted axes orders, partial axes (3- and 2-element sets)
// and empty axes (defaults to all dims in order).
const std::vector<Slice8SpecificParams> testCasesCommon4D = {
Slice8SpecificParams{ { 0, 2, 5, 4 }, { 1, 4, 28, 27 }, { 1, 1, 1, 1 }, { } },
Slice8SpecificParams{ { 0, 1, 0, 0 }, { 20, 3, 32, 1 }, { 1, 1, 1, 1 }, { 3, 1, 2, 0 } },
Slice8SpecificParams{ { 0, 0, 10, 0 }, { 1, 3, 20, 20 }, { 1, 1, 1, 1 }, { } },
Slice8SpecificParams{ { 0, 0, 20, 20 }, { 1, 5, 26, 25 }, { 1, 1, 2, 1 }, { 0, 1, 3, 2 } },
Slice8SpecificParams{ { 0, 0, 0, 20 }, { 1, 2, 30, 30 }, { 1, 1, 2, 1 }, { } },
Slice8SpecificParams{ { 0, 0, 2, 10 }, { 1, 3, 32, 20 }, { 1, 1, 1, 1 }, { 0, 1, 2, 3 } },
Slice8SpecificParams{ { 0, 1, 0, 10 }, { 1, 5, 32, 30 }, { 1, 1, 1, 1 }, { } },
Slice8SpecificParams{ { 0, 1, 2, 10 }, { 1, 5, 32, 18 }, { 1, 1, 1, 2 }, { 0, 1, 2, 3 } },
Slice8SpecificParams{ { 0, 0, 2, 10 }, { 1, 8, 32, 18 }, { 1, 2, 1, 2 }, { } },
Slice8SpecificParams{ { 0, 0, 10 }, { 2, 32, 18 }, { 1, 1, 1 }, { 1, 2, 3 } },
Slice8SpecificParams{ { 0, 10 }, { 2, 32 }, { 1, 1 }, { 1, 3 } }
};
// Static 4D input shapes for the common suites.
const std::vector<std::vector<ov::Shape>> inputShapesStatic4D = {
{{ 1, 5, 32, 32 }}, {{ 2, 5, 32, 48 }}
};
// Dynamic 4D shape configurations: fully dynamic, fixed channel count,
// and bounded ranges.
const std::vector<std::vector<InputShape>> inputShapesDynamic4D = {
{
{ // Origin dynamic shape
{-1, -1, -1, -1},
{ // Dynamic shapes instances
{ 1, 5, 32, 32 }, { 2, 5, 32, 32 }, { 1, 5, 64, 64 }
}
}
},
{
{ // Origin dynamic shape
{-1, 5, -1, -1},
{ // Dynamic shapes instances
{ 1, 5, 32, 32 }, { 2, 5, 32, 32 }, { 3, 5, 32, 36 }
}
}
},
{
{ // Origin dynamic shape
{{1, 5}, 5, {32, 64}, {32, 64}},
{ // Dynamic shapes instances
{ 2, 5, 32, 32 }, { 1, 5, 48, 32 }, { 5, 5, 32, 32 }
}
}
}
};
// Non-blocked layouts exercised by the common 4D suites.
const std::vector<CPUSpecificParams> CPUParamsCommon4D = {
cpuParams_nchw,
cpuParams_nhwc,
};
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Static_4D, Slice8LayerCPUTest,
::testing::Combine(
::testing::ValuesIn(static_shapes_to_test_representation(inputShapesStatic4D)),
::testing::ValuesIn(testCasesCommon4D),
::testing::ValuesIn(inputPrecisions),
::testing::ValuesIn(CPUParamsCommon4D)),
Slice8LayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_4D, Slice8LayerCPUTest,
::testing::Combine(
::testing::ValuesIn(inputShapesDynamic4D),
::testing::ValuesIn(testCasesCommon4D),
::testing::ValuesIn(inputPrecisions),
::testing::ValuesIn(CPUParamsCommon4D)),
Slice8LayerCPUTest::getTestCaseName);
// Blocked-layout 4D cases, subset 1: slicing that does not cut the channel
// dimension at a non-block-aligned boundary (channel extent stays full).
const std::vector<Slice8SpecificParams> testCasesBlocked4DSubset1 = {
Slice8SpecificParams{ { 0, 0, 0, 0 }, { 1, 32, 32, 32 }, { 1, 1, 1, 1 }, { } },
Slice8SpecificParams{ { 0, 0, 16, 0 }, { 1, 32, 32, 32 }, { 1, 1, 1, 1 }, { 0, 3, 2, 1 } },
Slice8SpecificParams{ { 0, 0, 0 }, { 32, 32, 16 }, { 1, 1, 1 }, { } },
Slice8SpecificParams{ { 0, 0, 0 }, { 16, 32, 32 }, { 1, 1, 1 }, { 1, 3, 2 } },
};
// Blocked-layout 4D cases, subset 2: slicing on the channel axis, including
// negative start/stop values (counted from the end of the dimension).
const std::vector<Slice8SpecificParams> testCasesBlocked4DSubset2 = {
Slice8SpecificParams{ { 0, 0, 5, 4 }, { 1, 16, 28, 27 }, { 1, 1, 1, 1 }, { 0, 1, 2, 3 } },
Slice8SpecificParams{ { 0, 16, 0, 0 }, { 1, 32, 10, 10 }, { 1, 1, 1, 1 }, { } },
Slice8SpecificParams{ { 0, 0, 10, 0 }, { 16, 1, 20, 10 }, { 1, 1, 1, 1 }, { 1, 0, 2, 3 } },
Slice8SpecificParams{ { 0, 0, 20, 20 }, { 1, 32, 25, 25 }, { 1, 1, 1, 1 }, { 0, 1, 3, 2 } },
Slice8SpecificParams{ { 0, 16, 0, 20 }, { 32, 32, 1, 30 }, { 1, 1, 1, 2 }, { 2, 1, 0, 3 } },
Slice8SpecificParams{ { 0, 16, 2, 10 }, { 1, 32, 32, 20 }, { 1, 1, 2, 1 }, { 0, 1, 2, 3 } },
Slice8SpecificParams{ { 0, 16, 0, 0 }, { 2, 64, 32, 20 }, { 1, 1, 1, 1 }, { 0, 1, 2, 3 } },
Slice8SpecificParams{ { 0, 32, 0, 0 }, { 2, 50, 32, 20 }, { 1, 1, 1, 1 }, { } },
Slice8SpecificParams{ { 0, 0, 0, 0 }, { 32, 12, 2, 20 }, { 1, 1, 1, 1 }, { 0, 3, 2, 1 } },
Slice8SpecificParams{ { 0, -16, 0, 10 }, { 2, 100, 32, 20 }, { 1, 1, 1, 1 }, { } },
Slice8SpecificParams{ { 0, -16, 0, 0 }, { 2, -4, 32, 20 }, { 1, 1, 1, 1 }, { } },
Slice8SpecificParams{ { 0, -32, 0, 0 }, { 2, -12, 32, 20 }, { 1, 1, 1, 1 }, { } }
};
// Static shapes for the blocked 4D subsets (channels divisible by 16/8).
const std::vector<std::vector<ov::Shape>> inputShapesBlockedStatic4DSubset1 = {
{{ 1, 32, 32, 32 }}, {{ 1, 32, 32, 64 }}
};
const std::vector<std::vector<ov::Shape>> inputShapesBlockedStatic4DSubset2 = {
{{ 1, 64, 32, 32 }}, {{ 1, 64, 32, 64 }}
};
// Dynamic shapes for the blocked 4D subsets; the channel dimension stays
// static so the blocked layout is applicable.
const std::vector<std::vector<InputShape>> inputShapesBlockedDynamic4DSubset1 = {
{
{ // Origin dynamic shape
{-1, 32, -1, -1},
{ // Dynamic shapes instances
{ 1, 32, 32, 32 }, { 2, 32, 32, 32 }, { 3, 32, 32, 48 }
}
}
},
{
{ // Origin dynamic shape
{{1, 5}, 32, {32, 64}, {32, 64}},
{ // Dynamic shapes instances
{ 2, 32, 32, 32 }, { 1, 32, 48, 32 }, { 5, 32, 32, 48 }
}
}
}
};
const std::vector<std::vector<InputShape>> inputShapesBlockedDynamic4DSubset2 = {
{
{ // Origin dynamic shape
{-1, 64, -1, -1},
{ // Dynamic shapes instances
{ 1, 64, 64, 32 }, { 2, 64, 32, 32 }, { 3, 64, 32, 48 }
}
}
},
{
{ // Origin dynamic shape
{{1, 5}, 64, {32, 64}, {32, 64}},
{ // Dynamic shapes instances
{ 2, 64, 32, 32 }, { 1, 64, 48, 32 }, { 1, 64, 64, 64 }
}
}
}
};
// Channel-blocked layouts under test for 4D inputs.
const std::vector<CPUSpecificParams> CPUParamsBlocked4D = {
cpuParams_nChw16c,
cpuParams_nChw8c,
};
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Static_4D_Subset1, Slice8LayerCPUTest,
::testing::Combine(
::testing::ValuesIn(static_shapes_to_test_representation(inputShapesBlockedStatic4DSubset1)),
::testing::ValuesIn(testCasesBlocked4DSubset1),
::testing::ValuesIn(inputPrecisions),
::testing::ValuesIn(CPUParamsBlocked4D)),
Slice8LayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_4D_Subset1, Slice8LayerCPUTest,
::testing::Combine(
::testing::ValuesIn(inputShapesBlockedDynamic4DSubset1),
::testing::ValuesIn(testCasesBlocked4DSubset1),
::testing::ValuesIn(inputPrecisions),
::testing::ValuesIn(CPUParamsBlocked4D)),
Slice8LayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Static_4D_Subset2, Slice8LayerCPUTest,
::testing::Combine(
::testing::ValuesIn(static_shapes_to_test_representation(inputShapesBlockedStatic4DSubset2)),
::testing::ValuesIn(testCasesBlocked4DSubset2),
::testing::ValuesIn(inputPrecisions),
::testing::ValuesIn(CPUParamsBlocked4D)),
Slice8LayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_4D_Subset2, Slice8LayerCPUTest,
::testing::Combine(
::testing::ValuesIn(inputShapesBlockedDynamic4DSubset2),
::testing::ValuesIn(testCasesBlocked4DSubset2),
::testing::ValuesIn(inputPrecisions),
::testing::ValuesIn(CPUParamsBlocked4D)),
Slice8LayerCPUTest::getTestCaseName);
// Common (non-blocked) 5D slice attribute sets: {start, stop, step, axes}.
const std::vector<Slice8SpecificParams> testCasesCommon5D = {
Slice8SpecificParams{ { 0, 2, 0, 5, 4 }, { 1, 4, 5, 28, 27 }, { 1, 1, 1, 1, 1 }, { 0, 1, 2, 3, 4 } },
Slice8SpecificParams{ { 0, 0, 10, 0, 0 }, { 1, 5, 20, 32, 20 }, { 1, 1, 1, 1, 1 }, { } },
Slice8SpecificParams{ { 0, 1, 10, 0, 0 }, { 20, 3, 20, 32, 1 }, { 1, 1, 1, 1, 1 }, { 4, 1, 2, 3, 0 } },
Slice8SpecificParams{ { 0, 20, 0, 0, 20 }, { 1, 30, 20, 5, 26 }, { 1, 1, 1, 2, 2 }, { 0, 3, 2, 1, 4 } },
Slice8SpecificParams{ { 0, 0, 10, 0, 20 }, { 1, 2, 20, 30, 30 }, { 1, 1, 2, 1, 1 }, { } },
Slice8SpecificParams{ { 0, 0, 2, 10, 0 }, { 1, 5, 10, 32, 20 }, { 1, 1, 1, 1, 1 }, { 0, 1, 2, 3, 4 } },
Slice8SpecificParams{ { 0, 1, 0, 10, 0 }, { 1, 5, 20, 32, 32 }, { 1, 1, 1, 1, 1 }, { } },
Slice8SpecificParams{ { 0, 0, 0, 0, 0 }, { 1, 5, 10, 16, 16 }, { 1, 1, 2, 1, 1 }, { 0, 1, 2, 3, 4 } }
};
// Static 5D input shapes for the common suites.
const std::vector<std::vector<ov::Shape>> inputShapesStatic5D = {
{{ 1, 5, 20, 32, 32 }}, {{ 2, 5, 32, 32, 32 }}
};
// Dynamic 5D shape configurations: fully dynamic, fixed channels, bounded ranges.
const std::vector<std::vector<InputShape>> inputShapesDynamic5D = {
{
{ // Origin dynamic shape
{-1, -1, -1, -1, -1},
{ // Dynamic shapes instances
{ 1, 5, 32, 32, 32 }, { 1, 5, 32, 32, 48 }, { 1, 5, 64, 64, 64 }, { 1, 10, 32, 32, 32 }
}
}
},
{
{ // Origin dynamic shape
{-1, 5, -1, -1, -1},
{ // Dynamic shapes instances
{ 1, 5, 32, 32, 48 }, { 1, 5, 32, 48, 32 }, { 1, 5, 48, 32, 32 }
}
}
},
{
{ // Origin dynamic shape
{{1, 5}, 5, {32, 64}, {32, 64}, {32, 64}},
{ // Dynamic shapes instances
{ 2, 5, 32, 32, 32 }, { 1, 5, 48, 32, 32 }, { 5, 5, 32, 32, 48 }
}
}
}
};
// Non-blocked layouts exercised by the common 5D suites.
const std::vector<CPUSpecificParams> CPUParamsCommon5D = {
cpuParams_ncdhw,
cpuParams_ndhwc,
};
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Static_5D, Slice8LayerCPUTest,
::testing::Combine(
::testing::ValuesIn(static_shapes_to_test_representation(inputShapesStatic5D)),
::testing::ValuesIn(testCasesCommon5D),
::testing::ValuesIn(inputPrecisions),
::testing::ValuesIn(CPUParamsCommon5D)),
Slice8LayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_5D, Slice8LayerCPUTest,
::testing::Combine(
::testing::ValuesIn(inputShapesDynamic5D),
::testing::ValuesIn(testCasesCommon5D),
::testing::ValuesIn(inputPrecisions),
::testing::ValuesIn(CPUParamsCommon5D)),
Slice8LayerCPUTest::getTestCaseName);
// Blocked-layout 5D cases, subset 1: channel extent kept block-friendly.
const std::vector<Slice8SpecificParams> testCasesBlocked5DSubset1 = {
Slice8SpecificParams{ { 0, 0, 0, 5, 4 }, { 1, 16, 5, 28, 27 }, { 1, 1, 1, 1, 1 }, { 0, 1, 2, 3, 4 } },
Slice8SpecificParams{ { 0, 0, 10, 0, 0 }, { 1, 16, 20, 32, 20 }, { 1, 1, 1, 1, 1 }, { } },
Slice8SpecificParams{ { 0, 0, 0, 20, 20 }, { 16, 1, 20, 26, 30 }, { 1, 1, 1, 2, 2 }, { 1, 0, 2, 4, 3 } },
Slice8SpecificParams{ { 0, 0, 10, 0, 20 }, { 1, 16, 20, 30, 30 }, { 1, 1, 2, 1, 1 }, { } },
Slice8SpecificParams{ { 0, 0, 2, 10, 0 }, { 1, 16, 10, 32, 20 }, { 1, 1, 1, 1, 1 }, { } },
Slice8SpecificParams{ { 0, 0, 0, 10, 0 }, { 1, 8, 20, 32, 32 }, { 1, 1, 1, 1, 1 }, { } },
Slice8SpecificParams{ { 0, 0, 0, 0, 0 }, { 1, 16, 10, 16, 16 }, { 1, 1, 2, 1, 1 }, { 0, 1, 2, 3, 4 } },
};
// Blocked-layout 5D cases, subset 2: channel-axis slicing, including a 4D
// attribute set applied over axes {1..4} of a 5D input.
const std::vector<Slice8SpecificParams> testCasesBlocked5DSubset2 = {
Slice8SpecificParams{ { 0, 0, 0, 5, 4 }, { 1, 16, 5, 28, 27 }, { 1, 1, 1, 1, 1 }, { 0, 1, 2, 3, 4 } },
Slice8SpecificParams{ { 0, 0, 5, 4 }, { 16, 5, 28, 27 }, { 1, 1, 1, 1 }, { 1, 2, 3, 4 } },
Slice8SpecificParams{ { 0, 0, 10, 0, 0 }, { 1, 16, 20, 32, 20 }, { 1, 1, 1, 1, 1 }, { 0, 1, 2, 3, 4 } },
Slice8SpecificParams{ { 0, 0, 0, 20, 20 }, { 1, 20, 16, 30, 26 }, { 1, 1, 1, 2, 2 }, { 0, 2, 1, 3, 4 } },
Slice8SpecificParams{ { 0, 0, 10, 0, 20 }, { 1, 16, 20, 30, 30 }, { 1, 1, 2, 1, 1 }, { 0, 1, 2, 3, 4 } },
Slice8SpecificParams{ { 0, 0, 2, 10, 0 }, { 1, 16, 10, 32, 20 }, { 1, 1, 1, 1, 1 }, { 0, 1, 2, 3, 4 } },
Slice8SpecificParams{ { 0, 0, 0, 10, 0 }, { 1, 8, 20, 32, 32 }, { 1, 1, 1, 1, 1 }, { 0, 1, 2, 3, 4 } },
Slice8SpecificParams{ { 0, 0, 0, 0, 0 }, { 10, 16, 1, 16, 16 }, { 2, 1, 1, 1, 1 }, { 2, 1, 0, 3, 4 } },
Slice8SpecificParams{ { 0, 0, 0, 0, 0 }, { 1, 25, 20, 10, 10 }, { 1, 1, 1, 1, 1 }, { } },
Slice8SpecificParams{ { 0, 16, 0, 0, 0 }, { 1, 25, 20, 10, 10 }, { 1, 1, 1, 1, 1 }, { 0, 1, 2, 3, 4 } },
Slice8SpecificParams{ { 0, 16, 0, 0, 0 }, { 1, 64, 20, 10, 10 }, { 1, 1, 1, 1, 1 }, { 0, 1, 2, 3, 4 } },
};
// Static shapes for the blocked 5D subsets (channels divisible by 16/8).
const std::vector<std::vector<ov::Shape>> inputShapesBlockedStatic5DSubset1 = {
{{ 1, 16, 32, 32, 32 }}, {{ 2, 16, 32, 32, 32 }}, {{ 2, 32, 32, 32, 32 }}
};
const std::vector<std::vector<ov::Shape>> inputShapesBlockedStatic5DSubset2 = {
{{ 1, 64, 32, 32, 32 }}, {{ 2, 64, 32, 64, 32 }}, {{ 2, 64, 32, 32, 32 }}
};
// Dynamic shapes for the blocked 5D subsets; the channel dimension stays
// static so the blocked layout is applicable.
const std::vector<std::vector<InputShape>> inputShapesBlockedDynamic5DSubset1 = {
{
{ // Origin dynamic shape
{-1, 16, -1, -1, -1},
{ // Dynamic shapes instances
{ 1, 16, 32, 32, 32 }, { 2, 16, 32, 32, 32 }, { 2, 16, 32, 32, 32 }
}
}
},
{
{ // Origin dynamic shape
{{1, 5}, 16, {16, 32}, {16, 32}, {16, 32}},
{ // Dynamic shapes instances
{ 1, 16, 32, 32, 32 }, { 2, 16, 32, 32, 32 }, { 1, 16, 20, 32, 32 }
}
}
}
};
const std::vector<std::vector<InputShape>> inputShapesBlockedDynamic5DSubset2 = {
{
{ // Origin dynamic shape
{-1, 64, -1, -1, -1},
{ // Dynamic shapes instances
{ 1, 64, 64, 32, 32 }, { 2, 64, 32, 32, 32 }, { 3, 64, 32, 48, 32 }
}
},
},
{
{ // Origin dynamic shape
{{1, 5}, 64, {16, 32}, {16, 32}, {16, 32}},
{ // Dynamic shapes instances
{ 1, 64, 32, 32, 32 }, { 2, 64, 32, 32, 32 }, { 1, 64, 20, 32, 32 }
}
}
}
};
// Channel-blocked layouts under test for 5D inputs.
const std::vector<CPUSpecificParams> CPUParamsBlocked5D = {
cpuParams_nCdhw16c,
cpuParams_nCdhw8c,
};
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Static_5D_Subset1, Slice8LayerCPUTest,
::testing::Combine(
::testing::ValuesIn(static_shapes_to_test_representation(inputShapesBlockedStatic5DSubset1)),
::testing::ValuesIn(testCasesBlocked5DSubset1),
::testing::ValuesIn(inputPrecisions),
::testing::ValuesIn(CPUParamsBlocked5D)),
Slice8LayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_5D_Subset1, Slice8LayerCPUTest,
::testing::Combine(
::testing::ValuesIn(inputShapesBlockedDynamic5DSubset1),
::testing::ValuesIn(testCasesBlocked5DSubset1),
::testing::ValuesIn(inputPrecisions),
::testing::ValuesIn(CPUParamsBlocked5D)),
Slice8LayerCPUTest::getTestCaseName);
// Static blocked 5D, subset 2. NOTE: previously this suite was instantiated
// with the 4D data sets (inputShapesBlockedStatic4DSubset2 / testCasesBlocked4DSubset2 /
// CPUParamsBlocked4D) — an apparent copy-paste slip that duplicated the
// Static_4D_Subset2 suite instead of covering 5D. Fixed to use the 5D
// counterparts declared above.
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Static_5D_Subset2, Slice8LayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(static_shapes_to_test_representation(inputShapesBlockedStatic5DSubset2)),
                ::testing::ValuesIn(testCasesBlocked5DSubset2),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(CPUParamsBlocked5D)),
        Slice8LayerCPUTest::getTestCaseName);
// Dynamic blocked 5D, subset 2: channel-axis slicing over dynamic spatial dims.
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_5D_Subset2, Slice8LayerCPUTest,
::testing::Combine(
::testing::ValuesIn(inputShapesBlockedDynamic5DSubset2),
::testing::ValuesIn(testCasesBlocked5DSubset2),
::testing::ValuesIn(inputPrecisions),
::testing::ValuesIn(CPUParamsBlocked5D)),
Slice8LayerCPUTest::getTestCaseName);
/* Descriptors check */
// Negative suite: the parameter combinations below are expected to be rejected
// by the plugin at compile time (compile_model must throw ov::Exception) —
// presumably because valid nChw8c blocked-layout descriptors cannot be created
// for these slice configurations; confirm against the plugin's descriptor logic.
class Slice8LayerDescriptorCPUTest : public Slice8LayerCPUTest {};
TEST_P(Slice8LayerDescriptorCPUTest, DescriptorsCheck) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
ASSERT_THROW(compile_model(), ov::Exception);
}
// Slice attributes used for the descriptor rejection checks; 2147483647 is
// INT32_MAX used as an "up to the end of the dimension" stop value, and
// negative starts count from the end of the dimension.
const std::vector<Slice8SpecificParams> testCasesDescriptors = {
Slice8SpecificParams{ { 0, -4, 0, 0 }, { 0, 2147483647, 0, 0 }, { 1, 1, 1, 1 }, { 0, 1, 2, 3 } },
Slice8SpecificParams{ { 0, 5, 0, 0 }, { 1, 20, 28, 27 }, { 1, 1, 1, 1 }, { 0, 1, 2, 3 } },
Slice8SpecificParams{ { 0, 0, 0, 0 }, { 1, 2147483647, 32, 32 }, { 1, 2, 1, 1 }, { 0, 1, 2, 3 } }
};
// Input shapes for the descriptor checks: two static cases and one with a
// dynamic channel dimension.
const std::vector<std::vector<InputShape>> inputShapesDescriptors = {
{
{ {},
{ // Static shapes
{ 1, 16, 32, 32 }
}
}
},
{
{ {},
{ // Static shapes
{ 1, 17, 32, 32 }
}
}
},
{
{ // Origin dynamic shapes
{1, -1, 32, 32},
{ // Dynamic shapes instances
{ 1, 16, 32, 32 }, { 1, 32, 32, 32 }
}
}
}
};
INSTANTIATE_TEST_SUITE_P(smoke_Slice8LayerDescriptorCPUTest, Slice8LayerDescriptorCPUTest,
::testing::Combine(
::testing::ValuesIn(inputShapesDescriptors),
::testing::ValuesIn(testCasesDescriptors),
::testing::Values(ElementType::f32),
::testing::Values(cpuParams_nChw8c)),
Slice8LayerDescriptorCPUTest::getTestCaseName);
} // namespace
} // namespace CPULayerTestsDefinitions

View File

@@ -7,7 +7,7 @@
#include "shared_test_classes/single_layer/slice.hpp"
namespace LayerTestsDefinitions {
TEST_P(SliceLayerTest, CompareWithRefs) {
Run();
TEST_P(Slice8LayerTest, CompareWithRefs) {
run();
}
} // namespace LayerTestsDefinitions

View File

@@ -10,32 +10,33 @@
#include <vector>
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
namespace LayerTestsDefinitions {
struct SliceSpecificParams {
InferenceEngine::SizeVector inputShape;
std::vector<int64_t> start;
std::vector<int64_t> stop;
std::vector<int64_t> step;
std::vector<int64_t> axes;
struct Slice8SpecificParams {
std::vector<ov::test::InputShape> shapes;
std::vector<int64_t> start;
std::vector<int64_t> stop;
std::vector<int64_t> step;
std::vector<int64_t> axes;
};
using SliceParams = std::tuple<
SliceSpecificParams,
InferenceEngine::Precision, // Net precision
InferenceEngine::Precision, // Input precision
InferenceEngine::Precision, // Output precision
using Slice8Params = std::tuple<
Slice8SpecificParams, // Slice-8 specific parameters
ov::test::ElementType, // Net precision
ov::test::ElementType, // Input precision
ov::test::ElementType, // Output precision
InferenceEngine::Layout, // Input layout
InferenceEngine::Layout, // Output layout
std::string, // Device name
std::map<std::string, std::string> // Additional network configuration
>;
class SliceLayerTest : public testing::WithParamInterface<SliceParams>,
virtual public LayerTestsUtils::LayerTestsCommon {
class Slice8LayerTest : public testing::WithParamInterface<Slice8Params>,
virtual public ov::test::SubgraphBaseTest {
public:
static std::string getTestCaseName(const testing::TestParamInfo<SliceParams> &obj);
static std::string getTestCaseName(const testing::TestParamInfo<Slice8Params> &obj);
protected:
void SetUp() override;

View File

@@ -11,51 +11,50 @@ using namespace ngraph;
namespace LayerTestsDefinitions {
std::string SliceLayerTest::getTestCaseName(const testing::TestParamInfo<SliceParams> &obj) {
SliceSpecificParams params;
InferenceEngine::Precision netPrc;
InferenceEngine::Precision inPrc, outPrc;
std::string Slice8LayerTest::getTestCaseName(const testing::TestParamInfo<Slice8Params> &obj) {
std::vector<ov::test::InputShape> shapes;
Slice8SpecificParams params;
ov::element::Type_t netPrecision, inPrecision, outPrecision;
InferenceEngine::Layout inLayout, outLayout;
std::string targetName;
std::map<std::string, std::string> additionalConfig;
std::tie(params, netPrc, inPrc, outPrc, inLayout, outLayout, targetName, additionalConfig) = obj.param;
std::tie(params, netPrecision, inPrecision, outPrecision, inLayout, outLayout, targetName, additionalConfig) = obj.param;
std::ostringstream result;
result << "inShape=" << CommonTestUtils::vec2str(params.inputShape) << "_";
result << "netPRC=" << netPrc.name() << "_";
result << "start=" << CommonTestUtils::vec2str(params.start) << "_";
result << "stop=" << CommonTestUtils::vec2str(params.stop) << "_";
result << "step=" << CommonTestUtils::vec2str(params.step) << "_";
result << "axes=" << CommonTestUtils::vec2str(params.axes) << "_";
result << "trgDev=" << targetName;
result << "IS=(";
for (const auto& shape : params.shapes) {
result << CommonTestUtils::partialShape2str({shape.first}) << "_";
}
result << ")_TS=(";
for (const auto& shape : params.shapes) {
for (const auto& item : shape.second) {
result << CommonTestUtils::vec2str(item) << "_";
}
}
result << "start=" << CommonTestUtils::vec2str(params.start) << "_";
result << "stop=" << CommonTestUtils::vec2str(params.stop) << "_";
result << "step=" << CommonTestUtils::vec2str(params.step) << "_";
result << "axes=" << CommonTestUtils::vec2str(params.axes) << "_";
result << "netPRC=" << netPrecision << "_";
result << "trgDev=" << targetName;
return result.str();
}
void SliceLayerTest::SetUp() {
SliceSpecificParams sliceParams;
InferenceEngine::Precision netPrecision;
void Slice8LayerTest::SetUp() {
Slice8SpecificParams sliceParams;
ov::test::ElementType netPrecision, inPrecision, outPrecision;
InferenceEngine::Layout inLayout, outLayout;
std::map<std::string, std::string> additionalConfig;
std::tie(sliceParams, netPrecision, inPrc, outPrc, inLayout, outLayout, targetDevice, additionalConfig) = this->GetParam();
std::tie(sliceParams, netPrecision, inPrecision, outPrecision, inLayout, outLayout, targetDevice, additionalConfig) = this->GetParam();
configuration.insert(additionalConfig.begin(), additionalConfig.end());
init_input_shapes(sliceParams.shapes);
auto params = ngraph::builder::makeDynamicParams(netPrecision, inputDynamicShapes);
auto sliceOp = ngraph::builder::makeSlice(params[0], sliceParams.start, sliceParams.stop, sliceParams.step, sliceParams.axes, netPrecision);
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
element::Type_t et = element::i32;
const auto data = std::make_shared<opset8::Parameter>(ngPrc, Shape(sliceParams.inputShape));
const auto start = std::make_shared<opset8::Constant>(et, Shape{sliceParams.start.size()}, sliceParams.start);
const auto stop = std::make_shared<opset8::Constant>(et, Shape{sliceParams.stop.size()}, sliceParams.stop);
const auto step = std::make_shared<opset8::Constant>(et, Shape{sliceParams.step.size()}, sliceParams.step);
Output<Node> slice;
if (sliceParams.axes.empty()) {
slice = std::make_shared<opset8::Slice>(data, start, stop, step);
} else {
const auto axes = std::make_shared<opset8::Constant>(et, Shape{sliceParams.axes.size()}, sliceParams.axes);
slice = std::make_shared<opset8::Slice>(data, start, stop, step, axes);
}
ResultVector results{std::make_shared<opset8::Result>(slice)};
function = std::make_shared<Function>(results, ov::ParameterVector{data}, "Slice");
ov::ResultVector results;
for (int i = 0; i < sliceOp->get_output_size(); i++)
results.push_back(std::make_shared<ov::op::v0::Result>(sliceOp->output(i)));
function = std::make_shared<ngraph::Function>(results, params, "Slice-8");
}
} // namespace LayerTestsDefinitions

View File

@@ -294,6 +294,13 @@ std::shared_ptr<ngraph::Node> makeStridedSlice(const ngraph::Output<Node> &in,
const std::vector<int64_t> &shrink_mask = std::vector<int64_t>{},
const std::vector<int64_t> &ellipsis_mask = std::vector<int64_t>{});
std::shared_ptr<ngraph::Node> makeSlice(const ngraph::Output<Node> &in,
const std::vector<int64_t> &begin,
const std::vector<int64_t> &end,
const std::vector<int64_t> &stride,
const std::vector<int64_t> &axes,
const element::Type &type);
std::shared_ptr<ngraph::Node> makeMVN(const ngraph::Output<Node> &in,
bool acrossChannels,
bool normalizeVariance,

View File

@@ -6,7 +6,7 @@
namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makeStridedSlice(const ngraph::Output<Node> &in,
std::shared_ptr<ov::Node> makeStridedSlice(const ov::Output<Node> &in,
const std::vector<int64_t> &begin,
const std::vector<int64_t> &end,
const std::vector<int64_t> &stride,
@@ -16,14 +16,32 @@ std::shared_ptr<ngraph::Node> makeStridedSlice(const ngraph::Output<Node> &in,
const std::vector<int64_t> &new_axis_mask,
const std::vector<int64_t> &shrink_mask,
const std::vector<int64_t> &ellipsis_mask) {
ngraph::Shape constShape = {begin.size()};
auto beginNode = std::make_shared<ngraph::opset1::Constant>(ngraph::element::i64, constShape, begin.data());
auto endNode = std::make_shared<ngraph::opset1::Constant>(ngraph::element::i64, constShape, end.data());
auto strideNode = std::make_shared<ngraph::opset1::Constant>(ngraph::element::i64, constShape, stride.data());
auto ssNode = std::make_shared<ngraph::opset2::StridedSlice>(in, beginNode, endNode, strideNode, begin_mask, end_mask,
ov::Shape constShape = {begin.size()};
auto beginNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, constShape, begin.data());
auto endNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, constShape, end.data());
auto strideNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, constShape, stride.data());
auto ssNode = std::make_shared<ov::op::v1::StridedSlice>(in, beginNode, endNode, strideNode, begin_mask, end_mask,
new_axis_mask, shrink_mask, ellipsis_mask);
return ssNode;
}
/// Builds an opset8 Slice node over `in`.
/// @param in      input to slice
/// @param begin   per-axis start indices
/// @param end     per-axis stop indices
/// @param stride  per-axis steps
/// @param axes    axes the start/stop/step apply to; empty means default axes
/// @param type    unused; kept for interface compatibility with callers
/// @return the constructed ov::op::v8::Slice node
std::shared_ptr<ov::Node> makeSlice(const ov::Output<Node> &in,
                           const std::vector<int64_t> &begin,
                           const std::vector<int64_t> &end,
                           const std::vector<int64_t> &stride,
                           const std::vector<int64_t> &axes,
                           const element::Type &type) {
    ov::Shape constShape = {begin.size()};
    auto beginNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, constShape, begin.data());
    auto endNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, constShape, end.data());
    auto strideNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, constShape, stride.data());
    if (!axes.empty()) {
        // BUGFIX: the axes constant must be shaped by axes.size(), not by
        // constShape (= begin.size()). When the axes vector is shorter/longer
        // than begin, the old code read past the end of `axes` or produced a
        // constant with the wrong element count.
        auto axesNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{axes.size()}, axes.data());
        return std::make_shared<ov::op::v8::Slice>(in, beginNode, endNode, strideNode, axesNode);
    } else {
        return std::make_shared<ov::op::v8::Slice>(in, beginNode, endNode, strideNode);
    }
}
} // namespace builder
} // namespace ngraph