[IE][VPU][GT]: Process StridedSlice stage on device as one kernel (#1244)

* Remove the pass that replaced StridedSlice with other stages and execute StridedSlice on device as a single kernel.
* Refactor the strided slice tests so they can be parameterized by precision.
* Update firmware.
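For context, the semantics that the removed pass emulated with a chain of Crop, Expand, Reshape and Split stages, and that the new single kernel now handles on device, amount to taking every stride-th element of the [begin, end) range along each axis. A minimal 1-D sketch of that selection (illustration only, not code from this commit; positive strides assumed, matching the check added in the new parser):

#include <cassert>
#include <vector>

// Illustration only: select every `stride`-th element in [begin, end) along one axis.
std::vector<float> stridedSlice1D(const std::vector<float>& in, int begin, int end, int stride) {
    std::vector<float> out;
    for (int i = begin; i < end; i += stride) {
        out.push_back(in[i]);
    }
    return out;
}

int main() {
    const std::vector<float> in = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
    // Matches the { 10 }, begin { 1 }, end { 9 }, stride { 2 } -> { 4 } row
    // in the Myriad test table later in this diff.
    assert(stridedSlice1D(in, 1, 9, 2).size() == 4);
    return 0;
}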
Andrew Bakalin 2020-07-10 14:32:49 +03:00 committed by GitHub
parent 8768313fef
commit 45d1b4eb19
12 changed files with 333 additions and 478 deletions


@@ -19,7 +19,7 @@ set(VPU_SUPPORTED_FIRMWARES usb-ma2450 usb-ma2x8x pcie-ma248x)
# Default packages
#
set(FIRMWARE_PACKAGE_VERSION 1240)
set(FIRMWARE_PACKAGE_VERSION 1247)
set(VPU_CLC_MA2X8X_VERSION "movi-cltools-20.02.0")
#


@@ -58,7 +58,6 @@ VPU_DECLARE_ENUM(StageType,
Reshape,
Expand,
Crop,
StridedSlice,
Empty = -1,
@@ -168,6 +167,7 @@ VPU_DECLARE_ENUM(StageType,
Concat = 128,
Broadcast = 129,
StaticShapeNMS = 130,
StridedSlice = 133,
)
//


@@ -202,13 +202,6 @@ PassSet::Ptr PassManager::buildMiddleEnd() {
ADD_DUMP_PASS("replaceWithSCReLU");
}
//
// Replace StridedSlice to other stages
//
ADD_PASS(stridedSlice);
ADD_DUMP_PASS("stridedSlice");
//
// HW stages tiling
//


@@ -1,326 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vpu/middleend/pass_manager.hpp>
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <vpu/compile_env.hpp>
namespace vpu {
namespace {
struct StridedSliceParams {
DimValues begin;
DimValues end;
DimValues strides;
DimValues begin_mask;
DimValues end_mask;
};
struct StridedSliceInternalParams {
DimValues begin_dms;
DimValues end_dms;
DimValues strides_dms;
};
class PassImpl final : public Pass {
public:
explicit PassImpl(StageBuilder::Ptr stageBuilder) : _stageBuilder(std::move(stageBuilder)) {}
void run(const Model& model) override;
private:
StageBuilder::Ptr _stageBuilder;
static StridedSliceParams parseInputParams(const Stage& stage);
static StridedSliceInternalParams computeInternalParams(const Stage& stage, StridedSliceParams params);
};
StridedSliceParams PassImpl::parseInputParams(const Stage& stage) {
const auto input = stage->input(0);
const auto beginInput = stage->input(1);
const auto endInput = stage->input(2);
const auto num_input_dims = input->desc().numDims();
StridedSliceParams params;
IE_ASSERT(beginInput->content() != nullptr);
IE_ASSERT(endInput->content() != nullptr);
const auto numpyIdxVectorToDimValues = [&input](const std::vector<int>& values) {
auto dims = DimsOrder::fromNumDims(values.size()).toIndices();
// IE notation to GT notation
std::vector<int> revertedValues(values.size());
std::reverse_copy(values.begin(), values.end(), revertedValues.begin());
int idx = 0;
for (auto& dim : dims) {
auto value = revertedValues[idx++];
if (value < 0) {
value = std::max(input->desc().dim(dim.first) + value + 1, 0);
}
value = std::min(input->desc().dim(dim.first), value);
dim.second = value;
}
return dims;
};
params.begin = numpyIdxVectorToDimValues(
std::vector<int>(beginInput->content()->get<int>(),
beginInput->content()->get<int>() + beginInput->desc().dims().get(Dim::C, 0)));
params.end = numpyIdxVectorToDimValues(
std::vector<int>(endInput->content()->get<int>(),
endInput->content()->get<int>() + endInput->desc().dims().get(Dim::C, 0)));
// Parse strides input data if needed or set it to default values
if (stage->numInputs() == 4) {
const auto stridesInput = stage->input(3);
IE_ASSERT(stridesInput->content() != nullptr);
params.strides = numpyIdxVectorToDimValues(
std::vector<int>(stridesInput->content()->get<int>(),
stridesInput->content()->get<int>() + stridesInput->desc().dims().get(Dim::C, 0)));
} else {
params.strides = numpyIdxVectorToDimValues(std::vector<int>(num_input_dims, 1));
}
IE_ASSERT(params.begin.size() == num_input_dims);
IE_ASSERT(params.end.size() == num_input_dims);
IE_ASSERT(params.strides.size() == num_input_dims);
std::vector<int> begin_mask_values;
std::vector<int> end_mask_values;
std::string begin_mask_str = stage->origLayer()->GetParamAsString("begin_mask", "");
for (const auto& c : begin_mask_str) {
if (c == '1') begin_mask_values.push_back(1);
else if (c == '0') begin_mask_values.push_back(0);
}
begin_mask_values.insert(begin_mask_values.end(), num_input_dims - begin_mask_values.size(), 1);
std::string end_mask_str = stage->origLayer()->GetParamAsString("end_mask", "");
for (const auto& c : end_mask_str) {
if (c == '1') end_mask_values.push_back(1);
else if (c == '0') end_mask_values.push_back(0);
}
end_mask_values.insert(end_mask_values.end(), num_input_dims - end_mask_values.size(), 1);
std::string ellipsis_mask_str = stage->origLayer()->GetParamAsString("ellipsis_mask", "");
for (const auto& c : ellipsis_mask_str) {
IE_ASSERT(c != '1') << "VPU doesn't support ellipsis_mask for StridedSlice";
}
std::string new_axis_mask_str = stage->origLayer()->GetParamAsString("new_axis_mask", "");
for (const auto& c : new_axis_mask_str) {
IE_ASSERT(c != '1') << "VPU doesn't support new_axis_mask for StridedSlice";
}
std::string shrink_axis_mask_str = stage->origLayer()->GetParamAsString("shrink_axis_mask", "");
for (const auto& c : shrink_axis_mask_str) {
IE_ASSERT(c != '1') << "VPU doesn't support shrink_axis_mask for StridedSlice";
}
params.begin_mask = numpyIdxVectorToDimValues(begin_mask_values);
params.end_mask = numpyIdxVectorToDimValues(end_mask_values);
return params;
}
StridedSliceInternalParams PassImpl::computeInternalParams(const Stage& stage, StridedSliceParams params) {
auto input = stage->input(0);
StridedSliceInternalParams m_params = StridedSliceInternalParams();
size_t numDims = input->desc().numDims();
for (const auto& dim : input->desc().dimsOrder().toPermutation()) {
m_params.begin_dms.set(dim, 0);
m_params.end_dms.set(dim, input->desc().dim(dim));
m_params.strides_dms.set(dim, 1);
}
for (const auto& dim : input->desc().dimsOrder().toPermutation()) {
m_params.strides_dms.set(dim, params.strides[dim]);
IE_ASSERT(params.begin_mask[dim] == 1 || params.begin_mask[dim] == 0);
IE_ASSERT(params.end_mask[dim] == 1 || params.end_mask[dim] == 0);
m_params.begin_dms.set(dim, params.begin_mask[dim] ? params.begin[dim] : 0);
m_params.end_dms.set(dim, params.end_mask[dim] ? params.end[dim] : input->desc().dim(dim));
IE_ASSERT(dim != Dim::N || numDims < 4 || m_params.strides_dms[dim] == 1)
<< "VPU doesn't support batch strides for StridedSlice";
IE_ASSERT(m_params.begin_dms[dim] >= 0 && m_params.begin_dms[dim] < m_params.end_dms[dim]);
IE_ASSERT(m_params.end_dms[dim] <= input->desc().dim(dim));
IE_ASSERT(m_params.strides_dms[dim] > 0);
}
return m_params;
}
void PassImpl::run(const Model& model) {
VPU_PROFILE(stridedSlice);
for (const auto& stage : model->getStages()) {
if (stage->type() != StageType::StridedSlice) {
continue;
}
IE_ASSERT(stage->numInputs() == 3 || stage->numInputs() == 4);
IE_ASSERT(stage->numOutputs() == 1);
auto input = stage->input(0);
auto output = stage->output(0);
IE_ASSERT(input->desc().numDims() == output->desc().numDims());
auto params = parseInputParams(stage);
auto m_params = computeInternalParams(stage, params);
model->disconnectStage(stage);
auto directOrder = DimsOrder::fromNumDims(input->desc().numDims());
auto perm = directOrder.toPermutation();
//
// Select a region of interest in accordance with the begin and end parameters.
//
const bool needSelectROI = std::any_of(perm.begin(), perm.end(), [&](Dim dim) {
return m_params.begin_dms[dim] != 0 || m_params.end_dms[dim] != input->desc().dim(dim); });
if (needSelectROI) {
auto roiDesc = input->desc();
for (const auto &dim : perm) {
roiDesc.setDim(dim, m_params.end_dms[dim] - m_params.begin_dms[dim]);
}
auto roiData = model->duplicateData(input, "@roi", roiDesc);
auto cropStage = _stageBuilder->addCropStage(
model,
stage->name() + "@roi-selection",
stage->origLayer(),
input,
roiData);
cropStage->attrs().set("offset", m_params.begin_dms);
input = roiData;
}
//
// Expand each dimension of the input tensor, if it is not completely divided by stride
// for further work.
//
const bool needExpand = std::any_of(perm.begin(), perm.end(), [&](Dim dim) {
return input->desc().dim(dim) % m_params.strides_dms[dim] != 0; });
if (needExpand) {
auto expandDesc = input->desc();
for (const auto& dim : perm) {
auto alignValue = (m_params.strides_dms[dim] - expandDesc.dim(dim) % m_params.strides_dms[dim])
% m_params.strides_dms[dim];
expandDesc.setDim(dim, expandDesc.dim(dim) + alignValue);
}
auto expandedInputData = model->duplicateData(input, "@extended-input", expandDesc);
_stageBuilder->addExpandStage(
model,
stage->name() + "@expand-input",
stage->origLayer(),
input,
expandedInputData);
input = expandedInputData;
}
//
// For copying with stride we do reshape in order to put data of interest at the beginning of each dimension,
// split into necessary and unnecessary data and then reverse reshape.
//
for (const auto& dim : perm) {
if (m_params.strides_dms[dim] == 1)
continue;
auto stride = abs(m_params.strides_dms[dim]);
auto reshapedDesc = input->desc();
auto subtensorDesc = input->desc();
auto intermediateOutDesc = input->desc();
if (input->desc().numDims() == 1) {
reshapedDesc = DataDesc({stride, input->desc().dim(dim) / stride});
subtensorDesc = DataDesc({1, input->desc().dim(dim) / stride});
} else if (perm.front() == dim) {
auto nextDim = perm.at(directOrder.dimInd(dim) + 1);
reshapedDesc.setDim(dim, stride);
reshapedDesc.setDim(nextDim,
input->desc().dim(dim) * input->desc().dim(nextDim) / stride);
subtensorDesc.setDim(dim, 1);
subtensorDesc.setDim(nextDim, reshapedDesc.dim(nextDim));
} else {
auto previousDim = perm.at(directOrder.dimInd(dim) - 1);
reshapedDesc.setDim(dim, input->desc().dim(dim) / stride);
reshapedDesc.setDim(previousDim, input->desc().dim(previousDim) * stride);
subtensorDesc.setDim(dim, reshapedDesc.dim(dim));
subtensorDesc.setDim(previousDim, input->desc().dim(previousDim));
}
intermediateOutDesc.setDim(dim, input->desc().dim(dim) / stride);
auto reshapedInputData = model->duplicateData(
input, formatString("@reshaped-input@dim%s", dim), reshapedDesc);
auto subtensorData = model->duplicateData(
input, formatString("@subtensor@dim%s", dim), subtensorDesc);
auto intermediateOutputData = model->duplicateData(
input, formatString("@intermediate-output@dim%s", dim), intermediateOutDesc);
_stageBuilder->addReshapeStage(
model,
formatString("%s@reshape-input@dim%s", stage->name(), dim),
stage->origLayer(),
input,
reshapedInputData);
_stageBuilder->addSplitStage(
model,
formatString("%s@split@dim%s", stage->name(), dim),
stage->origLayer(),
dim,
reshapedInputData,
{subtensorData});
_stageBuilder->addReshapeStage(
model,
formatString("%s@reshape-output@dim%s", stage->name(), dim),
stage->origLayer(),
subtensorData,
intermediateOutputData);
input = intermediateOutputData;
}
VPU_INTERNAL_CHECK(input->desc().dims() == output->desc().dims(),
"StridedSlice pass: result tensor dims (%v) must be equal to output "
"tensor dims (%v)", input->desc().dims(), output->desc().dims());
_stageBuilder->addCopyStage(
model,
formatString("%s@copy-output", stage->name()),
stage->origLayer(),
input,
output,
"stridedSlice");
model->removeStage(stage);
}
}
} // namespace
Pass::Ptr PassManager::stridedSlice() {
return std::make_shared<PassImpl>(_stageBuilder);
}
} // namespace vpu


@@ -7,12 +7,29 @@
#include <memory>
#include <vector>
#include <vpu/stages/post_op_stage.hpp>
namespace vpu {
namespace {
int maskStrToInt(std::string mask) {
int idx = 0, result = 0;
for (const auto& character : mask) {
if (character == ',') continue;
if (idx++ > 0) {
result <<= 1;
}
if (character == '1') {
result = result | 1;
} else if (character != '0') {
VPU_THROW_FORMAT("Unsupported mask value: only 0 or 1 are supported, but got {} instead", character);
}
}
return result;
}
class StridedSliceStage final : public StageNode {
public:
using StageNode::StageNode;
@@ -35,36 +52,81 @@ private:
}
void initialCheckImpl() const override {
IE_ASSERT(numInputs() == 3 || numInputs() == 4);
IE_ASSERT(numOutputs() == 1);
VPU_THROW_UNLESS(numInputs() == 3 || numInputs() == 4,
"Validating layer {} with type {} failed: number of input should be 3 or 4, but {} were provided",
name(), type(), numInputs());
VPU_THROW_UNLESS(numOutputs() == 1,
"Validating layer {} with type {} failed: number of outputs should be 1, but {} were provided",
name(), type(), numOutputs());
const auto& input0DataType = input(0)->desc().type();
std::vector<EnumSet<DataType>> expectedInputs3Types =
{ {DataType::FP16}, {DataType::S32}, {DataType::S32} };
{ {input0DataType}, {DataType::S32}, {DataType::S32} };
std::vector<EnumSet<DataType>> expectedInputs4Types =
{ {DataType::FP16}, {DataType::S32}, {DataType::S32}, {DataType::S32} };
{ {input0DataType}, {DataType::S32}, {DataType::S32}, {DataType::S32} };
assertInputsOutputsTypes(
this,
numInputs() == 3 ? expectedInputs3Types : expectedInputs4Types,
{{DataType::FP16}});
{{input0DataType}});
}
void serializeParamsImpl(BlobSerializer&) const override {
VPU_THROW_EXCEPTION << "Must never be called";
void serializeParamsImpl(BlobSerializer& serializer) const override {
std::string beginMask = origLayer()->GetParamAsString("begin_mask", "");
std::string endMask = origLayer()->GetParamAsString("end_mask", "");
serializer.append(maskStrToInt(beginMask));
serializer.append(maskStrToInt(endMask));
}
void serializeDataImpl(BlobSerializer&) const override {
VPU_THROW_EXCEPTION << "Must never be called";
void serializeDataImpl(BlobSerializer& serializer) const override {
input(0)->serializeBuffer(serializer);
input(1)->serializeBuffer(serializer);
input(2)->serializeBuffer(serializer);
input(3)->serializeBuffer(serializer);
output(0)->serializeBuffer(serializer);
}
};
} // namespace
void FrontEnd::parseStridedSlice(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const {
IE_ASSERT(inputs.size() == 3 || inputs.size() == 4);
IE_ASSERT(outputs.size() == 1);
VPU_THROW_UNLESS(inputs.size() == 3 || inputs.size() == 4,
"Parsing layer {} with type {} failed: number of input should be 3 or 4, but {} were provided",
layer->name, layer->type, inputs.size());
VPU_THROW_UNLESS(outputs.size() == 1,
"Parsing layer {} with type {} failed: number of outputs should be 1, but {} were provided",
layer->name, layer->type, outputs.size());
model->addNewStage<StridedSliceStage>(layer->name, StageType::StridedSlice, layer, inputs, outputs);
std::string newAxisMask = layer->GetParamAsString("new_axis_mask", "");
VPU_THROW_UNLESS(maskStrToInt(newAxisMask) == 0,
"Checking {} with type {} failed: new_axis_mask parameter is not supported",
layer->name, layer->type);
std::string shrinkAxisMask = layer->GetParamAsString("shrink_axis_mask", "");
VPU_THROW_UNLESS(maskStrToInt(shrinkAxisMask) == 0,
"Checking {} with type {} failed: shrink_axis_mask parameter is not supported",
layer->name, layer->type);
std::string ellipsisMask = layer->GetParamAsString("ellipsis_mask", "");
VPU_THROW_UNLESS(maskStrToInt(ellipsisMask) == 0,
"Checking {} with type {} failed: ellipsis_mask parameter is not supported",
layer->name, layer->type);
DataVector extendedInputs{inputs.begin(), inputs.end()};
if (inputs.size() == 3) {
extendedInputs.push_back(model->addFakeData());
} else {
const auto& strides = inputs[3];
const auto stridesPtr = strides->content()->get<int32_t>();
VPU_THROW_UNLESS(stridesPtr != nullptr,
"Checking {} with type {} failed: pointer for strides is null");
for (int i = 0; i < strides->desc().totalDimSize(); i++) {
VPU_THROW_UNLESS(stridesPtr[i] > 0,
"Checking {} with type {} failed: negative stride is not supported");
}
}
model->addNewStage<StridedSliceStage>(layer->name, StageType::StridedSlice, layer, extendedInputs, outputs);
}
} // namespace vpu
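As an aside on the serialization above: maskStrToInt packs an IR mask string such as "1,0,1" into an integer bit field, with the first axis ending up in the most significant bit. A standalone sketch of that packing, restated here purely for illustration (not part of the diff; error handling for characters other than '0'/'1' is omitted):

#include <cassert>
#include <string>

// Commas are skipped; every remaining character shifts the accumulator left,
// and '1' sets the low bit, so the first axis lands in the highest bit.
int packMask(const std::string& mask) {
    int idx = 0, result = 0;
    for (char c : mask) {
        if (c == ',') continue;
        if (idx++ > 0) result <<= 1;
        if (c == '1') result |= 1;
    }
    return result;
}

int main() {
    assert(packMask("1,0,1") == 5);  // 0b101
    assert(packMask("") == 0);       // a missing mask parameter serializes as 0
    return 0;
}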


@@ -11,85 +11,79 @@ using namespace LayerTestsDefinitions;
namespace {
stridedSliceParamsTuple ss_only_test_cases[] = {
stridedSliceParamsTuple({ 128, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 1, 1, 1 },
{ 0, 1, 1 }, { 0, 1, 1 }, { 1, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 128, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 1, 1, 1},
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 1, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 1, 12, 100 }, { 0, -1, 0 }, { 0, 0, 0 }, { 1, 1, 1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 1, 0 }, { 0, 0, 0 },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 1, 12, 100 }, { 0, 9, 0 }, { 0, 11, 0 }, { 1, 1, 1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 1, 12, 100 }, { 0, 1, 0 }, { 0, -1, 0 }, { 1, 1, 1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 1, 12, 100 }, { 0, 9, 0 }, { 0, 8, 0 }, { 1, -1, 1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 1, 12, 100 }, { 0, 9, 0 }, { 0, 7, 0 }, { -1, -1, -1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 1, 12, 100 }, { 0, 7, 0 }, { 0, 9, 0 }, { -1, 1, -1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 1, 12, 100 }, { 0, 4, 0 }, { 0, 9, 0 }, { -1, 2, -1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 1, 12, 100 }, { 0, 4, 0 }, { 0, 10, 0 }, { -1, 2, -1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 1, 12, 100 }, { 0, 9, 0 }, { 0, 4, 0 }, { -1, -2, -1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 1, 12, 100 }, { 0, 10, 0 }, { 0, 4, 0 }, { -1, -2, -1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 1, 12, 100 }, { 0, 11, 0 }, { 0, 0, 0 }, { -1, -2, -1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 1, 12, 100 }, { 0, -6, 0 }, { 0, -8, 0 }, { -1, -2, -1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 1, 12, 100, 1, 1 }, { 0, -1, 0, 0 }, { 0, 0, 0, 0 }, { 1, 1, 1, 1 },
{ 1, 0, 1, 0 }, { 1, 0, 1, 0 }, { }, { 0, 1, 0, 1 }, {},
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 2, 2, 2, 2 }, { 0, 0, 0, 0 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 },
{1, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {},
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 2, 2, 2, 2 }, { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 },
{0, 0, 0, 0}, {1, 1, 1, 1}, {}, {}, {},
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 2, 2, 2, 2 }, { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 },
{0, 0, 0, 0}, {0, 0, 0, 0}, {}, {}, {},
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 2, 2, 4, 3 }, { 0, 0, 0, 0 }, { 2, 2, 4, 3 }, { 1, 1, 2, 1 },
{1, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {},
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 2, 2, 4, 2 }, { 1, 0, 0, 1 }, { 2, 2, 4, 2 }, { 1, 1, 2, 1 },
{0, 1, 1, 0}, {1, 1, 0, 0}, {}, {}, {},
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 1, 2, 4, 2 }, { 1, 0, 0, 0 }, { 1, 2, 4, 2 }, { 1, 1, -2, -1 },
{1, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {},
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 2, 2, 4, 2 }, { 1, 0, 0, 0 }, { 1, 2, 4, 2 }, { 1, 1, -2, -1 },
{0, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {},
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 2, 2, 2, 2 }, { 0, 0, 0, 0 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 },
{1, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {},
InferenceEngine::Precision::I64, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 2, 2, 2, 2 }, { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 },
{0, 0, 0, 0}, {1, 1, 1, 1}, {}, {}, {},
InferenceEngine::Precision::I64, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 2, 2, 2, 2 }, { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 },
{0, 0, 0, 0}, {0, 0, 0, 0}, {}, {}, {},
InferenceEngine::Precision::I64, CommonTestUtils::DEVICE_GPU),
stridedSliceParamsTuple({ 2, 3, 4, 5, 6 }, { 0, 1, 0, 0, 0 }, { 2, 3, 4, 5, 6 }, { 1, 1, 1, 1, 1 },
{1, 0, 1, 1, 1}, {1, 0, 1, 1, 1}, {}, {0, 1, 0, 0, 0}, {},
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
std::vector<StridedSliceParams> ss_only_test_cases = {
StridedSliceParams{ { { 128, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 1, 1, 1 },
{ 0, 1, 1 }, { 0, 1, 1 }, { 1, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 128, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 1, 1, 1},
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 1, 0 }, { 0, 0, 0 }, { 0, 0, 0 } },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 1, 12, 100 }, { 0, -1, 0 }, { 0, 0, 0 }, { 1, 1, 1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 1, 0 }, { 0, 0, 0 } },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 1, 12, 100 }, { 0, 9, 0 }, { 0, 11, 0 }, { 1, 1, 1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 1, 12, 100 }, { 0, 1, 0 }, { 0, -1, 0 }, { 1, 1, 1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 1, 12, 100 }, { 0, 9, 0 }, { 0, 7, 0 }, { -1, -1, -1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 1, 12, 100 }, { 0, 7, 0 }, { 0, 9, 0 }, { -1, 1, -1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 1, 12, 100 }, { 0, 4, 0 }, { 0, 9, 0 }, { -1, 2, -1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 1, 12, 100 }, { 0, 4, 0 }, { 0, 10, 0 }, { -1, 2, -1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 1, 12, 100 }, { 0, 9, 0 }, { 0, 4, 0 }, { -1, -2, -1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 1, 12, 100 }, { 0, 10, 0 }, { 0, 4, 0 }, { -1, -2, -1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 1, 12, 100 }, { 0, 11, 0 }, { 0, 0, 0 }, { -1, -2, -1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 1, 12, 100 }, { 0, -6, 0 }, { 0, -8, 0 }, { -1, -2, -1 },
{ 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 1, 12, 100, 1, 1 }, { 0, -1, 0, 0 }, { 0, 0, 0, 0 }, { 1, 1, 1, 1 },
{ 1, 0, 1, 0 }, { 1, 0, 1, 0 }, { }, { 0, 1, 0, 1 }, {} },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 2, 2, 2, 2 }, { 0, 0, 0, 0 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 },
{1, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {} },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 },
{0, 0, 0, 0}, {1, 1, 1, 1}, {}, {}, {} },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 },
{0, 0, 0, 0}, {0, 0, 0, 0}, {}, {}, {} },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 2, 2, 4, 3 }, { 0, 0, 0, 0 }, { 2, 2, 4, 3 }, { 1, 1, 2, 1 },
{1, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {} },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 2, 2, 4, 2 }, { 1, 0, 0, 1 }, { 2, 2, 4, 2 }, { 1, 1, 2, 1 },
{0, 1, 1, 0}, {1, 1, 0, 0}, {}, {}, {} },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 1, 2, 4, 2 }, { 1, 0, 0, 0 }, { 1, 2, 4, 2 }, { 1, 1, -2, -1 },
{1, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {} },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 2, 2, 4, 2 }, { 1, 0, 0, 0 }, { 1, 2, 4, 2 }, { 1, 1, -2, -1 },
{0, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {} },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 2, 2, 2, 2 }, { 0, 0, 0, 0 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 },
{1, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {} },
InferenceEngine::Precision::I64, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 },
{0, 0, 0, 0}, {0, 0, 0, 0}, {}, {}, {} },
InferenceEngine::Precision::I64, CommonTestUtils::DEVICE_GPU, {}},
StridedSliceParams{ { { 2, 3, 4, 5, 6 }, { 0, 1, 0, 0, 0 }, { 2, 3, 4, 5, 6 }, { 1, 1, 1, 1, 1 },
{1, 0, 1, 1, 1}, {1, 0, 1, 1, 1}, {}, {0, 1, 0, 0, 0}, {} },
InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU, {}},
};
INSTANTIATE_TEST_CASE_P(


@@ -0,0 +1,57 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "single_layer_tests/strided_slice.hpp"
#include "common_test_utils/test_constants.hpp"
#include "common/myriad_common_test_utils.hpp"
#include "vpu/private_plugin_config.hpp"
#include <vector>
using namespace LayerTestsDefinitions;
namespace {
typedef std::map<std::string, std::string> Config;
std::vector<StridedSliceSpecificParams> testCases = {
{ { 1, 12, 100 }, { 0, 9, 0 }, { 0, 11, 0 }, { 1, 1, 1 }, { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } },
{ { 1, 12, 100 }, { 0, 1, 0 }, { 0, -1, 0 }, { 1, 1, 1 }, { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } },
{ { 1, 12, 100 }, { 0, 8, 0 }, { 0, 9, 0 }, { 1, 1, 1 }, { 1, 0, 1 }, { 1, 0, 1 }, {}, {}, {} },
{ { 1, 12, 100 }, { 0, 4, 0 }, { 0, 9, 0 }, { 1, 2, 1 }, { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } },
{ { 1, 12, 100 }, { 0, 0, 0 }, { 0, 11, 0 }, { 1, 2, 1 }, { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } },
{ { 1, 12, 100 }, { 0, -8, 0 }, { 0, -6, 0 }, { 1, 2, 1 }, { 1, 0, 1 }, { 1, 0, 1 }, {}, {}, {} },
{ { 1, 2, 2, 2 }, { 0, 0, 0, 0 }, { 1, 2, 2, 2 }, { 1, 1, 1, 1 }, {1, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {} },
{ { 1, 2, 2, 2 }, { 0, 1, 1, 1 }, { 1, 2, 2, 2 }, { 1, 1, 1, 1 }, {0, 0, 0, 0}, {1, 1, 1, 1}, {}, {}, {} },
{ { 1, 2, 2, 2 }, { 0, 1, 1, 1 }, { 1, 2, 2, 2 }, { 1, 1, 1, 1 }, {0, 0, 0, 0}, {0, 0, 0, 0}, {}, {}, {} },
{ { 1, 2, 4, 3 }, { 0, 0, 0, 0 }, { 1, 2, 4, 3 }, { 1, 1, 2, 1 }, {1, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {} },
{ { 1, 2, 4, 2 }, { 0, 0, 0, 1 }, { 1, 2, 4, 2 }, { 1, 1, 2, 1 }, {0, 1, 1, 0}, {1, 1, 0, 0}, {}, {}, {} },
{ { 1, 2, 4, 2 }, { 0, 0, 0, 0 }, { 1, 2, 4, 2 }, { 1, 1, 2, 1 }, {1, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {} },
{ { 1, 2, 4, 2 }, { 0, 0, 0, 0 }, { 1, 2, 4, 2 }, { 1, 1, 2, 1 }, {0, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {} },
{ { 1, 3, 4, 5, 6 }, { 0, 1, 0, 0, 0 }, { 1, 3, 4, 5, 6 }, { 1, 1, 1, 1, 1 }, {1, 0, 1, 1, 1}, {1, 0, 1, 1, 1}, {}, {}, {} },
};
std::vector<InferenceEngine::Precision> precisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::I32
};
Config getConfig() {
Config config;
if (CommonTestUtils::vpu::CheckMyriad2()) {
config[VPU_CONFIG_KEY(DISABLE_REORDER)] = CONFIG_VALUE(YES);
}
return config;
}
INSTANTIATE_TEST_CASE_P(StridedSlice_tests, StridedSliceLayerTest,
::testing::Combine(
::testing::ValuesIn(testCases),
::testing::ValuesIn(precisions),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
::testing::Values(getConfig())),
StridedSliceLayerTest::getTestCaseName);
} // namespace


@@ -0,0 +1,73 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "dsr_tests_common.hpp"
#include <functional_test_utils/layer_test_utils.hpp>
#include <ngraph_functions/builders.hpp>
#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
namespace {
using namespace LayerTestsUtils::vpu;
struct StridedSliceParams {
DataShapeWithUpperBound inputShape;
std::vector<int64_t> begin;
std::vector<int64_t> end;
std::vector<int64_t> strides;
std::vector<int64_t> beginMask;
std::vector<int64_t> endMask;
std::vector<int64_t> newAxisMask;
std::vector<int64_t> shrinkAxisMask;
std::vector<int64_t> ellipsisMask;
};
using Parameters = std::tuple<
StridedSliceParams,
DataType, // Net precision
std::string // Device name
>;
class DSR_StridedSlice : public testing::WithParamInterface<Parameters>,
public DSR_TestsCommon {
protected:
std::shared_ptr<ngraph::Node> createTestedOp() override {
StridedSliceParams params;
DataType netPrecision;
std::tie(params, netPrecision, targetDevice) = this->GetParam();
const auto input = createInputSubgraphWithDSR(netPrecision, params.inputShape);
const auto stridedSlice = ngraph::builder::makeStridedSlice(input, params.begin, params.end, params.strides, netPrecision, params.beginMask,
params.endMask, params.newAxisMask, params.shrinkAxisMask, params.ellipsisMask);
return stridedSlice;
}
};
TEST_P(DSR_StridedSlice, CompareWithReference) {
Run();
}
std::vector<StridedSliceParams> testCases = {
{ { { 800, 4 }, { 1000, 4 } }, { 0, 0 }, { -1, 0 }, { 2, 1 }, { 1, 0 }, { 0, 1 }, {}, {}, {} },
{ { { 1, 12, 80 }, { 1, 12, 100 } }, { 0, 9, 0 }, { 0, 11, 0 }, { 1, 1, 1 }, { 1, 0, 1 }, { 1, 0, 1 }, {}, {}, {} },
{ { { 1, 7, 80 }, { 1, 12, 100 } }, { 0, 1, 0 }, { 0, -1, 0 }, { 1, 1, 1 }, { 1, 0, 1 }, { 1, 0, 1 }, {}, {}, {} },
{ { { 1, 10, 70 }, { 1, 12, 100 } }, { 0, 4, 0 }, { 0, 9, 0 }, { 1, 2, 1 }, { 1, 0, 1 }, { 1, 0, 1 }, {}, {}, {} },
{ { { 1, 10, 60 }, { 1, 12, 100 } }, { 0, -8, 0 }, { 0, -6, 0 }, { 1, 2, 1 }, { 1, 0, 1 }, { 1, 0, 1 }, {}, {}, {} },
{ { { 1, 2, 2, 2 }, { 1, 3, 3, 3 } }, { 0, 0, 0, 0 }, { 1, -1, -1, -1 }, { 1, 2, 1, 1 }, {0, 0, 0, 0}, {1, 1, 1, 1}, {}, {}, {} },
};
std::vector<DataType> precisions = {
ngraph::element::f32,
ngraph::element::i32
};
INSTANTIATE_TEST_CASE_P(StridedSlice, DSR_StridedSlice,
::testing::Combine(
::testing::ValuesIn(testCases),
::testing::ValuesIn(precisions),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD)));
} // namespace


@@ -13,23 +13,29 @@
namespace LayerTestsDefinitions {
using stridedSliceParamsTuple = typename std::tuple<
InferenceEngine::SizeVector, // Input shape
std::vector<int64_t>, // Begin
std::vector<int64_t>, // End
std::vector<int64_t>, // Stride
std::vector<int64_t>, // Begin mask
std::vector<int64_t>, // End mask
std::vector<int64_t>, // New axis mask
std::vector<int64_t>, // Shrink axis mask
std::vector<int64_t>, // Ellipsis axis mask
InferenceEngine::Precision, // Network precision
std::string>; // Device name
struct StridedSliceSpecificParams {
InferenceEngine::SizeVector inputShape;
std::vector<int64_t> begin;
std::vector<int64_t> end;
std::vector<int64_t> strides;
std::vector<int64_t> beginMask;
std::vector<int64_t> endMask;
std::vector<int64_t> newAxisMask;
std::vector<int64_t> shrinkAxisMask;
std::vector<int64_t> ellipsisAxisMask;
};
class StridedSliceLayerTest : public testing::WithParamInterface<stridedSliceParamsTuple>,
using StridedSliceParams = std::tuple<
StridedSliceSpecificParams,
InferenceEngine::Precision, // Net precision
std::string, // Device name
std::map<std::string, std::string> // Additional network configuration
>;
class StridedSliceLayerTest : public testing::WithParamInterface<StridedSliceParams>,
public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(const testing::TestParamInfo<stridedSliceParamsTuple> &obj);
static std::string getTestCaseName(const testing::TestParamInfo<StridedSliceParams> &obj);
protected:
void SetUp() override;


@@ -20,43 +20,40 @@
namespace LayerTestsDefinitions {
std::string StridedSliceLayerTest::getTestCaseName(const testing::TestParamInfo<stridedSliceParamsTuple> &obj) {
InferenceEngine::SizeVector inputShape;
std::vector<int64_t> begin, end, stride;
std::vector<int64_t> begin_mask, new_axis_mask, end_mask, shrink_mask, ellipsis_mask;
std::string StridedSliceLayerTest::getTestCaseName(const testing::TestParamInfo<StridedSliceParams> &obj) {
StridedSliceSpecificParams params;
InferenceEngine::Precision netPrc;
std::string targetName;
std::tie(inputShape, begin, end, stride, begin_mask, end_mask, new_axis_mask, shrink_mask, ellipsis_mask, netPrc,
targetName) = obj.param;
std::map<std::string, std::string> additionalConfig;
std::tie(params, netPrc, targetName, additionalConfig) = obj.param;
std::ostringstream result;
result << "inShape=" << CommonTestUtils::vec2str(inputShape) << "_";
result << "inShape=" << CommonTestUtils::vec2str(params.inputShape) << "_";
result << "netPRC=" << netPrc.name() << "_";
result << "begin=" << CommonTestUtils::vec2str(begin) << "_";
result << "end=" << CommonTestUtils::vec2str(end) << "_";
result << "stride=" << CommonTestUtils::vec2str(stride) << "_";
result << "begin_m=" << CommonTestUtils::vec2str(begin_mask) << "_";
result << "end_m=" << CommonTestUtils::vec2str(end_mask) << "_";
result << "new_axis_m=" << (new_axis_mask.empty() ? "def" : CommonTestUtils::vec2str(new_axis_mask)) << "_";
result << "shrink_m=" << (shrink_mask.empty() ? "def" : CommonTestUtils::vec2str(shrink_mask)) << "_";
result << "ellipsis_m=" << (ellipsis_mask.empty() ? "def" : CommonTestUtils::vec2str(ellipsis_mask)) << "_";
result << "begin=" << CommonTestUtils::vec2str(params.begin) << "_";
result << "end=" << CommonTestUtils::vec2str(params.end) << "_";
result << "stride=" << CommonTestUtils::vec2str(params.strides) << "_";
result << "begin_m=" << CommonTestUtils::vec2str(params.beginMask) << "_";
result << "end_m=" << CommonTestUtils::vec2str(params.endMask) << "_";
result << "new_axis_m=" << (params.newAxisMask.empty() ? "def" : CommonTestUtils::vec2str(params.newAxisMask)) << "_";
result << "shrink_m=" << (params.shrinkAxisMask.empty() ? "def" : CommonTestUtils::vec2str(params.shrinkAxisMask)) << "_";
result << "ellipsis_m=" << (params.ellipsisAxisMask.empty() ? "def" : CommonTestUtils::vec2str(params.ellipsisAxisMask)) << "_";
result << "targetDevice=" << targetName;
return result.str();
}
void StridedSliceLayerTest::SetUp() {
InferenceEngine::SizeVector inputShape;
std::vector<int64_t> begin, end, stride;
std::vector<int64_t> begin_mask, end_mask, new_axis_mask, shrink_mask, ellipsis_mask;
StridedSliceSpecificParams ssParams;
InferenceEngine::Precision netPrecision;
std::tie(inputShape, begin, end, stride, begin_mask, end_mask, new_axis_mask, shrink_mask, ellipsis_mask,
netPrecision, targetDevice) = this->GetParam();
std::map<std::string, std::string> additionalConfig;
std::tie(ssParams, netPrecision, targetDevice, additionalConfig) = this->GetParam();
configuration.insert(additionalConfig.begin(), additionalConfig.end());
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
auto params = ngraph::builder::makeParams(ngPrc, {ssParams.inputShape});
auto paramOuts = ngraph::helpers::convert2OutputVector(
ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
auto ss = ngraph::builder::makeStridedSlice(paramOuts[0], begin, end, stride, ngPrc, begin_mask, end_mask,
new_axis_mask, shrink_mask, ellipsis_mask);
auto ss = ngraph::builder::makeStridedSlice(paramOuts[0], ssParams.begin, ssParams.end, ssParams.strides, ngPrc, ssParams.beginMask,
ssParams.endMask, ssParams.newAxisMask, ssParams.shrinkAxisMask, ssParams.ellipsisAxisMask);
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(ss)};
function = std::make_shared<ngraph::Function>(results, params, "StridedSlice");
}


@@ -285,20 +285,19 @@ TEST_P(myriadLayersTestsStridedSlice_smoke, TestsStridedSlice) {
// Params: in_shape, dim_size, begin, end, stride, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask, out_shape
static std::vector<strided_slice_test_param> s_stridedSliceParams = {
strided_slice_test_param{ { 10 }, 1, { 0 }, { 10 }, { 2 }, {}, {}, {}, {}, {}, { 5 } },
strided_slice_test_param{ { 10 }, 1, { 1 }, { 9 }, { 2 }, {}, {}, {}, {}, {}, { 4 } },
strided_slice_test_param{ { 10 }, 1, { 1 }, { 9 }, { 2 }, { 0 }, {}, {}, {}, {}, { 5 } },
strided_slice_test_param{ { 10 }, 1, { 0 }, { 10 }, { 2 }, { 1 }, { 1 }, {}, {}, {}, { 5 } },
strided_slice_test_param{ { 10 }, 1, { 1 }, { 9 }, { 2 }, { 1 }, { 1 }, {}, {}, {}, { 4 } },
strided_slice_test_param{ { 10 }, 1, { 1 }, { 9 }, { 2 }, { 0 }, { 1 }, {}, {}, {}, { 5 } },
strided_slice_test_param{ { 1000, 4 }, 2, { 0, 0 }, { 1000, 4 }, { 1, 4 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 1000, 1 } },
strided_slice_test_param{ { 1000, 4 }, 2, { 200, 1 }, { 500, 3 }, { 1, 2 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 1000, 1 } },
strided_slice_test_param{ { 1, 2, 35, 33 }, 4, { 0, 0, 0, 2 }, { 1, 2, 33, 31 }, {1, 1, 1, 2}, {}, {}, {}, {}, {}, { 1, 2, 33, 15 } },
strided_slice_test_param{ { 1, 2, 35, 33 }, 4, { 0, 0, 0, 2 }, { 1, 2, 33, 31 }, {1, 1, 1, 2}, { 0, 0, 0, 1 }, { 0, 0, 1, 1 }, {}, {}, {}, { 1, 2, 33, 15 } },
strided_slice_test_param{ { 2, 2, 2, 3}, 4, { 0, 0, 0, 1 }, { 2, 2, 2, 3 }, { 1, 2, 2, 2 }, { 1, 1, 0, 1 }, { 1, 1, 0, 1 }, {}, {}, {}, { 2, 1, 1, 1 } },
strided_slice_test_param{ { 2, 8, 32, 32}, 4, { 0, 2, 0, 0 }, { 2, 7, 32, 32 }, { 1, 3, 1, 1 }, {}, {}, {}, {}, {}, { 2, 2, 32, 32 } },
strided_slice_test_param{ { 2, 8, 32, 32}, 4, { 0, 0, 2, 0 }, { 2, 8, 31, 32 }, { 1, 1, 3, 1 }, {}, {}, {}, {}, {}, { 2, 8, 10, 32 } },
strided_slice_test_param{ { 2, 8, 32, 32}, 4, { 0, 0, 0, 2 }, { 2, 8, 32, 32 }, { 1, 1, 1, 3 }, {}, {}, {}, {}, {}, { 2, 8, 32, 10 } },
strided_slice_test_param{ { 1, 32, 128, 128 }, 4, {0, 0, 0, 0 }, { 1, 32, 128, 128 }, { 1, 2, 4, 8 }, {}, {}, {}, {}, {}, { 1, 16, 32, 16 } },
strided_slice_test_param{ { 1, 32, 128, 128 }, 4, {0, 16, 0, 0 }, { 1, 32, 128, 128 }, {}, {}, {}, {}, {}, {}, { 1, 16, 128, 128 } },
strided_slice_test_param{ { 2, 8, 32, 32}, 4, { 0, 2, 0, 0 }, { 2, 7, 0, 0 }, { 1, 3, 1, 1 }, { 0, 1, 0, 0 }, { 0, 1, 0, 0 }, {}, {}, {}, { 2, 2, 32, 32 } },
strided_slice_test_param{ { 2, 8, 32, 32}, 4, { 0, 0, 2, 0 }, { 0, 0, 31, 0 }, { 1, 1, 3, 1 }, { 0, 0, 1, 0 }, { 0, 0, 1, 0 }, {}, {}, {}, { 2, 8, 10, 32 } },
strided_slice_test_param{ { 2, 8, 32, 32}, 4, { 0, 0, 0, 2 }, { 0, 0, 0, 0 }, { 1, 1, 1, 3 }, { 0, 0, 0, 1 }, { 0, 0, 0, 0 }, {}, {}, {}, { 2, 8, 32, 10 } },
strided_slice_test_param{ { 1, 32, 128, 128 }, 4, {0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 1, 2, 4, 8 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, {}, {}, {}, { 1, 16, 32, 16 } },
strided_slice_test_param{ { 1, 32, 128, 128 }, 4, {0, 16, 0, 0 }, { 0, 0, 0, 0 }, {}, { 0, 1, 0, 0 }, { 0, 0, 0, 0 }, {}, {}, {}, { 1, 16, 128, 128 } },
strided_slice_test_param{ { 4, 1000 }, 2, { 0, 0 }, { 4, 9999 }, { 1, 1 }, { 1, 1 }, { 1, 1 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 4, 1000 } },
strided_slice_test_param{ { 4, 1000 }, 2, { 0, 0 }, { 4, -1 }, { 1, 1 }, { 1, 1 }, { 1, 1 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 4, 1000 } },
strided_slice_test_param{ { 4, 1000 }, 2, { 0, 0 }, { 4, -3 }, { 1, 1 }, { 1, 1 }, { 1, 1 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 4, 998 } },
strided_slice_test_param{ { 4, 1000 }, 2, { 0, 0 }, { 4, -1 }, { 1, 1 }, { 1, 1 }, { 1, 1 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 4, 999 } },
strided_slice_test_param{ { 4, 1000 }, 2, { 0, 0 }, { 4, -3 }, { 1, 1 }, { 1, 1 }, { 1, 1 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 4, 997 } },
};
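Once begin_mask/end_mask and negative indices are resolved to concrete bounds, each entry of out_shape above is ceil((end - begin) / stride) for positive strides. A small sketch checked against one fully specified row of the table (illustration only, not part of the commit):

#include <cassert>
#include <vector>

// Per-axis output extent for a resolved begin/end/stride triple (positive strides).
std::vector<int> outShape(const std::vector<int>& begin, const std::vector<int>& end,
                          const std::vector<int>& stride) {
    std::vector<int> out(begin.size());
    for (size_t i = 0; i < begin.size(); ++i) {
        out[i] = (end[i] - begin[i] + stride[i] - 1) / stride[i];  // ceil division
    }
    return out;
}

int main() {
    // Matches the row { 2, 8, 32, 32 }, begin { 0, 2, 0, 0 }, end { 2, 7, 32, 32 },
    // stride { 1, 3, 1, 1 } -> out_shape { 2, 2, 32, 32 } in the table above.
    assert((outShape({0, 2, 0, 0}, {2, 7, 32, 32}, {1, 3, 1, 1}) ==
            std::vector<int>{2, 2, 32, 32}));
    return 0;
}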


@@ -1450,7 +1450,7 @@ void ref_strided_slice(const InferenceEngine::Blob::Ptr& src,
for (size_t i = 0; i < num_dims; i++) {
auto value = values[i];
if (value < 0) {
value = std::max<int32_t>(src_dims[i] + value + 1, 0);
value = std::max<int32_t>(src_dims[i] + value, 0);
}
value = std::min<int32_t>(src_dims[i], value);
convertedDims[i] = value;