[GNA] Map 2d convolution to 1d if kernel width/height is equal to input width/height (#4471)

Fix transposition info handling for a crop layer
Throw exception for 1d convolution padding
This commit is contained in:
Elizaveta Lobanova 2021-03-11 15:22:33 +03:00 committed by GitHub
parent b792214d04
commit fd311e60f5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 245 additions and 8 deletions

View File

@ -264,6 +264,17 @@ void GNAGraphCompiler::ConvolutionPrimitive(InferenceEngine::CNNLayerPtr layer)
std::swap(convolution._dilation_x, convolution._dilation_y);
}
// Map 2d convolution to 1d if it's possible
if (in_height > 1 && in_width > 1 && in_width == convolution._kernel_x && convolution._stride_x == 1) {
in_width *= in_height;
in_height = 1;
out_width *= out_height;
out_height = 1;
convolution._stride_x *= (convolution._stride_y * convolution._kernel_x);
convolution._kernel_x *= convolution._kernel_y;
convolution._kernel_y = 1;
}
if (in_batch != 1 || out_batch != 1) {
THROW_GNA_LAYER_EXCEPTION(layer) << "with batch size not equals 1 is not supported";
}
@ -314,6 +325,12 @@ void GNAGraphCompiler::finalizeConvolution1DPrimitive(InferenceEngine::CNNLayerP
const auto inputs = convolution.insData.front().lock();
const auto outputs = convolution.outData.front();
if (layer->GetParamAsString("auto_pad", "explicit") != "valid" &&
(convolution._padding[0] != 0 || convolution._padding[0] != 0 ||
convolution._pads_end[0] != 0 || convolution._pads_end[1] != 0)) {
THROW_GNA_LAYER_EXCEPTION(&convolution) << "Padding isn't supported by GNA";
}
std::size_t calculated_out_width = (in_width * in_height - convolution._kernel_x + 2 * convolution._padding_x) / convolution._stride_x + 1;
if (out_width * in_height != calculated_out_width) {
THROW_GNA_LAYER_EXCEPTION(&convolution) << "Invalid output configuration. "

View File

@ -230,6 +230,29 @@ inline std::vector<TranspositionInfo> FindTranspositionInfoFromNextLayers(Infere
}
transpositionInfo.insert(std::end(transpositionInfo), std::begin(results), std::end(results));
}
if (LayerInfo(layer).isCrop()) {
auto in_dims = layer->input()->getDims();
auto in_total_size = InferenceEngine::details::product(std::begin(in_dims), std::end(in_dims));
auto crop_layer = LayerInfo(layer).as<const InferenceEngine::CropLayer*>();
IE_ASSERT(crop_layer != nullptr);
size_t crop_offset = 1;
size_t crop_out_size = 1;
bool first_cropped_dim = true;
for (int i = 0; i < crop_layer->axis.size(); ++i) {
if (crop_layer->offset[i] == 0 && crop_layer->dim[i] == in_dims[i]) continue;
crop_offset *= first_cropped_dim ? crop_layer->offset[i] : crop_layer->dim[i];
crop_out_size *= crop_layer->dim[i];
first_cropped_dim = false;
}
auto crop_rest_size = in_total_size - crop_offset - crop_out_size;
if (crop_offset > 0) {
transpositionInfo.insert(std::begin(transpositionInfo), {false, 1, crop_offset});
}
if (crop_rest_size > 0) {
transpositionInfo.push_back({false, 1, crop_rest_size});
}
}
return transpositionInfo;
};

View File

@ -1265,12 +1265,6 @@ uint32_t GNAPlugin::QueueInference(const InferenceEngine::BlobMap &inputs, Infer
<< input.second->getTensorDesc().getLayout();
}
auto dims = input.second->getTensorDesc().getDims();
if (inputLayout == Layout::CHW && (dims[0] != 1 || dims[1] != 1)) {
THROW_GNA_EXCEPTION << "For Layout::CHW only dimension with height = 1 and channel = 1 is supported, but was: "
<< dims;
}
if (inputLayout == Layout::NCHW || inputLayout == Layout::CHW) {
// specific case that can be squeezed to 2d
inputLayout = Layout::NC;
@ -1302,6 +1296,7 @@ uint32_t GNAPlugin::QueueInference(const InferenceEngine::BlobMap &inputs, Infer
}
}
auto dims = input.second->getTensorDesc().getDims();
auto importedElements = is1D ? dims[0] : details::product(++std::begin(dims), std::end(dims));
auto importedFrames = (is3D || is1D) ? 1 : dims[0];
auto targetGroups = is1D ? 1 : dims[0]; // TODO: no proper support for groups yet

View File

@ -27,7 +27,6 @@ const std::vector<std::vector<ptrdiff_t>> padEndsH1 = {{1, 0},
{1, 3}};
const std::vector<std::vector<size_t >> dilationsH1 = {{1, 1},
{1, 3}};
// TODO: Currently C != 1 is not supported for graphs with native NCHW layout (will be fixed in 40496)
const std::vector<std::vector<size_t>> inputShapesH1 = {{1, 1, 1, 32},
{1, 32, 1, 160},
{1, 8, 1, 64}};
@ -41,7 +40,6 @@ const std::vector<std::vector<ptrdiff_t>> padEndsW1 = {{0, 1},
{3, 1}};
const std::vector<std::vector<size_t >> dilationsW1 = {{1, 1},
{3, 1}};
// TODO: Currently C != 1 is not supported for graphs with native NCHW layout (will be fixed in 40496)
const std::vector<std::vector<size_t>> inputShapesW1 = {{1, 1, 32, 1},
{1, 32, 160, 1},
{1, 8, 64, 1}};
@ -69,6 +67,10 @@ const std::vector<size_t> numOutCannels2D = { 1, 2, 5 };
const std::vector<size_t> input2DNCHW = { 1, 2, 20, 15 };
const std::vector<std::vector<size_t>> inputShapesMapTo1d = {{1, 1, 56, 5},
{1, 32, 56, 5},
{1, 2, 64, 5}};
const auto conv2DParams_Kernels2D = ::testing::Combine(
::testing::ValuesIn(kernels2D),
::testing::ValuesIn(strides2D),
@ -114,6 +116,15 @@ const auto conv2DParams_AutoPadValid_Width1 = ::testing::Combine(
::testing::ValuesIn(numOutCannels),
::testing::Values(ngraph::op::PadType::VALID)
);
const auto conv2DParams_AutoPadValid_MapTo1d = ::testing::Combine(
::testing::Values(std::vector<size_t>{3, 5}),
::testing::ValuesIn(stridesW1),
::testing::Values(std::vector<ptrdiff_t>({0, 0})),
::testing::Values(std::vector<ptrdiff_t>({0, 0})),
::testing::Values(std::vector<size_t>{1, 1}),
::testing::ValuesIn(numOutCannels),
::testing::Values(ngraph::op::PadType::VALID)
);
// TODO: padding isn't currently supported in GNA
INSTANTIATE_TEST_CASE_P(DISABLED_smoke_Convolution2D_ExplicitPadding_Height1, ConvolutionLayerTest,
@ -164,6 +175,18 @@ INSTANTIATE_TEST_CASE_P(smoke_Convolution2D_AutoPadValid_Width1, ConvolutionLaye
::testing::Values(CommonTestUtils::DEVICE_GNA)),
ConvolutionLayerTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Convolution2D_AutoPadValid_MapTo1d, ConvolutionLayerTest,
::testing::Combine(
conv2DParams_AutoPadValid_MapTo1d,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::ValuesIn(inputShapesMapTo1d),
::testing::Values(CommonTestUtils::DEVICE_GNA)),
ConvolutionLayerTest::getTestCaseName);
// TODO: Enable for GNA 2.1 library
INSTANTIATE_TEST_CASE_P(DISABLED_smoke_Convolution2D_Kernels2D, ConvolutionLayerTest,
::testing::Combine(

View File

@ -0,0 +1,47 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "common_test_utils/test_constants.hpp"
#include "subgraph_tests/stridedslice_conv.hpp"
using namespace SubgraphTestsDefinitions;
namespace {

// Network precisions to exercise both FP32 and FP16 paths on GNA.
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
// Plugin configuration: run in software-exact mode (bit-exact GNA emulation).
const std::vector<std::map<std::string, std::string>> configs = {
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"}
}
};
// Each tuple is (input shape NCHW, kernel shape HxW, stride) — see convParams.
std::vector<convParams> params = {
std::make_tuple(
std::vector<size_t>{1, 1, 1, 256}, //InputShape
std::vector<size_t>{1, 3}, //KernelShape
1), //Stride
std::make_tuple(std::vector<size_t>{1, 1, 1, 1024}, std::vector<size_t>{1, 5}, 1),
std::make_tuple(std::vector<size_t>{1, 1, 1, 336}, std::vector<size_t>{1, 9}, 2),
std::make_tuple(std::vector<size_t>{1, 1, 1, 640}, std::vector<size_t>{1, 8}, 4)
};
// Output channel counts for the convolution under test.
std::vector<size_t> outputChannels = {
4,
8
};
INSTANTIATE_TEST_CASE_P(smoke_SliceConvTest, SliceConvTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(configs),
::testing::ValuesIn(params),
::testing::ValuesIn(outputChannels)),
SliceConvTest::getTestCaseName);
} // namespace

View File

@ -0,0 +1,15 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "shared_test_classes/subgraph/stridedslice_conv.hpp"
namespace SubgraphTestsDefinitions {

// Executes the StridedSlice -> Convolution subgraph on the target device and
// compares results against the reference implementation (LayerTestsCommon::Run).
TEST_P(SliceConvTest, CompareWithRefImpl) {
    Run();
}  // no trailing ';' — TEST_P expands to complete definitions already

} // namespace SubgraphTestsDefinitions

View File

@ -0,0 +1,42 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <memory>
#include <string>
#include <tuple>
#include <vector>
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
namespace SubgraphTestsDefinitions {

// Convolution-specific parameters for the StridedSlice->Conv subgraph test.
using convParams = std::tuple<
    std::vector<size_t>,                // Input Shapes (NCHW)
    std::vector<size_t>,                // Kernel Shape (HxW)
    size_t                              // Stride
>;

// Full parameter tuple consumed by SliceConvTest.
using SliceConvParams = std::tuple<
    InferenceEngine::Precision,         // Network Precision
    std::string,                        // Target Device
    std::map<std::string, std::string>, // Configuration
    convParams,                         // Convolution Params
    size_t                              // Output Channels
>;

// Subgraph test: a StridedSlice feeding a Convolution.
class SliceConvTest : public testing::WithParamInterface<SliceConvParams>,
                      public LayerTestsUtils::LayerTestsCommon {
public:
    // Builds a human-readable test name from the parameter tuple.
    static std::string getTestCaseName(testing::TestParamInfo<SliceConvParams> obj);
    // Fills the input blob with float values in the [-2, 2] range.
    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override;

protected:
    void SetUp() override;
};

} // namespace SubgraphTestsDefinitions

View File

@ -0,0 +1,75 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/subgraph/stridedslice_conv.hpp"

#include <algorithm>

#include "ngraph_functions/builders.hpp"
namespace SubgraphTestsDefinitions {
// Builds a unique, human-readable name for one parameterized test case.
std::string SliceConvTest::getTestCaseName(testing::TestParamInfo<SliceConvParams> obj) {
    // Unpack the top-level parameter tuple.
    InferenceEngine::Precision netPrecision;
    std::string targetDevice;
    std::map<std::string, std::string> configuration;
    convParams convolutionParams;
    size_t outputChannels;
    std::tie(netPrecision, targetDevice, configuration, convolutionParams, outputChannels) = obj.param;

    // Unpack the convolution-specific sub-tuple.
    std::vector<size_t> inputShape;
    std::vector<size_t> kernelShape;
    size_t stride;
    std::tie(inputShape, kernelShape, stride) = convolutionParams;

    std::ostringstream name;
    name << "IS=" << CommonTestUtils::vec2str(inputShape) << "_"
         << "KS=" << CommonTestUtils::vec2str(kernelShape) << "_"
         << "S=" << stride << "_"
         << "OC=" << outputChannels << "_"
         << "netPRC=" << netPrecision.name() << "_"
         << "targetDevice=" << targetDevice;
    for (const auto& item : configuration) {
        name << "_configItem=" << item.first << "_" << item.second;
    }
    return name.str();
}
// Creates the input blob for the test and fills it with float values in
// [-2, 2], matching the value range the reference comparison expects.
InferenceEngine::Blob::Ptr SliceConvTest::GenerateInput(const InferenceEngine::InputInfo& info) const {
    InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc());
    blob->allocate();
    auto* rawBlobDataPtr = blob->buffer().as<float*>();
    const std::vector<float> values = CommonTestUtils::generate_float_numbers(blob->size(), -2.0f, 2.0f);
    // Bulk copy into the blob instead of a manual element-by-element loop.
    std::copy(values.cbegin(), values.cend(), rawBlobDataPtr);
    return blob;
}
// Builds the test function: Parameter -> StridedSlice -> Convolution.
void SliceConvTest::SetUp() {
    // Unpack test parameters and merge the per-test plugin configuration.
    InferenceEngine::Precision netPrecision;
    std::map<std::string, std::string> tempConfig;
    convParams convolutionParams;
    size_t outputChannels;
    std::tie(netPrecision, targetDevice, tempConfig, convolutionParams, outputChannels) = this->GetParam();
    configuration.insert(tempConfig.begin(), tempConfig.end());
    std::vector<size_t> inputShape;
    std::vector<size_t> kernelShape;
    size_t stride;
    std::tie(inputShape, kernelShape, stride) = convolutionParams;

    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
    auto params = ngraph::builder::makeParams(ngPrc, { inputShape });
    // Slice a fixed [64, 128) window along the last (width) axis; the other
    // axes keep full extent via begin/end masks.
    // NOTE(review): assumes inputShape[3] >= 128 — all shapes registered in
    // the GNA instantiation satisfy this; confirm if new shapes are added.
    auto ss = ngraph::builder::makeStridedSlice(params[0], std::vector<int64_t>{0, 0, 0, 64}, std::vector<int64_t>{1, 1, 1, 128},
                                                std::vector<int64_t>{1, 1, 1, 1}, ngPrc, std::vector<int64_t>{1, 1, 1, 0},
                                                std::vector<int64_t>{1, 1, 1, 0}, std::vector<int64_t>{0, 0, 0, 0},
                                                std::vector<int64_t>{0, 0, 0, 0}, std::vector<int64_t>{0, 0, 0, 0});
    // Weight count = outC * inC * Kh * Kw; small magnitude keeps accumulations
    // inside GNA's integer dynamic range.
    auto filterWeights = CommonTestUtils::generate_float_numbers(outputChannels * inputShape[1] * kernelShape[0] * kernelShape[1],
                                                                 -0.2f, 0.2f);
    // VALID padding, uniform stride in both spatial dims, explicit weights.
    auto conv = ngraph::builder::makeConvolution(ss, ngPrc, { kernelShape[0], kernelShape[1] }, { stride, stride }, { 0, 0 },
                                                 { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights);
    function = std::make_shared<ngraph::Function>(conv, params, "StridedSliceConvTest");
}
} // namespace SubgraphTestsDefinitions