[GNA] Fixing issues for convolution single-layer tests (#3586)

Elizaveta Lobanova 2021-01-14 11:59:31 +03:00 committed by GitHub
parent d2303262a2
commit 84c06e0856
6 changed files with 125 additions and 34 deletions

View File

@@ -23,6 +23,7 @@
#include "am_intel_dnn.hpp"
#include "dnn_types.h"
#include "gna_types.h"
#include "gna_limitations.hpp"
#if GNA_LIB_VER == 2
#include <gna2-model-api.h>
@@ -190,6 +191,11 @@ void GNAPluginNS::backend::AMIntelDNN::InitConvolutional1DComponentPrivate(intel
if (comp.num_rows_in * comp.num_columns_in % 8 != 0) {
THROW_GNA_EXCEPTION << "Number of inputs to Convolutional1DComponent is not a multiple of 8";
}
if (comp.op.conv1D.num_filters < GNALimitations::convMinFiltersNum ||
comp.op.conv1D.num_filters > GNALimitations::convMaxFiltersNum ||
comp.op.conv1D.num_filters % GNALimitations::convFiltersNumDivider != 0) {
THROW_GNA_EXCEPTION << "Unsupported number of filters in Convolutional1DComponent: " << comp.op.conv1D.num_filters;
}
auto filter_stride_size = comp.op.conv1D.num_feature_maps * comp.op.conv1D.num_feature_map_columns;
auto max_number_of_out_elements = (comp.num_columns_in - comp.op.conv1D.num_filter_coefficients) / filter_stride_size + 1;
if (comp.num_columns_out / max_number_of_out_elements != comp.op.conv1D.num_filters) {
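For reference, a minimal self-contained sketch of the output-element arithmetic checked above, with illustrative numbers (none taken from a real model):

#include <cstdint>

// Illustrative values; the real ones come from the component descriptor.
uint32_t MaxOutElements() {
    uint32_t num_feature_maps = 1;
    uint32_t num_feature_map_columns = 8;    // filter_stride_size = 1 * 8 = 8
    uint32_t num_columns_in = 96;
    uint32_t num_filter_coefficients = 16;   // filter footprint in input elements

    uint32_t filter_stride_size = num_feature_maps * num_feature_map_columns;
    // Positions the filter can occupy while sliding over the input:
    return (num_columns_in - num_filter_coefficients) / filter_stride_size + 1;  // (96 - 16) / 8 + 1 = 11
}
// The component is rejected unless num_columns_out / max_number_of_out_elements
// equals num_filters.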

View File

@@ -0,0 +1,17 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cstdint>
namespace GNAPluginNS {
namespace GNALimitations {
constexpr uint32_t convMinFiltersNum = 4;
constexpr uint32_t convMaxFiltersNum = 65532;
constexpr uint32_t convFiltersNumDivider = 4;
} // namespace GNALimitations
} // namespace GNAPluginNS
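These limits back the filter-count guard added in the first hunk; a self-contained sketch of the same check (IsSupportedFiltersNum is a hypothetical helper, for illustration only):

#include <cstdint>

constexpr uint32_t convMinFiltersNum = 4;
constexpr uint32_t convMaxFiltersNum = 65532;
constexpr uint32_t convFiltersNumDivider = 4;

// True when GNA can execute a convolution with this many filters:
// within [4, 65532] and a multiple of 4.
bool IsSupportedFiltersNum(uint32_t n) {
    return n >= convMinFiltersNum && n <= convMaxFiltersNum &&
           n % convFiltersNumDivider == 0;
}
// IsSupportedFiltersNum(12) == true; IsSupportedFiltersNum(5) == false (not a
// multiple of 4); IsSupportedFiltersNum(65536) == false (above the maximum).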

View File

@@ -254,6 +254,15 @@ void GNAGraphCompiler::ConvolutionPrimitive(InferenceEngine::CNNLayerPtr layer)
auto out_height = FROM_IR_DIM(outputs, out_order[2]);
auto out_width = FROM_IR_DIM(outputs, out_order[3]);
if (in_height > 1 && in_width == 1) {
std::swap(in_height, in_width);
std::swap(out_height, out_width);
std::swap(convolution._kernel_x, convolution._kernel_y);
std::swap(convolution._padding_x, convolution._padding_y);
std::swap(convolution._stride_x, convolution._stride_y);
std::swap(convolution._dilation_x, convolution._dilation_y);
}
if (in_batch != 1 || out_batch != 1) {
THROW_GNA_LAYER_EXCEPTION(layer) << "with batch size not equal to 1 is not supported";
}
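The swap above folds a column-shaped N x 1 input into the row-shaped 1 x N case that the GNA convolution path already handles. A minimal sketch of the idea with plain variables (the struct and helper are hypothetical, for illustration only):

#include <cstddef>
#include <utility>

struct ConvAttrs { std::size_t kernel_x, kernel_y, stride_x, stride_y; };

void ToWidthMajor(std::size_t& in_h, std::size_t& in_w,
                  std::size_t& out_h, std::size_t& out_w, ConvAttrs& c) {
    // Spatial data sits in H while W is degenerate: transpose every
    // H/W-indexed attribute so the layer becomes a 1 x N convolution along W.
    if (in_h > 1 && in_w == 1) {
        std::swap(in_h, in_w);
        std::swap(out_h, out_w);
        std::swap(c.kernel_x, c.kernel_y);
        std::swap(c.stride_x, c.stride_y);
        // padding and dilation are swapped the same way in the real code.
    }
}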

View File

@@ -1089,7 +1089,7 @@ uint32_t GNAPlugin::QueueInference(const InferenceEngine::BlobMap &inputs, Infer
gnadevice ? 2 : 4,
// TODO: only works for cnn4a and google command so far
dims[0],
is2D ? dims[dims.size() - 1] : dims[dims.size() - 1] * dims[dims.size() - 3], // num_feature_vectors; looks like the batch should be included here
InferenceEngine::details::product(dims) / dims[0],
num_rotate_rows,
num_rotate_columns);
}
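The replacement computes the per-batch element count instead of the layout-dependent formula it removes; a self-contained sketch of the same expression (std::accumulate stands in for InferenceEngine::details::product):

#include <cstddef>
#include <functional>
#include <numeric>
#include <vector>

std::size_t NumFeatureVectors(const std::vector<std::size_t>& dims) {
    std::size_t total = std::accumulate(dims.begin(), dims.end(),
                                        std::size_t{1}, std::multiplies<std::size_t>());
    return total / dims[0];  // dims[0] is the batch dimension
}
// For dims = {1, 1, 1, 32} this yields 32 whether the shape is 2D or 4D,
// matching what the removed is2D special-casing computed for these shapes.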

View File

@@ -17,63 +17,119 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
};
/* ============= 2D Convolution ============= */
const std::vector<std::vector<size_t >> kernels = {{3, 1},
{5, 1}};
const std::vector<std::vector<size_t >> strides = {{1, 1},
{3, 1}};
const std::vector<std::vector<ptrdiff_t>> padBegins = {{0, 1},
{3, 1}};
const std::vector<std::vector<ptrdiff_t>> padEnds = {{0, 1},
{3, 1}};
const std::vector<std::vector<size_t >> kernelsH1 = {{1, 3},
{1, 5}};
const std::vector<std::vector<size_t >> stridesH1 = {{1, 1},
{1, 3}};
const std::vector<std::vector<ptrdiff_t>> padBeginsH1 = {{1, 0},
{1, 3}};
const std::vector<std::vector<ptrdiff_t>> padEndsH1 = {{1, 0},
{1, 3}};
const std::vector<std::vector<size_t >> dilationsH1 = {{1, 1},
{1, 3}};
// TODO: Currently C != 1 is not supported for graphs with native NCHW layout (will be fixed in 40496)
const std::vector<std::vector<size_t>> inputShapesH1 = {{1, 1, 1, 32},
{1, 1, 1, 160},
{1, 1, 1, 64}};
const std::vector<std::vector<size_t >> kernelsW1 = {{3, 1},
{5, 1}};
const std::vector<std::vector<size_t >> stridesW1 = {{1, 1},
{3, 1}};
const std::vector<std::vector<size_t >> dilations = {{1, 1},
{3, 1}};
const std::vector<size_t> numOutChannels = {1, 5};
const std::vector<ngraph::op::PadType> padTypes = {
ngraph::op::PadType::EXPLICIT,
ngraph::op::PadType::VALID
};
const std::vector<std::vector<ptrdiff_t>> padBeginsW1 = {{0, 1},
{3, 1}};
const std::vector<std::vector<ptrdiff_t>> padEndsW1 = {{0, 1},
{3, 1}};
const std::vector<std::vector<size_t >> dilationsW1 = {{1, 1},
{3, 1}};
// TODO: Currently C != 1 is not supported for graphs with native NCHW layout (will be fixed in 40496)
const std::vector<std::vector<size_t>> inputShapesW1 = {{1, 1, 32, 1},
{1, 1, 160, 1},
{1, 1, 64, 1}};
const std::vector<size_t> numOutChannels = {4, 8, 12};
const auto conv2DParams_ExplicitPadding = ::testing::Combine(
::testing::ValuesIn(kernels),
::testing::ValuesIn(strides),
::testing::ValuesIn(padBegins),
::testing::ValuesIn(padEnds),
::testing::ValuesIn(dilations),
const auto conv2DParams_ExplicitPadding_Height1 = ::testing::Combine(
::testing::ValuesIn(kernelsH1),
::testing::ValuesIn(stridesH1),
::testing::ValuesIn(padBeginsH1),
::testing::ValuesIn(padEndsH1),
::testing::ValuesIn(dilationsH1),
::testing::ValuesIn(numOutChannels),
::testing::Values(ngraph::op::PadType::EXPLICIT)
);
const auto conv2DParams_AutoPadValid = ::testing::Combine(
::testing::ValuesIn(kernels),
::testing::ValuesIn(strides),
const auto conv2DParams_ExplicitPadding_Width1 = ::testing::Combine(
::testing::ValuesIn(kernelsW1),
::testing::ValuesIn(stridesW1),
::testing::ValuesIn(padBeginsW1),
::testing::ValuesIn(padEndsW1),
::testing::ValuesIn(dilationsW1),
::testing::ValuesIn(numOutChannels),
::testing::Values(ngraph::op::PadType::EXPLICIT)
);
const auto conv2DParams_AutoPadValid_Height1 = ::testing::Combine(
::testing::ValuesIn(kernelsH1),
::testing::ValuesIn(stridesH1),
::testing::Values(std::vector<ptrdiff_t>({0, 0})),
::testing::Values(std::vector<ptrdiff_t>({0, 0})),
::testing::ValuesIn(dilations),
::testing::ValuesIn(dilationsH1),
::testing::ValuesIn(numOutChannels),
::testing::Values(ngraph::op::PadType::VALID)
);
const auto conv2DParams_AutoPadValid_Width1 = ::testing::Combine(
::testing::ValuesIn(kernelsW1),
::testing::ValuesIn(stridesW1),
::testing::Values(std::vector<ptrdiff_t>({0, 0})),
::testing::Values(std::vector<ptrdiff_t>({0, 0})),
::testing::ValuesIn(dilationsW1),
::testing::ValuesIn(numOutChannels),
::testing::Values(ngraph::op::PadType::VALID)
);
// TODO: Issue: 26417
INSTANTIATE_TEST_CASE_P(DISABLED_smoke_Convolution2D_ExplicitPadding, ConvolutionLayerTest,
// TODO: padding isn't currently supported in GNA
INSTANTIATE_TEST_CASE_P(DISABLED_smoke_Convolution2D_ExplicitPadding_Height1, ConvolutionLayerTest,
::testing::Combine(
conv2DParams_ExplicitPadding,
conv2DParams_ExplicitPadding_Height1,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t >({1, 3, 30, 1})),
::testing::ValuesIn(inputShapesH1),
::testing::Values(CommonTestUtils::DEVICE_GNA)),
ConvolutionLayerTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(DISABLED_smoke_Convolution2D_AutoPadValid, ConvolutionLayerTest,
INSTANTIATE_TEST_CASE_P(DISABLED_smoke_Convolution2D_ExplicitPadding_Width1, ConvolutionLayerTest,
::testing::Combine(
conv2DParams_AutoPadValid,
conv2DParams_ExplicitPadding_Width1,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t >({1, 3, 30, 1})),
::testing::ValuesIn(inputShapesW1),
::testing::Values(CommonTestUtils::DEVICE_GNA)),
ConvolutionLayerTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Convolution2D_AutoPadValid_Height1, ConvolutionLayerTest,
::testing::Combine(
conv2DParams_AutoPadValid_Height1,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::ValuesIn(inputShapesH1),
::testing::Values(CommonTestUtils::DEVICE_GNA)),
ConvolutionLayerTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Convolution2D_AutoPadValid_Width1, ConvolutionLayerTest,
::testing::Combine(
conv2DParams_AutoPadValid_Width1,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::ValuesIn(inputShapesW1),
::testing::Values(CommonTestUtils::DEVICE_GNA)),
ConvolutionLayerTest::getTestCaseName);
} // namespace
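For scale: conv2DParams_AutoPadValid_Height1 alone expands to 2 kernels × 2 strides × 2 dilations × 3 filter counts = 24 parameter tuples, and each enabled instantiation further multiplies that by the network precisions and the 3 input shapes; the Width1 suite has the same size by symmetry.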

View File

@@ -32,7 +32,7 @@ std::vector<std::string> disabledTestPatterns() {
// TODO: FIX BUG 31661
".*Behavior.*CallbackThrowException.*",
// TODO: FIX BUG 32210
R"(.*(Sigmoid|Tanh|Exp|Log).*)",
R"(.*ActivationLayerTest.CompareWithRefs/(Sigmoid|Tanh|Exp|Log).*)",
// TODO: Issue 32542
R"(.*(EltwiseLayerTest).*eltwiseOpType=(Sum|Sub).*opType=SCALAR.*)",
R"(.*(EltwiseLayerTest).*eltwiseOpType=Prod.*secondaryInputType=PARAMETER.*opType=SCALAR.*)",
@@ -50,6 +50,9 @@ std::vector<std::string> disabledTestPatterns() {
// TODO: Issue: 40960
R"(.*(ConstantResultSubgraphTest).*)",
// TODO: Issue: 29577
R"(.*CoreThreadingTests.smoke_QueryNetwork.*)"
R"(.*CoreThreadingTests.smoke_QueryNetwork.*)",
// TODO: Issue 24839
R"(.*ConvolutionLayerTest.CompareWithRefs.*D=\(1.3\).*)",
R"(.*ConvolutionLayerTest.CompareWithRefs.*D=\(3.1\).*)"
};
}
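A rough sketch of how these patterns take effect: the test framework skips any test whose full name matches one of the regexes (the helper below is illustrative, not the actual framework code):

#include <regex>
#include <string>
#include <vector>

bool IsDisabled(const std::string& fullTestName,
                const std::vector<std::string>& patterns) {
    for (const auto& p : patterns)
        if (std::regex_match(fullTestName, std::regex(p)))
            return true;  // matched a disabled pattern, skip this test
    return false;
}
// A ConvolutionLayerTest name containing "D=(3.1)" (dilation 3x1) matches the
// last pattern, so those cases stay disabled under Issue 24839.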