[GNA] Fix Activation output size not matching convolution if padded. (#1980)

* Fix Activation output size not matching convolution if padded.

* Fix input padding handling in Convolution

* fix static bug

* Use correct value for feature rotation.

* [GNA] Fix regression

* Added tests

* Added tests
Kamil Magierski 2020-09-04 12:23:00 +02:00 committed by GitHub
parent b27ce4b04d
commit 9df59284bc
7 changed files with 321 additions and 13 deletions


@@ -324,17 +324,23 @@ void GNAGraphCompiler::ConvolutionPrimitive(InferenceEngine::CNNLayerPtr layer)
     uint32_t num_filters = convolution._out_depth;
     uint32_t num_filter_coefficients = single_conv_kernel_size + num_conv_kernel_padding;
     uint32_t num_filter_rows = num_filter_coefficients / num_feature_map_columns;
-    uint32_t num_columns_in = num_inputs + num_input_padding;
+    uint32_t num_columns_in = num_inputs;
-    uint32_t num_columns_out = (((num_inputs + num_input_padding - num_filter_coefficients) / num_feature_map_columns) + 1) * convolution._out_depth;
+    uint32_t num_columns_out = (((num_inputs - num_filter_coefficients) / num_feature_map_columns) + 1) * convolution._out_depth;
+    uint32_t num_columns_out_unpadded = (((num_inputs - single_conv_kernel_size) / num_feature_map_columns) + 1) * convolution._out_depth;
     uint32_t original_num_feature_map_rows = num_feature_map_rows;
+    uint32_t original_input_padding = num_input_padding;
+    uint32_t additional_padding = 0;
     // if kernel padding to multiple of 8 will cause missed outputs, need to pad further
     while (num_columns_out < out_batch * out_channels * out_height * out_width) {
-        num_input_padding += 8;
+        num_input_padding = original_input_padding + additional_padding;
         num_feature_map_rows = original_num_feature_map_rows + (num_input_padding) / num_feature_map_columns;
         num_columns_in = num_inputs + num_input_padding;
         num_columns_out = (((num_inputs + num_input_padding - num_filter_coefficients) / num_feature_map_columns) + 1) * convolution._out_depth;
-        dnn->new_num_conv_columns = num_columns_out / convolution._out_depth;
+        dnn->new_num_conv_columns = num_columns_out;
+        additional_padding += 8;
     }
     if (num_input_padding == 0) {
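
Pulled out of plugin context, the search the rewritten loop performs looks roughly like this. A minimal standalone sketch: the function name and parameter list are invented here, but the formula mirrors num_columns_out above.

#include <cstdint>

// Grow input padding in 8-element steps until the padded convolution
// yields at least as many outputs as the IR expects.
uint32_t findInputPadding(uint32_t num_inputs, uint32_t base_padding,
                          uint32_t num_filter_coefficients,
                          uint32_t num_feature_map_columns,
                          uint32_t out_depth, uint32_t expected_outputs) {
    uint32_t padding = base_padding;
    // outputs of the padded convolution across all filters; assumes
    // num_inputs + padding >= num_filter_coefficients, as the plugin code does
    auto num_outputs = [&]() {
        return (((num_inputs + padding - num_filter_coefficients)
                 / num_feature_map_columns) + 1) * out_depth;
    };
    while (num_outputs() < expected_outputs) {
        padding += 8;  // GNA buffers advance in 8-element steps
    }
    return padding;
}

Recomputing num_input_padding from original_input_padding on each iteration, as the new code does, means the first pass re-evaluates the sizes with the kernel padding that was already applied before any extra padding is added.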
@@ -406,7 +412,7 @@ void GNAGraphCompiler::ConvolutionPrimitive(InferenceEngine::CNNLayerPtr layer)
         // Kaldi features are opposite orientation
         dnn->do_rotate_input = true;
         dnn->num_rotate_rows = num_feature_map_columns;
-        dnn->num_rotate_columns = num_feature_map_rows;
+        dnn->num_rotate_columns = original_num_feature_map_rows;
     } else {
         dnn->do_rotate_input = false;
     }
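
The fix passes original_num_feature_map_rows because the padding loop above may have grown num_feature_map_rows; the rotation must describe the unpadded feature geometry. As a rough standalone sketch of what such a row/column rotation amounts to (my reading: a transpose of an interleaved buffer; this is an illustration, not the plugin's implementation):

#include <cstddef>
#include <vector>

// Transpose a rows-by-cols interleaved buffer, which is what
// num_rotate_rows / num_rotate_columns parameterize here.
template <typename T>
std::vector<T> rotateFeatures(const std::vector<T>& in, size_t rows, size_t cols) {
    std::vector<T> out(in.size());
    for (size_t r = 0; r < rows; ++r)
        for (size_t c = 0; c < cols; ++c)
            out[c * rows + r] = in[r * cols + c];
    return out;
}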
@@ -1509,6 +1515,7 @@ void GNAGraphCompiler::PWLPrimitive(InferenceEngine::CNNLayerPtr layer) {
     if (dnn->new_num_conv_columns) {
         num_rows = dnn->new_num_conv_columns;
+        if (inputs->getDims().size() == 4) num_rows /= FROM_IR_DIM(inputs, 3);
         dnn->new_num_conv_columns = 0;
     }
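
Taken together with the first hunk: ConvolutionPrimitive now records the full padded output count in new_num_conv_columns, and the activation derives its own row count from it. An illustrative calculation (numbers invented): if new_num_conv_columns is 2560 and the 4-D activation input has 16 channels, which is what FROM_IR_DIM(inputs, 3) resolves to by my reading of the macro, the activation runs over 2560 / 16 = 160 rows, the per-channel convolution output length. Previously the division by _out_depth happened in ConvolutionPrimitive itself, which mismatched activations whose input is not 4-D.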


@@ -688,7 +688,7 @@ void GNAPlugin::LoadNetwork(ICNNNetwork & _network) {
     }
     // calculating input orientation without memory layers, since their orientation not changed during infer right now
-    std::unordered_map<string, string> skippedLayers;
+    std::unordered_map<string, std::vector<string>> skippedLayers;
     bool withConv = false;
     for (auto &layer : sortedNet) {
@@ -715,23 +715,32 @@ void GNAPlugin::LoadNetwork(ICNNNetwork & _network) {
         auto dnnLayer = graphCompiler.dnnComponents.findComponent(layer);
         string inputName = prevLayer->name;
+        std::vector<string> inputs;
         if (skippedLayers.count(prevLayer->name)) {
-            inputName = skippedLayers[prevLayer->name];
+            inputs = skippedLayers[prevLayer->name];
+        } else {
+            inputs.push_back(inputName);
         }
         // non functional layer - skipped by gna
         if (nullptr == dnnLayer) {
             // storing input name for skipped layer
-            skippedLayers[layer->name] = inputName;
+            if (skippedLayers[inputName].size() == 0) {
+                skippedLayers[layer->name].push_back(inputName);
+            } else {
+                skippedLayers[layer->name] = skippedLayers[inputName];
+            }
             continue;
         }
         // input orientation might be already initialized, thus verify that it matches
-        if (!inputsDesc->orientation_in.count(inputName)) {
-            inputsDesc->orientation_in[inputName] = dnnLayer->orientation_in;
-        } else {
-            if (inputsDesc->orientation_in[inputName] != dnnLayer->orientation_in) {
-                THROW_GNA_EXCEPTION << "orientation for input layer: " << inputName << "cannot be calculated";
-            }
-        }
+        for (auto input : inputs) {
+            if (!inputsDesc->orientation_in.count(input)) {
+                inputsDesc->orientation_in[input] = dnnLayer->orientation_in;
+            } else {
+                if (inputsDesc->orientation_in[input] != dnnLayer->orientation_in) {
+                    THROW_GNA_EXCEPTION << "orientation for input layer: " << input << " cannot be calculated";
+                }
+            }
+        }
     }
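
Why a vector: the old string-to-string map forwarded only a single name through a chain of skipped layers, while the vector lets every skipped layer carry the full set of original input names, so orientation is checked against each real input. A toy, self-contained illustration of the propagation (layer names invented):

#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

int main() {
    std::unordered_map<std::string, std::vector<std::string>> skippedLayers;
    // "reshape1" is skipped by GNA; its real input is the network input "in0"
    skippedLayers["reshape1"] = {"in0"};
    // "permute1" is also skipped and consumes "reshape1": it inherits {"in0"}
    skippedLayers["permute1"] = skippedLayers["reshape1"];
    // a functional layer consuming "permute1" now resolves orientation for "in0"
    for (const auto& input : skippedLayers["permute1"]) {
        std::cout << "orientation applies to: " << input << "\n";
    }
    return 0;
}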


@@ -0,0 +1,44 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include <vector>
#include "subgraph_tests/reshape_permute_conv_permute_reshape_act.hpp"
#include "common_test_utils/test_constants.hpp"
std::vector<std::array<size_t, 4>> input_shapes {
{1, 1, 166, 2},
{1, 1, 144, 2},
{1, 1, 288, 2},
{1, 1, 144, 4},
};
std::vector<std::array<size_t, 2>> kernel_shapes {
{1, 7},
{1, 15},
};
std::vector<size_t> output_channels {
16,
8,
4,
};
std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16,
};
std::map<std::string, std::string> additional_config = { };
namespace LayerTestsDefinitions {
INSTANTIATE_TEST_CASE_P(basic, ConvReshapeAct,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::ValuesIn(input_shapes),
::testing::ValuesIn(kernel_shapes),
::testing::ValuesIn(output_channels),
::testing::Values(additional_config)),
ConvReshapeAct::getTestCaseName);
} // namespace LayerTestsDefinitions


@@ -0,0 +1,47 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include <vector>
#include "subgraph_tests/reshape_permute_conv_permute_reshape_act.hpp"
#include "common_test_utils/test_constants.hpp"
std::vector<std::array<size_t, 4>> input_shapes {
{1, 1, 166, 2},
{1, 1, 144, 2},
{1, 1, 288, 2},
{1, 1, 144, 4},
};
std::vector<std::array<size_t, 2>> kernel_shapes {
{1, 7},
{1, 15},
};
std::vector<size_t> output_channels {
16,
8,
4,
};
std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16,
};
std::map<std::string, std::string> additional_config = {
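// Assumed semantics, not stated in this PR: GNA_SW_EXACT runs the GNA
// software emulation with bit-exact results, and GNA_SCALE_FACTOR_0 sets
// the quantization scale factor for the first network input.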
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_SCALE_FACTOR_0", "2340"}
};
namespace LayerTestsDefinitions {
INSTANTIATE_TEST_CASE_P(basic, ConvReshapeAct,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(input_shapes),
::testing::ValuesIn(kernel_shapes),
::testing::ValuesIn(output_channels),
::testing::Values(additional_config)),
ConvReshapeAct::getTestCaseName);
} // namespace LayerTestsDefinitions


@@ -0,0 +1,44 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include <vector>
#include "subgraph_tests/reshape_permute_conv_permute_reshape_act.hpp"
#include "common_test_utils/test_constants.hpp"
std::vector<std::array<size_t, 4>> input_shapes {
{1, 1, 166, 2},
{1, 1, 144, 2},
{1, 1, 288, 2},
{1, 1, 144, 4},
};
std::vector<std::array<size_t, 2>> kernel_shapes {
{1, 7},
{1, 15},
};
std::vector<size_t> output_channels {
16,
8,
4,
};
std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16,
};
std::map<std::string, std::string> additional_config = {};
namespace LayerTestsDefinitions {
INSTANTIATE_TEST_CASE_P(basic, ConvReshapeAct,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GPU),
::testing::ValuesIn(input_shapes),
::testing::ValuesIn(kernel_shapes),
::testing::ValuesIn(output_channels),
::testing::Values(additional_config)),
ConvReshapeAct::getTestCaseName);
} // namespace LayerTestsDefinitions


@@ -0,0 +1,37 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <tuple>
#include <vector>
#include <array>
#include <string>
#include <memory>
#include "functional_test_utils/layer_test_utils.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "ngraph_functions/builders.hpp"
namespace LayerTestsDefinitions {
typedef std::tuple<
InferenceEngine::Precision, // Network Precision
std::string, // Target Device
std::array<size_t, 4>, // Input shape
std::array<size_t, 2>, // Kernel shape
size_t, // Output channels
std::map<std::string, std::string> // Configuration
> ConvReshapeActParams;
class ConvReshapeAct : public testing::WithParamInterface<ConvReshapeActParams>,
virtual public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(testing::TestParamInfo<ConvReshapeActParams> obj);
protected:
void SetUp() override;
void Run() override;
};
} // namespace LayerTestsDefinitions


@@ -0,0 +1,120 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include <tuple>
#include <string>
#include <numeric>
#include <vector>
#include <memory>
#include <debug.h>
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/precision_utils.hpp"
#include "functional_test_utils/skip_tests_config.hpp"
#include "subgraph_tests/reshape_permute_conv_permute_reshape_act.hpp"
namespace LayerTestsDefinitions {
std::string ConvReshapeAct::getTestCaseName(testing::TestParamInfo<ConvReshapeActParams> obj) {
InferenceEngine::Precision netPrecision;
std::string targetName;
std::array<size_t, 4> input_shape;
std::array<size_t, 2> kernel_shape;
size_t output_channels;
std::map<std::string, std::string> configuration;
std::tie(netPrecision, targetName, input_shape, kernel_shape, output_channels, configuration) = obj.param;
std::ostringstream results;
results << "IS=" << CommonTestUtils::vec2str(std::vector<size_t>(input_shape.begin(), input_shape.end())) << "_";
results << "KS=" << CommonTestUtils::vec2str(std::vector<size_t>(kernel_shape.begin(), kernel_shape.end())) << "_";
results << "OC=" << output_channels << "_";
results << "netPRC=" << netPrecision.name() << "_";
results << "targetDevice=" << targetName;
return results.str();
}
void ConvReshapeAct::SetUp() {
InferenceEngine::Precision netPrecision;
std::array<size_t, 4> input_shape;
std::array<size_t, 2> kernel_shape;
size_t output_channels;
std::map<std::string, std::string> additional_config;
std::tie(netPrecision, targetDevice, input_shape, kernel_shape, output_channels, additional_config) = this->GetParam();
configuration.insert(additional_config.begin(), additional_config.end());
const std::size_t input_dim = std::accumulate(input_shape.begin(), input_shape.end(), 1, std::multiplies<size_t>());
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
std::vector<size_t> input_dims { 1, input_dim };
std::vector<size_t> reshape_in_dims = std::vector<size_t>(input_shape.begin(), input_shape.end());
std::vector<size_t> permute_in_order = { 0, 3, 1, 2 };
std::vector<size_t> permute_out_order = { 0, 2, 3, 1 };
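// {0, 3, 1, 2} rearranges NHWC to NCHW for the convolution; {0, 2, 3, 1} undoes it afterwards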
std::vector<size_t> reshape_out_dims = { 1, input_shape[0] * input_shape[1] * (input_shape[2] - kernel_shape[1] + 1) * output_channels };
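// e.g. for input {1, 1, 166, 2} with kernel {1, 7} and 16 output channels,
// the line above yields 1 * 1 * (166 - 7 + 1) * 16 = 2560 elements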
auto input_parameter = ngraph::builder::makeParams(ngPrc, {input_dims});
auto reshape_in_pattern = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
ngraph::Shape{4},
reshape_in_dims);
auto reshape_in = std::make_shared<ngraph::op::v1::Reshape>(input_parameter[0], reshape_in_pattern, false);
auto permute_in_params = std::make_shared<ngraph::opset1::Constant>(ngraph::element::i64,
ngraph::Shape{4},
ngraph::Shape{permute_in_order});
auto permute_in = std::make_shared<ngraph::opset1::Transpose>(reshape_in, permute_in_params);
auto conv = ngraph::builder::makeConvolution(permute_in, ngPrc, {kernel_shape[0], kernel_shape[1]}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
ngraph::op::PadType::VALID, output_channels);
auto permute_out_params = std::make_shared<ngraph::opset1::Constant>(ngraph::element::i64,
ngraph::Shape{4},
permute_out_order);
auto permute_out = std::make_shared<ngraph::opset1::Transpose>(conv, permute_out_params);
auto reshape_out_pattern = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
ngraph::Shape{2},
std::vector<size_t>{reshape_out_dims});
auto reshape_out = std::make_shared<ngraph::op::v1::Reshape>(permute_out, reshape_out_pattern, false);
auto tanh = std::make_shared<ngraph::op::Tanh>(reshape_out);
function = std::make_shared<ngraph::Function>(tanh, input_parameter, "conv_reshape_act");
}
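// Run() replaces the base-class flow so inputs can be filled with sine values
// and the comparison threshold relaxed to 0.1 before validation.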
void ConvReshapeAct::Run() {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
ConfigurePlugin();
LoadNetwork();
inferRequest = executableNetwork.CreateInferRequest();
inputs.clear();
for (const auto &input : cnnNetwork.getInputsInfo()) {
const auto &info = input.second;
auto tensorDesc = info->getTensorDesc();
auto blob = FuncTestUtils::createAndFillBlobFloat(tensorDesc, 2, -1, 100, 111);
FuncTestUtils::fillInputsBySinValues(blob);
inferRequest.SetBlob(info->name(), blob);
inputs.push_back(blob);
}
// dynamic batching is opt-in: KEY_DYN_BATCH_ENABLED must be configured to YES
auto dynBatchEnabled = configuration.find(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED);
if (dynBatchEnabled != configuration.end() && dynBatchEnabled->second == InferenceEngine::PluginConfigParams::YES) {
auto batchSize = cnnNetwork.getInputsInfo().begin()->second->getTensorDesc().getDims()[0] / 2;
inferRequest.SetBatch(batchSize);
}
inferRequest.Infer();
threshold = 0.1;
Validate();
}
TEST_P(ConvReshapeAct, CompareWithRefs) {
Run();
}
} // namespace LayerTestsDefinitions
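
To run just these subgraph cases locally, filter on the fixture name from the functional test binary, e.g. (the binary name here is an assumption; substitute the test target of the plugin you built):

./gnaFuncTests --gtest_filter='*ConvReshapeAct*'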