[GNA] Add support for SubstituteSoftSign for pattern with addition (#2789)

* [GNA] add changes for SubstituteSoftSign for IRv10

* cpplint fix

* [GNA] add requested changes & change SoftSign test validation

* [GNA] SubstituteSoftsign refactor

* [GNA] enable 3d input tests for softsign
Anna Alberska 2020-11-09 14:40:31 +01:00 committed by GitHub
parent 4871e1802d
commit 1220fb2fe8
4 changed files with 208 additions and 9 deletions
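
For reference, softsign(x) = x / (1 + |x|). The pass changed below recognizes the decomposed form produced by IRv10 models (Abs, an Add of 1, a reciprocal Power, a Multiply with the original input) in addition to the IRv7 form, where the +1 is folded into the Power layer's offset. A minimal standalone sketch (not part of the patch) showing that the decomposition reproduces softsign:

// Standalone sketch (not part of the patch): softsign computed directly and
// via the decomposition the pass matches: abs -> add 1 -> power -1 -> mul.
#include <cassert>
#include <cmath>
#include <cstdio>

static float softsign_direct(float x) {
    return x / (1.0f + std::fabs(x));
}

static float softsign_decomposed(float x) {
    float a = std::fabs(x);        // Abs layer
    float s = a + 1.0f;            // IRv10: Add layer; IRv7: folded into the Power offset
    float p = std::pow(s, -1.0f);  // Power layer with power = -1 (reciprocal)
    return p * x;                  // Multiply with the original input
}

int main() {
    for (float x : {-4.0f, -0.5f, 0.0f, 0.5f, 4.0f}) {
        std::printf("%g -> %g vs %g\n", x, softsign_direct(x), softsign_decomposed(x));
        assert(std::fabs(softsign_direct(x) - softsign_decomposed(x)) < 1e-6f);
    }
    return 0;
}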


@@ -291,6 +291,21 @@ void ReorderMaxPoolPass::run() {
 }
 
 void SubstituteSoftSignPass::run() {
+    //detecting following pattern
+    // irv7 model:          irv10 model:
+    // a layer                  a layer
+    // |  \                     |  \
+    // abs  \                   abs  \
+    // |      |                 |     |
+    // |      |                 add   |
+    // |      |                 |     |
+    // power  |                 power |
+    //  \    /                   \   /
+    //   mul                      mul
+    auto fp32eq = [](float p1, float p2) {
+        return (std::abs(p1 - p2) <= 0.00001f * std::min(std::abs(p1), std::abs(p2)));
+    };
     auto hasNChildren = [](CNNLayerPtr l, int N){
         if (l->outData.size() != 1) return false;
         if (getInputTo(l->outData.front()).size() != N) return false;
@@ -319,13 +334,24 @@ void SubstituteSoftSignPass::run() {
         }
         if (cont) continue;
         if (!hasNChildren(abs, 1)) continue;
-        auto power = getNthChild(abs, 0);
+        auto addition = getNthChild(abs, 0);
+        InferenceEngine::CNNLayerPtr power = nullptr;
 
-        if (!LayerInfo(power).isPower()) continue;
-        auto powerLayer = LayerInfo(power).as<PowerLayer*>();
-        if (powerLayer->power != -1) continue;
-        if (powerLayer->offset != 1) continue;
-        if (powerLayer->scale != 1) continue;
+        if (!LayerInfo(addition).isPower()) continue;
+        auto powerLayer = LayerInfo(addition).as<PowerLayer*>();
+
+        // first layer after abs must have scale of 1, offset of 1 and power of either 1 or -1
+        if (!fp32eq(powerLayer->scale, 1.0f) || !fp32eq(powerLayer->offset, 1.0f) || !fp32eq(std::abs(powerLayer->power), 1.0f)) continue;
+
+        // power == -1, offset = 1, scale = 1
+        if (fp32eq(powerLayer->power, -1.0f)) {
+            std::swap(addition, power);
+        } else {  // power == 1, offset = 1, scale = 1
+            power = getNthChild(addition, 0);
+            if (!LayerInfo(power).isPower()) continue;
+            auto powerLayer_1 = LayerInfo(power).as<PowerLayer*>();
+
+            // layer after addition must have power of -1, offset of 0 and scale of 1
+            if (!fp32eq(powerLayer_1->power, -1.0f) || !fp32eq(powerLayer_1->offset, 0.0f) || !fp32eq(powerLayer_1->scale, 1.0f)) continue;
+        }
 
         if (!hasNChildren(power, 1)) continue;
         auto mulSame = getNthChild(power, 0);
@@ -333,9 +359,9 @@ void SubstituteSoftSignPass::run() {
         // pattern matched - lets substitute
         gnalog() << "SoftSign subgraph found consits of: \n"
-                 << "\t" << abs->name << "\n"
-                 << "\t" << power->name << "\n"
-                 << "\t" << mul->name << "\n"
+                 << "\t" << abs->name << "\n";
+        if (addition != nullptr) gnalog() << "\t" << addition->name << "\n";
+        gnalog() << "\t" << mul->name << "\n"
                  << std::endl;
 
         // creating softsign layer
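
Summarizing the new matching rule: after Abs the pass now accepts either a single Power layer with power = -1, scale = 1, offset = 1 (the IRv7 form) or an Add-like Power (power = 1, scale = 1, offset = 1) followed by a reciprocal Power (power = -1, scale = 1, offset = 0) for IRv10. A hypothetical standalone helper (PowerParams and matchesSoftSignPowers are invented names, not part of the GNA plugin) expressing the same acceptance rule:

// Hypothetical illustration only: PowerParams and matchesSoftSignPowers are
// invented names used to restate the matching conditions, not plugin code.
#include <algorithm>
#include <cmath>

struct PowerParams { float power, scale, offset; };

static bool fp32eq(float p1, float p2) {
    return std::abs(p1 - p2) <= 0.00001f * std::min(std::abs(p1), std::abs(p2));
}

// IRv7:  Abs -> Power(power = -1, scale = 1, offset = 1)                 -> Mul
// IRv10: Abs -> Power(power =  1, scale = 1, offset = 1)  /* the Add */
//            -> Power(power = -1, scale = 1, offset = 0)                 -> Mul
// 'second' describes the optional follow-up Power (present only for IRv10).
static bool matchesSoftSignPowers(const PowerParams& first, const PowerParams* second) {
    if (!fp32eq(first.scale, 1.0f) || !fp32eq(first.offset, 1.0f)) return false;
    if (fp32eq(first.power, -1.0f)) return second == nullptr;           // IRv7 form, no Add
    if (!fp32eq(first.power, 1.0f) || second == nullptr) return false;  // IRv10 needs a second Power
    return fp32eq(second->power, -1.0f) && fp32eq(second->offset, 0.0f) && fp32eq(second->scale, 1.0f);
}

int main() {
    const PowerParams irv7{-1.0f, 1.0f, 1.0f};
    const PowerParams irv10_add{1.0f, 1.0f, 1.0f};
    const PowerParams irv10_pow{-1.0f, 1.0f, 0.0f};
    return (matchesSoftSignPowers(irv7, nullptr) &&
            matchesSoftSignPowers(irv10_add, &irv10_pow)) ? 0 : 1;
}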


@@ -0,0 +1,43 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "common_test_utils/test_constants.hpp"
#include "subgraph_tests/softsign.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> configs = {
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_SCALE_FACTOR_0", "3276.7"},
{"GNA_COMPACT_MODE", "NO"}
}
};
std::vector<std::vector<size_t>> input_shapes = {
{1, 8},
{1, 42},
{1, 100},
{1, 128},
{1, 1, 64},
{1, 1, 1, 64},
{1, 1, 1, 100}
};
INSTANTIATE_TEST_CASE_P(smoke_Softsign, SoftsignTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(configs),
::testing::ValuesIn(input_shapes)),
SoftsignTest::getTestCaseName);
} // namespace
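
The configuration entries above are regular GNA plugin options; a hedged sketch of passing an equivalent map when loading a model through the Inference Engine Core API ("model.xml" is a placeholder path, not part of this PR):

// Sketch of loading a model on GNA with the same configuration keys;
// "model.xml" is a placeholder path.
#include <map>
#include <string>

#include <ie_core.hpp>

int main() {
    InferenceEngine::Core ie;
    InferenceEngine::CNNNetwork network = ie.ReadNetwork("model.xml");

    const std::map<std::string, std::string> config = {
        {"GNA_DEVICE_MODE", "GNA_SW_EXACT"},  // software execution, bit-exact with GNA hardware
        {"GNA_SCALE_FACTOR_0", "3276.7"},     // quantization scale factor for input 0
        {"GNA_COMPACT_MODE", "NO"}
    };

    auto executableNetwork = ie.LoadNetwork(network, "GNA", config);
    auto inferRequest = executableNetwork.CreateInferRequest();
    return 0;
}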


@@ -0,0 +1,39 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <memory>
#include <string>
#include <tuple>
#include <vector>
#include "functional_test_utils/layer_test_utils.hpp"
#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
typedef std::tuple<
InferenceEngine::Precision, // Network Precision
std::string, // Target Device
std::map<std::string, std::string>, // Configuration
std::vector<size_t> // Input Shapes
> softsignParams;
namespace LayerTestsDefinitions {
class SoftsignTest : public testing::WithParamInterface<softsignParams>,
public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(testing::TestParamInfo<softsignParams> obj);
void Run() override;
protected:
void SetUp() override;
private:
std::shared_ptr<ngraph::Function> GenerateNgraphFriendlySoftSign();
};
} // namespace LayerTestsDefinitions


@@ -0,0 +1,91 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <ie_core.hpp>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
#include <ie_plugin_config.hpp>
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "functional_test_utils/layer_test_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "ngraph_functions/pass/convert_prc.hpp"
#include <legacy/ngraph_ops/power.hpp>
#include "subgraph_tests/softsign.hpp"
#include "ngraph_functions/builders.hpp"
namespace LayerTestsDefinitions {
std::string SoftsignTest::getTestCaseName(testing::TestParamInfo<softsignParams> obj) {
InferenceEngine::Precision netPrecision;
std::vector<size_t> inputShape;
std::string targetDevice;
std::map<std::string, std::string> configuration;
std::tie(netPrecision, targetDevice, configuration, inputShape) = obj.param;
std::ostringstream result;
result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_";
result << "netPRC=" << netPrecision.name() << "_";
result << "targetDevice=" << targetDevice;
for (auto const& configItem : configuration) {
result << "_configItem=" << configItem.first << "_" << configItem.second;
}
return result.str();
}
void SoftsignTest::SetUp() {
InferenceEngine::Precision netPrecision;
std::map<std::string, std::string> tempConfig;
std::vector<size_t> inputShape;
std::tie(netPrecision, targetDevice, tempConfig, inputShape) = this->GetParam();
configuration.insert(tempConfig.begin(), tempConfig.end());
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
auto params = ngraph::builder::makeParams(ngPrc, { inputShape });
auto abs = std::make_shared<ngraph::op::Abs>(params[0]);
auto add = std::make_shared<ngraph::op::PowerIE>(abs, 1, 1, 1);
auto power = std::make_shared<ngraph::op::PowerIE>(add, -1, 1, 0);
auto mul = std::make_shared<ngraph::op::Multiply>(power, params[0]);
ngraph::ResultVector results{ std::make_shared<ngraph::op::Result>(mul) };
function = std::make_shared<ngraph::Function>(results, params, "SoftSignTest");
}
void SoftsignTest::Run() {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
LoadNetwork();
Infer();
function = GenerateNgraphFriendlySoftSign();
Validate();
}
std::shared_ptr<ngraph::Function> SoftsignTest::GenerateNgraphFriendlySoftSign() {
InferenceEngine::Precision netPrecision = std::get<0>(this->GetParam());
std::vector<size_t> inputShape = std::get<3>(this->GetParam());
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
auto params = ngraph::builder::makeParams(ngPrc, { inputShape });
auto abs = std::make_shared<ngraph::op::Abs>(params[0]);
auto constant_0 = ngraph::builder::makeConstant<float>(ngPrc, inputShape, { 1 });
auto add = std::make_shared<ngraph::op::Add>(abs, constant_0);
auto constant_1 = ngraph::builder::makeConstant<float>(ngPrc, inputShape, { -1 });
auto power = std::make_shared<ngraph::op::Power>(add, constant_1);
auto mul = std::make_shared<ngraph::op::Multiply>(power, params[0]);
ngraph::ResultVector results{ std::make_shared<ngraph::op::Result>(mul) };
return std::make_shared<ngraph::Function>(results, params, "SoftSignTest");
}
TEST_P(SoftsignTest, CompareWithRefImpl) {
Run();
};
} // namespace LayerTestsDefinitions
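
Run() loads and infers the PowerIE-based graph built in SetUp(), then swaps `function` for the ngraph-friendly reference built from standard Add/Power ops before Validate(), so the reference results are not computed through the legacy op. The construction assumes PowerIE(x, power, scale, shift) evaluates (shift + scale * x) ^ power, which makes PowerIE(abs, 1, 1, 1) the +1 addition and PowerIE(add, -1, 1, 0) the reciprocal. A standalone sketch (not part of the test) checking that reading numerically:

// Standalone numeric sketch of the PowerIE reading assumed above:
// PowerIE(x, power, scale, shift) is taken to compute (shift + scale * x) ^ power.
#include <cassert>
#include <cmath>

static float power_ie(float x, float power, float scale, float shift) {
    return std::pow(shift + scale * x, power);
}

int main() {
    for (float x : {-3.0f, -0.25f, 0.0f, 0.25f, 3.0f}) {
        const float a = std::fabs(x);                        // Abs node
        const float add = power_ie(a, 1.0f, 1.0f, 1.0f);     // "add" node: |x| + 1
        const float rec = power_ie(add, -1.0f, 1.0f, 0.0f);  // "power" node: 1 / (|x| + 1)
        assert(std::fabs(rec * x - x / (1.0f + std::fabs(x))) < 1e-6f);
    }
    return 0;
}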