[GNA] fix scale factor calculation for unfused bias after fc (#2097)
* [GNA] fix scale factor calculation for unfused bias after fc
* change check
* add test
* apply requested changes
* cpplint fix
* apply test changes
* modify model for test to match ::op::
parent e55653b519
commit 3ecee2ce49
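The scenario behind the fix is a fully connected (MatMul) layer whose bias is added by a separate, unfused Add, with reshape-like layers (Unsqueeze/Squeeze) sitting between the FC output and the Add. The new MatmulSqueezeAdd test added by this commit builds exactly that subgraph; the snippet below is a condensed, standalone sketch of the same pattern, with illustrative shapes and constant values that are not taken from the test.

#include <memory>
#include <vector>
#include <ngraph/ngraph.hpp>

// Illustrative only: MatMul acting as a fully connected layer, followed by an
// unfused bias Add reached through a reshape-like Unsqueeze.
std::shared_ptr<ngraph::Function> makeUnfusedBiasAfterFc() {
    using namespace ngraph;
    auto input   = std::make_shared<op::Parameter>(element::f32, Shape{1, 8});
    auto weights = op::Constant::create(element::f32, Shape{16, 8}, std::vector<float>(16 * 8, 0.1f));
    auto matmul  = std::make_shared<op::MatMul>(input, weights, false, true);  // FC-like layer
    auto axis    = op::Constant::create(element::i64, Shape{1}, std::vector<int64_t>{0});
    auto unsq    = std::make_shared<op::Unsqueeze>(matmul, axis);              // pass-through for GNA
    auto bias    = op::Constant::create(element::f32, Shape{1, 1, 16}, std::vector<float>(16, 0.5f));
    auto add     = std::make_shared<op::Add>(unsq, bias);                      // unfused bias
    auto squeeze = std::make_shared<op::Squeeze>(add, axis);
    return std::make_shared<Function>(NodeVector{squeeze}, ParameterVector{input});
}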
@@ -309,7 +309,17 @@ class ScaleFactorPerLayer<InferenceEngine::EltwiseLayer*> {
             case InferenceEngine::EltwiseLayer::Sub:
             case InferenceEngine::EltwiseLayer::Sum: {
                 // detect which input will be used as biases
-                if (LayerInfo(in0).has32BOutput()) {
+                auto findPrevFunctional = [](InferenceEngine::CNNLayerPtr layer) {
+                    auto prev = InferenceEngine::CNNNetPrevLayer(layer, 0);
+                    while (CNNNetHasPrevLayer(prev.get(), 0) && LayerInfo(prev).isNonFunctional()) {
+                        prev = InferenceEngine::CNNNetPrevLayer(prev, 0);
+                    }
+
+                    return prev;
+                };
+
+                if (LayerInfo(in0).has32BOutput() ||
+                    (LayerInfo(in0).isNonFunctional() && CNNNetHasPrevLayer(in0.get(), 0) && LayerInfo(findPrevFunctional(in0)).has32BOutput())) {
                     std::swap(in0, in1);
                     std::swap(quantParams0, quantParams1);
                 }
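The hunk above changes how the Eltwise scale-factor code decides which of its two inputs carries the 32-bit (bias) path: instead of looking only at the immediate input, it now walks back over non-functional layers (reshape-like pass-through nodes) to the first functional producer and checks that layer's output precision, so the input swap still happens when the bias Add is separated from the FC by such layers. A minimal standalone sketch of that walk-back idea, using a hypothetical Node type rather than the plugin's CNNLayer API:

#include <memory>

// Hypothetical, simplified graph node used only to illustrate the idea.
struct Node {
    bool nonFunctional = false;   // reshape-like pass-through layer
    bool has32BitOutput = false;  // e.g. output of a fully connected layer
    std::shared_ptr<Node> prev;   // single predecessor, as in CNNNetPrevLayer(layer, 0)
};

// Walk back over pass-through nodes to the first functional producer.
std::shared_ptr<Node> findPrevFunctional(const std::shared_ptr<Node>& node) {
    auto prev = node->prev;
    while (prev->prev && prev->nonFunctional) {
        prev = prev->prev;
    }
    return prev;
}

// An eltwise input is treated as the bias side if it is produced, possibly
// through pass-through nodes, by a layer with a 32-bit output.
bool feedsFrom32BitOutput(const std::shared_ptr<Node>& in) {
    if (in->has32BitOutput) {
        return true;
    }
    return in->nonFunctional && in->prev && findPrevFunctional(in)->has32BitOutput;
}

The remaining hunks of the commit add a shared MatmulSqueezeAdd subgraph test (fixture header and implementation) plus instantiations for the CPU, GNA, and GPU targets, covering exactly this unfused-bias pattern.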
@@ -0,0 +1,47 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "common_test_utils/test_constants.hpp"
#include "subgraph_tests/matmul_squeeze_add.hpp"

using namespace LayerTestsDefinitions;

namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
    InferenceEngine::Precision::FP32,
    InferenceEngine::Precision::FP16
};

const std::vector<std::map<std::string, std::string>> configs = {
    { }
};

std::vector<std::vector<size_t>> input_shapes = {
    {1, 8},
    {1, 42},
    {1, 100},
    {1, 128},
    {1, 512}
};

std::vector<size_t> output_sizes = {
    1000,
    512,
    128,
    42,
    16,
    8
};

INSTANTIATE_TEST_CASE_P(MatmulSqueezeAdd, MatmulSqueezeAddTest,
    ::testing::Combine(
        ::testing::ValuesIn(netPrecisions),
        ::testing::Values(CommonTestUtils::DEVICE_CPU),
        ::testing::ValuesIn(configs),
        ::testing::ValuesIn(input_shapes),
        ::testing::ValuesIn(output_sizes)),
    MatmulSqueezeAddTest::getTestCaseName);
} // namespace
@@ -0,0 +1,50 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "common_test_utils/test_constants.hpp"
#include "subgraph_tests/matmul_squeeze_add.hpp"

using namespace LayerTestsDefinitions;

namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
    InferenceEngine::Precision::FP32,
    InferenceEngine::Precision::FP16
};

const std::vector<std::map<std::string, std::string>> configs = {
    {
        {"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
        {"GNA_SCALE_FACTOR_0", "81.9175"}
    }
};

std::vector<std::vector<size_t>> input_shapes = {
    {1, 8},
    {1, 42},
    {1, 100},
    {1, 128},
    {1, 512}
};

std::vector<size_t> output_sizes = {
    1000,
    512,
    128,
    42,
    16,
    8
};

INSTANTIATE_TEST_CASE_P(MatmulSqueezeAdd, MatmulSqueezeAddTest,
    ::testing::Combine(
        ::testing::ValuesIn(netPrecisions),
        ::testing::Values(CommonTestUtils::DEVICE_GNA),
        ::testing::ValuesIn(configs),
        ::testing::ValuesIn(input_shapes),
        ::testing::ValuesIn(output_sizes)),
    MatmulSqueezeAddTest::getTestCaseName);
} // namespace
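For reference, the GNA instantiation above is the only one that passes plugin configuration (software-exact mode plus a fixed input scale factor). Outside the test framework, the same map would be handed to the plugin at load time; a minimal sketch, assuming the network to load is already available as an InferenceEngine::CNNNetwork:

#include <map>
#include <string>
#include <ie_core.hpp>

// Sketch only: load a network on GNA with the same options used by the test.
InferenceEngine::ExecutableNetwork loadOnGna(InferenceEngine::Core& ie,
                                             const InferenceEngine::CNNNetwork& network) {
    std::map<std::string, std::string> config = {
        {"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
        {"GNA_SCALE_FACTOR_0", "81.9175"}
    };
    return ie.LoadNetwork(network, "GNA", config);
}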
@@ -0,0 +1,47 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "common_test_utils/test_constants.hpp"
#include "subgraph_tests/matmul_squeeze_add.hpp"

using namespace LayerTestsDefinitions;

namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
    InferenceEngine::Precision::FP32,
    InferenceEngine::Precision::FP16
};

const std::vector<std::map<std::string, std::string>> configs = {
    { }
};

std::vector<std::vector<size_t>> input_shapes = {
    {1, 8},
    {1, 42},
    {1, 100},
    {1, 128},
    {1, 512}
};

std::vector<size_t> output_sizes = {
    1000,
    512,
    128,
    42,
    16,
    8
};

INSTANTIATE_TEST_CASE_P(MatmulSqueezeAdd, MatmulSqueezeAddTest,
    ::testing::Combine(
        ::testing::ValuesIn(netPrecisions),
        ::testing::Values(CommonTestUtils::DEVICE_GPU),
        ::testing::ValuesIn(configs),
        ::testing::ValuesIn(input_shapes),
        ::testing::ValuesIn(output_sizes)),
    MatmulSqueezeAddTest::getTestCaseName);
} // namespace
@@ -0,0 +1,35 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <memory>
#include <string>
#include <tuple>
#include <vector>

#include "functional_test_utils/layer_test_utils.hpp"
#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"

typedef std::tuple<
    InferenceEngine::Precision,          // Network Precision
    std::string,                         // Target Device
    std::map<std::string, std::string>,  // Configuration
    std::vector<size_t>,                 // Input Shapes
    size_t                               // Output Size
> matmulSqueezeAddParams;

namespace LayerTestsDefinitions {

class MatmulSqueezeAddTest : public testing::WithParamInterface<matmulSqueezeAddParams>,
    public LayerTestsUtils::LayerTestsCommon {
public:
    static std::string getTestCaseName(testing::TestParamInfo<matmulSqueezeAddParams> obj);

protected:
    void SetUp() override;
};

} // namespace LayerTestsDefinitions
@@ -0,0 +1,91 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <ie_core.hpp>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
#include <ie_plugin_config.hpp>

#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "functional_test_utils/layer_test_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "ngraph_functions/pass/convert_prc.hpp"

#include "subgraph_tests/matmul_squeeze_add.hpp"

#include "ngraph_functions/builders.hpp"

namespace LayerTestsDefinitions {

std::string MatmulSqueezeAddTest::getTestCaseName(testing::TestParamInfo<matmulSqueezeAddParams> obj) {
    InferenceEngine::Precision netPrecision;
    std::vector<size_t> inputShape;
    std::size_t outputSize;
    std::string targetDevice;
    std::map<std::string, std::string> configuration;
    std::tie(netPrecision, targetDevice, configuration, inputShape, outputSize) = obj.param;

    std::ostringstream result;
    result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_";
    result << "OS=" << outputSize << "_";
    result << "netPRC=" << netPrecision.name() << "_";
    result << "targetDevice=" << targetDevice;
    for (auto const& configItem : configuration) {
        result << "_configItem=" << configItem.first << "_" << configItem.second;
    }
    return result.str();
}

void MatmulSqueezeAddTest::SetUp() {
    auto generateFloatNumbers = [](float startFrom, float upTo, std::size_t vec_len) {
        std::vector<float> res;

        std::mt19937 gen(
            static_cast<float>(std::chrono::high_resolution_clock::now().time_since_epoch().count()));

        std::uniform_real_distribution<float> dist(startFrom, upTo);

        for (int i = 0; i < vec_len; i++)
            res.emplace_back(static_cast<float>(dist(gen)));

        return res;
    };

    InferenceEngine::Precision netPrecision;
    std::map<std::string, std::string> tempConfig;
    std::vector<size_t> inputShape;
    size_t outputSize;
    std::tie(netPrecision, targetDevice, tempConfig, inputShape, outputSize) = this->GetParam();
    configuration.insert(tempConfig.begin(), tempConfig.end());

    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);

    auto params = ngraph::builder::makeParams(ngPrc, { inputShape });

    auto constant_0 = ngraph::builder::makeConstant<float>(ngPrc, { outputSize, inputShape[1] },
        generateFloatNumbers(0, 1, outputSize * inputShape[1]), false);
    auto matmul_0 = std::make_shared<ngraph::op::MatMul>(params[0], constant_0, false, true);

    auto constant_1 = std::make_shared<ngraph::op::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 1 }, std::vector<size_t>{0});
    auto unsqueeze_0 = std::make_shared<ngraph::op::Unsqueeze>(matmul_0, constant_1);

    auto constant_2 = ngraph::builder::makeConstant<float>(ngPrc, { 1, inputShape[0], outputSize },
        generateFloatNumbers(0, 1, inputShape[0] * outputSize), false);
    auto add_0 = std::make_shared<ngraph::op::Add>(unsqueeze_0, constant_2);

    auto constant_3 = std::make_shared<ngraph::op::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 1 }, std::vector<size_t>{0});
    auto squeeze_0 = std::make_shared<ngraph::op::Squeeze>(add_0, constant_3);

    ngraph::ResultVector results {std::make_shared<ngraph::op::Result>(squeeze_0)};
    function = std::make_shared<ngraph::Function>(results, params, "MatmulSqueezeAddTest");
}

TEST_P(MatmulSqueezeAddTest, CompareWithRefImpl) {
    Run();
};

} // namespace LayerTestsDefinitions