[GNA] Added test for ScaleShift and fixed power layer with non-zero shift (#922)

* [GNA] Added ScaleShift test and fixed power layer with non-zero shift

Added tests

[GNA] Added ScaleShift test and fixed power layer with non-zero shift

* Test Assert

* rebuild
This commit is contained in:
Andrey Dmitriev
2020-06-16 00:32:28 +03:00
committed by GitHub
parent 4f4352f301
commit 6079a35b81
4 changed files with 135 additions and 22 deletions

View File

@@ -386,29 +386,14 @@ void GNAGraphCompiler::PowerPrimitive(InferenceEngine::CNNLayerPtr layer) {
if (gnaFlags->sw_fp32) {
gnamem->readonly().push_value(ptr_weights, power.scale, num_rows_out, 64);
gnamem->readonly().push_value(ptr_biases, power.scale, num_rows_out, 64);
gnamem->readonly().push_value(ptr_biases, power.offset, num_rows_out, 64);
} else {
auto weightsScaledIdentity = power.scale;
auto biasesScaledIdentity = power.scale;
if (quantized != nullptr) {
weightsScaledIdentity = quantized->_weights_quant.scale * weightsScaledIdentity;
biasesScaledIdentity = quantized->_bias_quant.scale * biasesScaledIdentity;
}
auto weightQuantizedIdentity = FLOAT_TO_INT16(std::min(weightsScaledIdentity, static_cast<float>(INT16_MAX)));
auto biasesQuantizedIdentity = FLOAT_TO_INT16(std::min(biasesScaledIdentity, static_cast<float>(INT16_MAX)));
gnamem->readonly().push_value<int16_t>(ptr_weights, weightQuantizedIdentity, num_rows_out, 64);
gnamem->readonly().push_value<int32_t>(ptr_biases, biasesQuantizedIdentity, num_rows_out, 64);
}
if (power.offset != 0.0f) {
if (quantized == nullptr) {
gnamem->readonly().push_value(ptr_biases, 0.0f, num_rows_out, 64);
} else {
gnamem->readonly().push_value<int32_t>(ptr_biases, 0, num_rows_out, 64);
}
} else {
gnamem->readonly().push_value(ptr_biases, 0.0f, num_rows_out, 64);
auto quantizedScale = FLOAT_TO_INT16(std::min(quantized->_weights_quant.scale * power.scale,
static_cast<float>(INT16_MAX)));
auto quantizedOffset = FLOAT_TO_INT32(std::min(quantized->_dst_quant.scale * power.offset,
static_cast<float>(INT32_MAX)));
gnamem->readonly().push_value<int16_t>(ptr_weights, quantizedScale, num_rows_out, 64);
gnamem->readonly().push_value<int32_t>(ptr_biases, quantizedOffset, num_rows_out, 64);
}
}

View File

@@ -0,0 +1,53 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "subgraph_tests/scaleshift.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
namespace {
std::vector<std::vector<std::vector<size_t>>> inShapes = {
{{1, 8}},
{{2, 16}},
{{3, 32}},
{{4, 64}},
{{5, 128}},
{{6, 256}},
{{7, 512}},
{{8, 1024}}
};
std::vector<std::vector<float >> Scales = {
{2.0f},
{3.0f},
{-1.0f},
{-2.0f},
{-3.0f}
};
std::vector<std::vector<float >> Shifts = {
{1.0f},
{2.0f},
{3.0f},
{-1.0f},
{-2.0f},
{-3.0f}
};
std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16,
};
INSTANTIATE_TEST_CASE_P(scale_shift, ScaleShiftLayerTest,
::testing::Combine(
::testing::ValuesIn(inShapes),
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(Scales),
::testing::ValuesIn(Shifts)),
ScaleShiftLayerTest::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,31 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <tuple>
#include <string>
#include <vector>
#include <memory>
#include "functional_test_utils/layer_test_utils.hpp"
#include "../../../../../ngraph_functions/include/ngraph_functions/builders.hpp"
#include "common_test_utils/test_constants.hpp"
namespace LayerTestsDefinitions {
// Parameter pack for the ScaleShift subgraph test; produced by
// ::testing::Combine in the INSTANTIATE_TEST_CASE_P call sites.
using ScaleShiftParamsTuple = typename std::tuple<
std::vector<std::vector<size_t>>, //input shapes (only the first shape is consumed by SetUp)
InferenceEngine::Precision, //Network precision
std::string, //Device name
std::vector<float>, //scale (multiplicative constant data)
std::vector<float>>; //shift (additive constant data)
// Parameterized test that builds a Multiply+Add (scale-shift) subgraph
// and compares plugin results against the reference implementation.
class ScaleShiftLayerTest:
        public testing::WithParamInterface<ScaleShiftParamsTuple>,
        public LayerTestsUtils::LayerTestsCommon{
public:
    // Renders a unique, human-readable name for each parameter combination.
    static std::string getTestCaseName(const testing::TestParamInfo<ScaleShiftParamsTuple> &obj);
protected:
    // Builds the Multiply+Add ngraph function from the test parameters.
    void SetUp() override;
};
} // namespace LayerTestsDefinitions

View File

@@ -0,0 +1,44 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <debug.h>
#include "functional_test_utils/precision_utils.hpp"
#include "functional_test_utils/skip_tests_config.hpp"
#include "subgraph_tests/scaleshift.hpp"
namespace LayerTestsDefinitions {
// Builds a unique test-case name from the parameter tuple, e.g.
// "IS=((1.8))_Scale=(2)_Shift=(1)_netPRC=FP32_targetDevice=GNA_".
std::string ScaleShiftLayerTest::getTestCaseName(const testing::TestParamInfo<ScaleShiftParamsTuple> &obj) {
    std::vector<std::vector<size_t>> shapes;
    InferenceEngine::Precision precision;
    std::string device;
    std::vector<float> scales;
    std::vector<float> shifts;
    std::tie(shapes, precision, device, scales, shifts) = obj.param;

    std::ostringstream name;
    name << "IS=" << CommonTestUtils::vec2str(shapes) << "_"
         << "Scale=" << CommonTestUtils::vec2str(scales) << "_"
         << "Shift=" << CommonTestUtils::vec2str(shifts) << "_"
         << "netPRC=" << precision.name() << "_"
         << "targetDevice=" << device << "_";
    return name.str();
}
// Builds the function under test: out = input * scale + shift, using
// Constant nodes for both factors.
void ScaleShiftLayerTest::SetUp() {
    std::vector<std::vector<size_t>> inputShapes;
    InferenceEngine::Precision netPrecision;
    std::vector<float> scale, shift;
    std::tie(inputShapes, netPrecision, targetDevice, scale, shift) = this->GetParam();
    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
    // NOTE(review): only the first shape of the list is used as the network
    // input — confirm multi-input cases are intentionally out of scope.
    auto paramsIn = ngraph::builder::makeParams(ngPrc, {inputShapes[0]});
    // Size the constant shapes from the actual data so multi-element
    // scale/shift vectors work too (was hard-coded to Shape{1}, which only
    // matched single-element data).
    auto mul_const = std::make_shared<ngraph::op::Constant>(ngPrc, ngraph::Shape{scale.size()}, scale);
    auto mul = std::make_shared<ngraph::opset1::Multiply>(paramsIn[0], mul_const);
    auto add_const = std::make_shared<ngraph::op::Constant>(ngPrc, ngraph::Shape{shift.size()}, shift);
    auto add = std::make_shared<ngraph::opset1::Add>(mul, add_const);
    function = std::make_shared<ngraph::Function>(add, paramsIn, "scale_shift");
}
// Runs the subgraph on the target device and compares the output against
// the reference implementation within the default threshold.
// (Dropped the stray ';' after the TEST_P body — it was an empty declaration.)
TEST_P(ScaleShiftLayerTest, CompareWithRefs) {
    Run();
}
} // namespace LayerTestsDefinitions