[GNA] Added Multiplication 1x1x1 (#2470)

* [GNA] Broadcast power primitive

[GNA] Broadcast power primitive

[GNA] Added Multiplication 1x1x1

[GNA] Broadcast power primitive

Added secondary changes

* Added test fix
This commit is contained in:
Andrey Dmitriev 2020-11-10 20:56:21 +03:00 committed by GitHub
parent 634109acfa
commit bd091500cd
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 148 additions and 3 deletions

View File

@ -501,12 +501,23 @@ void GNAGraphCompiler::PowerPrimitive(InferenceEngine::CNNLayerPtr layer) {
auto input = layer->insData[0].lock();
auto outputs = *layer->outData.begin();
uint32_t num_rows_in = FROM_IR_DIM(input, 1);
uint32_t num_columns_in = FROM_IR_DIM(input, 2);
uint32_t num_rows_in = InferenceEngine::details::product(begin(input->getDims()), end(input->getDims()));
uint32_t num_columns_in = 1;
uint32_t num_rows_out = num_rows_in;
uint32_t num_padding = ALIGN(num_rows_in, 8) - num_rows_in;
if (input->getDims().size() > 2 || input->getDims()[0] >= 8) {
for (size_t index_divide = 8; index_divide > 0; index_divide--) {
if (num_rows_in % index_divide == 0) {
num_rows_in /= index_divide;
num_columns_in = index_divide;
break;
}
}
num_rows_out = num_rows_in;
num_padding = ALIGN(num_rows_in, 8) - num_rows_in;
}
size_t num_data_bytes_out = InferenceEngine::details::product(begin(outputs->getDims()), end(outputs->getDims()))
* outputs->getPrecision().size();

View File

@ -0,0 +1,49 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include <vector>
#include <string>
#include <ie_precision.hpp>
#include <subgraph_tests/broadcast_power.hpp>
using namespace LayerTestsDefinitions;
namespace {
// Network precisions the test is instantiated for.
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16,
};
// GNA plugin configuration: exact software mode, compact mode off,
// fixed input scale factor so quantization is deterministic.
const std::vector<std::map<std::string, std::string>> configs = {
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_COMPACT_MODE", "NO"},
{"GNA_SCALE_FACTOR_0", "2048"},
}
};
// Each entry is a pair: {input shape of the network parameter,
// shape the input is reshaped to before the broadcast multiply}.
const std::vector<std::vector<std::vector<size_t>>> input_shapes {
{{1, 8224}, {1, 257, 32}},
{{2, 8224}, {1, 257, 64}},
{{4, 8224}, {1, 257, 128}},
{{8, 128}, {8, 128}},
{{16, 128}, {16, 128}},
{{18, 128}, {18, 128}},
{{1, 16, 1, 128}, {1, 16, 1, 128}},
{{1, 8, 15, 128}, {1, 8, 15, 128}},
{{4, 4, 4, 4}, {4, 4, 4, 4}},
{{1, 4, 4, 128}, {1, 4, 4, 128}}
//TODO: need to add split over channels
// {{8, 8224}},
};
INSTANTIATE_TEST_CASE_P(PowerBroadcast, BroadcastPowerTest,
::testing::Combine(
::testing::ValuesIn(input_shapes),
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(configs)),
BroadcastPowerTest::getTestCaseName);
} // namespace

View File

@ -0,0 +1,33 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <tuple>
#include <vector>
#include <string>
#include <memory>
#include "functional_test_utils/layer_test_utils.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "ngraph_functions/builders.hpp"
// Parameter pack for the broadcast-power subgraph test.
typedef std::tuple<
std::vector<std::vector<size_t>>, // Input shapes
InferenceEngine::Precision, // Network Precision
std::string, // Target Device
std::map<std::string, std::string> //Configuration
> BroadCastPowerTuple;
namespace LayerTestsDefinitions {
// Subgraph test: reshape -> broadcast multiply by a scalar constant -> reshape
// back, used to cover the GNA broadcast power/multiplication primitive.
class BroadcastPowerTest : public testing::WithParamInterface<BroadCastPowerTuple>,
virtual public LayerTestsUtils::LayerTestsCommon {
public:
// Builds a readable, unique test name from the parameter tuple.
static std::string getTestCaseName(testing::TestParamInfo<BroadCastPowerTuple> obj);
protected:
// Constructs the ngraph function under test from the current parameters.
void SetUp() override;
};
} // namespace LayerTestsDefinitions

View File

@ -0,0 +1,52 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <debug.h>
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/precision_utils.hpp"
#include "functional_test_utils/skip_tests_config.hpp"
#include "subgraph_tests/broadcast_power.hpp"
namespace LayerTestsDefinitions {
// Builds a unique, human-readable test name from the parameter tuple:
// input shapes, network precision, target device and plugin configuration.
std::string BroadcastPowerTest::getTestCaseName(testing::TestParamInfo<BroadCastPowerTuple> obj) {
    std::vector<std::vector<size_t>> shapes;
    InferenceEngine::Precision precision;
    std::string device;
    std::map<std::string, std::string> config;
    std::tie(shapes, precision, device, config) = obj.param;

    std::ostringstream name;
    name << "inputShape=" << CommonTestUtils::vec2str(shapes) << "_"
         << "netPRC=" << precision.name() << "_"
         << "targetDevice=" << device << "_";
    // Each config entry contributes to the name so runs with different
    // plugin settings do not collide.
    for (const auto& item : config) {
        name << "_configItem=" << item.first << "_" << item.second;
    }
    return name.str();
}
void BroadcastPowerTest::SetUp() {
InferenceEngine::Precision netPrecision;
std::vector<std::vector<size_t>> inputs_shapes;
std::tie(inputs_shapes, netPrecision, targetDevice, configuration) = this->GetParam();
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
auto params = ngraph::builder::makeParams(ngPrc, {inputs_shapes[0]});
auto reshape_pattern = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{inputs_shapes[1].size()},
inputs_shapes[1]);
auto reshape = std::make_shared<ngraph::opset1::Reshape>(params[0], reshape_pattern, false);
auto const_mult2 = ngraph::builder::makeConstant<float>(ngPrc, {}, {-1.0f});
auto sum = ngraph::builder::makeEltwise(reshape, const_mult2, ngraph::helpers::EltwiseTypes::MULTIPLY);
auto reshape_pattern_2 = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{inputs_shapes[0].size()},
inputs_shapes[0]);
auto reshape_2 = std::make_shared<ngraph::opset1::Reshape>(sum, reshape_pattern_2, false);
function = std::make_shared<ngraph::Function>(reshape_2, params, "BroadcastPowerPass");
}
// Runs the subgraph on the target device and compares against the
// interpreter reference implementation.
TEST_P(BroadcastPowerTest, CompareWithRefImpl) {
    Run();
}  // no trailing semicolon: TEST_P expands to a full definition, and an
   // extra ';' triggers -Wpedantic extra-semicolon warnings
} // namespace LayerTestsDefinitions