[GNA] Added fix for multiple outputs with one going to memory, and a test (#669)

Andrey Dmitriev 2020-06-24 17:38:34 +03:00 committed by GitHub
parent 1bfe709e6c
commit cec12131e7
8 changed files with 288 additions and 26 deletions


@@ -176,6 +176,63 @@ inline std::pair<InferenceEngine::CNNLayerPtr, int> CNNNetCheckNextLayerSkipCertain
return CNNNetCheckNextLayerSkipCertain(outLayer->second, 0, 0, bOnlyCheck, shouldSkip);
}
/**
* @brief returns all layers reachable from the given one
* @param layer - layer to start the traversal from
* @param oDataIdx - output data index to follow; -1 means iterate over all outData indexes
* @param shouldSkip - predicate for layers to traverse through without adding them to the result
* @return the first non-skipped layers reachable from the given one
*/
template <class Layer>
inline std::vector<CNNLayerPtr> CNNNetGetAllNextLayersSkipCertain(Layer layer, int oDataIdx, const std::function<bool(CNNLayerPtr)> &shouldSkip) {
// TODO: need a generic function that creates a slice of the graph: starting from the given layer
// and skipping all non-functional layers, ending up at functional ones
std::list<CNNLayerPtr> currentSet;
std::vector<CNNLayerPtr> resultSet;
std::vector<std::map<std::string, CNNLayerPtr>> start;
if (oDataIdx == -1) {
for (int i = 0; i != layer->outData.size(); i++) {
start.push_back(layer->outData[i]->getInputTo());
}
} else {
start.push_back(layer->outData[oDataIdx]->getInputTo());
}
auto separate_layers = [&currentSet, &resultSet, &shouldSkip](std::map<std::string, CNNLayerPtr>& inputTo) {
for (auto &&bfsLayer : inputTo) {
if (shouldSkip(bfsLayer.second)) {
currentSet.push_back(bfsLayer.second);
continue;
}
resultSet.push_back(bfsLayer.second);
}
};
int startIdx, endIdx;
if (oDataIdx == -1) {
startIdx = 0;
endIdx = layer->outData.size();
} else {
startIdx = oDataIdx;
endIdx = oDataIdx + 1;
}
for (int i = startIdx; i != endIdx; i++) {
separate_layers(layer->outData[i]->getInputTo());
}
while (!currentSet.empty()) {
auto currentLayer = currentSet.front();
currentSet.pop_front();
for (auto && oData : currentLayer->outData) {
separate_layers(oData->getInputTo());
}
}
return resultSet;
}
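For illustration, a minimal usage sketch (hedged: someLayer is a hypothetical variable, but the predicate mirrors the one used by FuseMultipleIdentitiesPass below) that collects every functional layer reachable from all outputs of a layer:
auto isNonFunctional = [](CNNLayerPtr ptr) {
    return LayerInfo(ptr).isNonFunctional();
};
// oDataIdx == -1 follows every outData index of someLayer
std::vector<CNNLayerPtr> nextFunctional = CNNNetGetAllNextLayersSkipCertain(someLayer, -1, isNonFunctional);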
/// @brief alias for strict checkNextLayer (false)
template <class Layer>
inline std::pair<InferenceEngine::CNNLayerPtr, int> CNNNetGetNextLayerSkipCertain(Layer layer, int oidx, int iidx,
@@ -474,7 +531,31 @@ inline void CNNNetworkInsertLayer(CNNLayerPtr after,
}
/**
* @brief returns previous layers and the outData index for each of them
* @tparam T
* @param origin - layer whose previous layers are requested
* @param acceptanceCriteria - predicate; a previous layer that fails it is skipped and its own predecessors are searched instead
* @param idx - input data index to inspect; -1 means iterate over all inputs
*/
template <class T>
std::vector<std::pair<CNNLayerPtr, int> > CNNNetGetPrevLayersSkip(CNNLayerPtr origin, const T &acceptanceCriteria, int idx = -1) {
std::vector<std::pair<CNNLayerPtr, int> > prevLayers;
for (int i = idx == -1 ? 0 : idx; CNNNetHasPrevLayer(origin.get(), i) && (idx == -1 || i == idx); i++) {
auto prevLayer = CNNNetPrevLayer(origin, i);
if (acceptanceCriteria(prevLayer)) {
prevLayers.push_back({prevLayer, CNNLayerFindOutDataIdx(origin, i)});
} else {
// if for some input we need to look into upper layers - the original index is intentionally not used here
auto prevPrevLayers = CNNNetGetPrevLayersSkip(prevLayer, acceptanceCriteria);
prevLayers.insert(prevLayers.end(), prevPrevLayers.begin(), prevPrevLayers.end());
}
}
return prevLayers;
}
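And a matching sketch for the upward direction (again hedged; origin is a hypothetical variable, and gnalog() is the logger used by the GNA passes): find the functional layer feeding each input path, and the outData index it feeds us through, exactly as FuseMultipleIdentitiesPass does below:
auto prevLayersReached = CNNNetGetPrevLayersSkip(origin, [](CNNLayerPtr ptr) {
    return !LayerInfo(ptr).isNonFunctional();
});
for (auto && prev : prevLayersReached) {
    gnalog() << prev.first->name << " reached via outData index " << prev.second << std::endl;
}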
/**
* @brief remove given layer from topology, currently only layers with one input data and one output data are supported
*/
inline void CNNNetworkRemoveLayer(CNNLayerPtr layer) {
if (!layer) {


@@ -373,6 +373,7 @@ void GNAPlugin::LoadNetwork(ICNNNetwork &network) {
passes->registerPass<InsertDiagonalLayerPass>();
passes->registerPass<HandleMultipleActivationsForTheLayerPass>();
passes->registerPass<SubstituteScaleShiftBroadCastPass>();
passes->registerPass<FuseMultipleIdentitiesPass>();
passIdx = passes->run(passIdx);
};


@@ -107,6 +107,9 @@ class LayerInfo {
bool isConcatAlignFilter() const noexcept {
return isOfType("ConcatAlignFilter");
}
bool isLink() const noexcept {
return isOfType("Link");
}
bool isAffineFilter() const noexcept {
return isOfType("AffineFilter");
}


@@ -609,31 +609,6 @@ void InsertIdentityLayerPass::run() {
}
}
/**
* @brief returns previous layers and insData index for it
* @tparam T
* @param origin
* @param acceptanceCriteria
* @param idx
*/
// give previous layers while skipping certain layer according to expression
template <class T>
std::vector<std::pair<CNNLayerPtr, int> > CNNNetGetPrevLayersSkip(CNNLayerPtr origin, const T &acceptanceCriteria, int idx = -1) {
std::vector<std::pair<CNNLayerPtr, int> > prevLayers;
for (int i = idx == -1 ? 0 : idx; CNNNetHasPrevLayer(origin.get(), i) && (idx == -1 || i == idx); i++) {
auto prevLayer = CNNNetPrevLayer(origin, i);
if (acceptanceCriteria(prevLayer)) {
prevLayers.push_back({prevLayer, CNNLayerFindOutDataIdx(origin, i)});
} else {
// if for some input we need to look in upper layers - original index not used here intentionally
auto prevPrevLayers = CNNNetGetPrevLayersSkip(prevLayer, acceptanceCriteria);
prevLayers.insert(prevLayers.end(), prevPrevLayers.begin(), prevPrevLayers.end());
}
}
return prevLayers;
}
void InsertCopyLayerPass::run() {
for (auto & l : *pLayers) {
if (l->insData.empty()) continue;
@@ -1084,6 +1059,78 @@ void RemoveConstPass::run() {
transformer.fullTrim();
}
void FuseMultipleIdentitiesPass::run() {
for (auto &l : *pLayers) {
if (l->insData.empty()) continue;
auto isNonFunctional = [](CNNLayerPtr ptr) {
return LayerInfo(ptr).isNonFunctional();
};
auto eltwise = dynamic_cast<InferenceEngine::EltwiseLayer *>(l.get());
auto concat = dynamic_cast<InferenceEngine::ConcatLayer *>(l.get());
if (LayerInfo(l).isNonFunctional() || LayerInfo(l).has32BInput())
continue;
gnalog() << "CNNNetPrevLayer skip non functional from :: " << l->name;
auto prevLayersReached = CNNNetGetPrevLayersSkip(l, [](CNNLayerPtr ptr) {
return !LayerInfo(ptr).isNonFunctional();
});
prevLayersReached.erase(std::remove_if(prevLayersReached.begin(),
prevLayersReached.end(),
[] (const std::pair<CNNLayerPtr, int> & candidate) {
return LayerInfo(candidate.first).isLink();
}), prevLayersReached.end());
if (prevLayersReached.size() != 1 && eltwise == nullptr && concat == nullptr) {
std::stringstream layers;
for (auto && prevLayer : prevLayersReached) {
layers << prevLayer.first->name;
layers << ", ";
}
THROW_GNA_LAYER_EXCEPTION(l) << "unsupported case: connected to "
<< (prevLayersReached.empty() ? "zero" : "multiple") << " outputs : " << layers.str();
}
auto prevLayer = prevLayersReached.front().first;
auto outDataIdx = prevLayersReached.front().second;
gnalog() << ", reached " << prevLayer->name << " at " << outDataIdx << std::endl;
if (!LayerInfo(prevLayer).has32BOutput())
continue;
std::vector<CNNLayerPtr> resultSet = CNNNetGetAllNextLayersSkipCertain(prevLayer, outDataIdx, isNonFunctional);
// now the result set should have all needed layers
// check whether the result set already contains an identity layer
CNNLayerPtr alreadyIdentity;
for (auto &&res : resultSet) {
if (LayerInfo(res).isIdentity()) {
alreadyIdentity = res;
break;
}
}
if (!alreadyIdentity) {
continue;
} else {
// just figure out how to connect to that "already identity"
// 1st stage - disconnect given layer from previous
auto directPrev = l->insData.front().lock()->getCreatorLayer().lock();
auto oDataIdx = CNNLayerFindOutDataIdx(directPrev, 0);
auto &inputTo = directPrev->outData[oDataIdx]->getInputTo();
for (auto inIterator = inputTo.begin(); inIterator != inputTo.end(); inIterator++) {
if (inIterator->second == l) {
inputTo.erase(inIterator);
break;
}
}
l->insData.clear();
// 2nd stage - set up the new connection
l->insData.push_back(alreadyIdentity->outData.front());
alreadyIdentity->outData.front()->getInputTo()[l->name] = l;
}
}
}
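For intuition, a minimal sketch of the rewiring the pass performs once an existing identity is found (the names are the locals from the loop above; erasing by name is assumed equivalent to the iterator-based erase used in the pass, since inputTo is keyed by consumer layer name):
// before: directPrev (32-bit out) ---> { alreadyIdentity -> ..., l }
// after:  directPrev (32-bit out) ---> alreadyIdentity -> { ..., l }
auto &inputTo = directPrev->outData[oDataIdx]->getInputTo();
inputTo.erase(l->name);                                  // 1st stage - detach l from its direct predecessor
l->insData.clear();
l->insData.push_back(alreadyIdentity->outData.front()); // 2nd stage - reattach l behind the identity
alreadyIdentity->outData.front()->getInputTo()[l->name] = l;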
int PassManager::run(int index) {
// #define PLOT
#ifdef PLOT


@@ -149,6 +149,11 @@ DECL_PASS_BEFORE_COPY(UnrollTI);
*/
DECL_PASS_BEFORE_COPY(RemoveConst);
/**
* @brief removes extra identity layers for multi-output
*/
DECL_PASS(FuseMultipleIdentities);
struct PassManagerSettings {
Policy policy;
/// @brief whether to run passes before copy


@@ -0,0 +1,39 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include <vector>
#include "subgraph_tests/multioutput_eltwise_squeeze_eltwise.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
namespace {
std::vector<std::vector<std::vector<size_t>>> inputs{
{{1, 16}},
{{2, 16}},
{{1, 160}},
{{8, 40}},
{{3, 8}},
{{4, 32}},
{{5, 64}},
{{6, 128}},
{{7, 256}},
{{8, 512}},
{{8, 1024}}
};
std::map<std::string, std::string> additional_config = {
{"GNA_COMPACT_MODE", "NO"},
};
std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16,
};
INSTANTIATE_TEST_CASE_P(multioutput_eltwise_identity, MultioutputEltwiseReshapeEltwise,
::testing::Combine(
::testing::ValuesIn(inputs),
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::Values(additional_config)),
MultioutputEltwiseReshapeEltwise::getTestCaseName);
} // namespace


@@ -0,0 +1,30 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <tuple>
#include <string>
#include <vector>
#include <memory>
#include "functional_test_utils/layer_test_utils.hpp"
#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
namespace LayerTestsDefinitions {
typedef std::tuple<
std::vector<std::vector<size_t>>, // input shapes
InferenceEngine::Precision, // network precision
std::string, // device name
std::map<std::string, std::string> // configuration
> MultioutputEltwiseReshapeEltwiseTuple;
class MultioutputEltwiseReshapeEltwise
: public testing::WithParamInterface<MultioutputEltwiseReshapeEltwiseTuple>,
public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(const testing::TestParamInfo<MultioutputEltwiseReshapeEltwiseTuple> &obj);
protected:
void SetUp() override;
};
} // namespace LayerTestsDefinitions


@@ -0,0 +1,56 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <tuple>
#include <string>
#include <vector>
#include <memory>
#include <debug.h>
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/precision_utils.hpp"
#include "functional_test_utils/skip_tests_config.hpp"
#include "subgraph_tests/multioutput_eltwise_squeeze_eltwise.hpp"
namespace LayerTestsDefinitions {
std::string MultioutputEltwiseReshapeEltwise::getTestCaseName(const testing::TestParamInfo<MultioutputEltwiseReshapeEltwiseTuple> &obj) {
std::vector<std::vector<size_t>> input;
InferenceEngine::Precision netPrecision;
std::string targetName;
std::map<std::string, std::string> additional_config;
std::tie(input, netPrecision, targetName, additional_config) = obj.param;
std::ostringstream results;
results << "IS=" << CommonTestUtils::vec2str(input[0]) << "_";
results << "netPRC=" << netPrecision.name() << "_";
results << "targetDevice=" << targetName << "_";
return results.str();
}
void MultioutputEltwiseReshapeEltwise::SetUp() {
std::vector<std::vector<size_t>> inputs;
InferenceEngine::Precision netPrecision;
std::map<std::string, std::string> additional_config;
std::tie(inputs, netPrecision, targetDevice, additional_config) = this->GetParam();
configuration.insert(additional_config.begin(), additional_config.end());
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
auto input = ngraph::builder::makeParams(ngPrc, {inputs});
auto eltwise_const = ngraph::builder::makeConstant(ngPrc,
ngraph::Shape{input[0]->get_shape()},
std::vector<float>{-1.0f});
auto eltwise = std::make_shared<ngraph::opset1::Multiply>(input[0], eltwise_const);
auto unsqueeze = ngraph::builder::makeSqueezeUnsqueeze(eltwise, ngPrc, {0}, ngraph::helpers::SqueezeOpType::UNSQUEEZE);
auto squeeze = ngraph::builder::makeSqueezeUnsqueeze(unsqueeze, ngPrc, {0}, ngraph::helpers::SqueezeOpType::SQUEEZE);
auto eltwise_const2 = ngraph::builder::makeConstant(ngPrc, ngraph::Shape{1}, std::vector<float>{1.01f});
auto eltwise_const3 = ngraph::builder::makeConstant(ngPrc, ngraph::Shape{1}, std::vector<float>{1.01f});
auto eltwise2 = std::make_shared<ngraph::opset1::Multiply>(eltwise, eltwise_const2);
auto eltwise3 = std::make_shared<ngraph::opset1::Multiply>(squeeze, eltwise_const3);
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(eltwise2),
std::make_shared<ngraph::opset1::Result>(eltwise3)};
function = std::make_shared<ngraph::Function>(results, input, "eltwise_reshape_eltwise_multioutput");
}
TEST_P(MultioutputEltwiseReshapeEltwise, CompareWithRefs) {
Run();
}
} // namespace LayerTestsDefinitions