[GNA] Fix for concat layer with >2 inputs (#1475)

* Fix for concat layer with more than 2 inputs

Signed-off-by: Bartosz Sochacki <bartosz.sochacki@intel.com>

* Fixed check if affine is used for crop layer

Signed-off-by: Bartosz Sochacki <bartosz.sochacki@intel.com>

* code cleanup for the affine layer check fix

Signed-off-by: Bartosz Sochacki <bartosz.sochacki@intel.com>

* added test for concat layer with multiple inputs

* simplified test to use fewer layers

* fixed code style

* fixed coding style

* addressed review comments and one more issue that appeared during testing

* fixed code style errors

* scale factor propagation for concat layer with multiple inputs

* fix for a case when all inputs to concat are activation layers

* fix for Linux compilation - C++14 is not enabled and the build fails on lambdas with auto parameters

* corrected current year in headers in concat multi input tests

* fixes for code review issues raised by Denis Orlov

* enabled integer mode computation in GNA concat multi input test

* removed 1 space per review comment

* a fix to fail when not all scale factors are equal

* added GNA_DEVICE_MODE config to concat multi input test

* corrected search for the next input to the concat layer

* changed selection of 2nd candidate for source quant value

* code style fix - else and brackets should be in the same line

* small code improvement

* fix for mixing line endings

* addressed endless requantization loop and fixed failing tests
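A note on the Linux compilation fix above: lambdas with auto parameters (generic lambdas) are a C++14 feature, so a C++11 build rejects them and the parameter types have to be written out explicitly. A minimal standalone sketch of the difference (hypothetical names, not code from this commit):

    #include <algorithm>
    #include <cmath>
    #include <vector>

    int main() {
        std::vector<float> scales = { 2048.0f, 2048.0f, 1.0f };

        // C++14 and later: a generic lambda with an auto parameter
        // auto nonUnit = [](const auto& s) { return std::abs(s - 1.0f) > 1e-5f; };

        // C++11-compatible: the parameter type is spelled out explicitly,
        // matching the style of the lambdas in this patch
        auto nonUnit = [](const float& s) { return std::abs(s - 1.0f) > 1e-5f; };

        return std::count_if(scales.begin(), scales.end(), nonUnit) == 2 ? 0 : 1;
    }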
Bartosz Sochacki 2020-09-09 13:55:07 +02:00 committed by GitHub
parent 135ae12b0d
commit 8b87e1a477
7 changed files with 384 additions and 160 deletions

inference-engine/src/gna_plugin/frontend/scale_factor_calc.hpp

@@ -170,7 +170,7 @@ class ScaleFactorPerLayer<InferenceEngine::CNNLayer *> {
if (!fp32eq(quantSibling->_dst_quant.scale, 1)) {
// means we already restarted propagation input memory layer
// need to search for requantiseable layer prior memory output layer
InferenceEngine::CNNLayerPtr restartedLayer;
gnalog() << "Memory layer :"<< input->name << " scale factor: " << quantSibling->_dst_quant.scale
<< " doesn't match its outputs counterpart: " << cnnLayer->name << " scale factor: " << inputQuant->_dst_quant.scale << "\n";
@@ -382,119 +382,166 @@ class ScaleFactorPerLayer<InferenceEngine::ConcatLayer*> {
if ( !concatLayer ) {
THROW_GNA_EXCEPTION << "Incorrect Concat Layer pointer \n";
}
if (concatLayer->insData.size() < 2) {
    THROW_GNA_EXCEPTION << "Concat layer has unsupported number of incoming layers.";
}

auto fp32eq = [](float p1, float p2) -> bool {
    return (std::abs(p1 - p2) <= 0.00001f * std::min(std::abs(p1), std::abs(p2)));
};

auto quantData = InferenceEngine::getInjectedData<QuantizedLayerParams>(*concatLayer);
std::vector<InferenceEngine::CNNLayerPtr> inputLayers;
for (auto input_idx = 0; input_idx != concatLayer->insData.size(); input_idx++) {
    inputLayers.push_back(InferenceEngine::CNNNetPrevLayer(concatLayer, input_idx));
}

// if all inputs have the same quant value - trivial propagation
auto in0 = inputLayers.front();
auto quantParams0 = InferenceEngine::getInjectedData<QuantizedLayerParams>(in0);
auto scaleFactor = quantParams0->_dst_quant.scale;
auto scaleFactorCheck = [scaleFactor, &fp32eq](InferenceEngine::CNNLayerPtr& inputLayer) {
    auto quantParams = InferenceEngine::getInjectedData<QuantizedLayerParams>(inputLayer);
    return fp32eq(quantParams->_dst_quant.scale, scaleFactor);
};

if (std::find_if_not(inputLayers.begin() + 1, inputLayers.end(), scaleFactorCheck) == inputLayers.end()) {
    quantData->_dst_quant.scale = quantParams0->_dst_quant.scale;
    quantData->_src_quant.scale = quantParams0->_dst_quant.scale;
    return true;
}

// fail if network Input layers feeding this concat have different scale factors
auto inputLayerCheck = [](InferenceEngine::CNNLayerPtr& inputLayer) {
    auto info = LayerInfo(inputLayer);
    return info.isInput();
};

GNAPluginNS::QuantizedLayerParams* sourceQuantParams = nullptr;
auto firstInputIt = std::find_if(inputLayers.begin(), inputLayers.end(), inputLayerCheck);
if (firstInputIt != inputLayers.end()) {
    auto quantParamsFirst = InferenceEngine::getInjectedData<QuantizedLayerParams>(*firstInputIt);
    auto nextInputIt = firstInputIt + 1;
    while ((nextInputIt = std::find_if(nextInputIt, inputLayers.end(), inputLayerCheck)) != inputLayers.end()) {
        auto quantParamsSecond = InferenceEngine::getInjectedData<QuantizedLayerParams>(*nextInputIt);
        if (!fp32eq(quantParamsSecond->_dst_quant.scale, quantParamsFirst->_dst_quant.scale)) {
            THROW_GNA_EXCEPTION << "Two Input layers " << (*firstInputIt)->name
                << " and " << (*nextInputIt)->name << " have different scales in concat!!! \n";
        }
        ++nextInputIt;
    }
}

// find a source quant value
// - 1st candidate - non-activation layer with non-1 scale factor
// - 2nd candidate - 1st layer with non-1 scale factor
auto sourceLayerCheck = [&fp32eq](InferenceEngine::CNNLayerPtr& inputLayer) {
    auto quantParams = InferenceEngine::getInjectedData<QuantizedLayerParams>(inputLayer);
    LayerInfo info(inputLayer);
    return !info.isActivation() && !fp32eq(quantParams->_dst_quant.scale, 1.0f);
};

static std::map<std::string, size_t> restarted_counter;
auto restartedCountIt = restarted_counter.find(concatLayer->name);
if (restartedCountIt == restarted_counter.end()) {
    auto pos = restarted_counter.insert({ concatLayer->name, 0 });
    restartedCountIt = pos.first;
}
if (restartedCountIt->second % 2 == 1) {
    std::reverse(inputLayers.begin(), inputLayers.end());
}
++restartedCountIt->second;

auto sourceLayerIt = std::find_if(inputLayers.begin(), inputLayers.end(), sourceLayerCheck);
if (sourceLayerIt == inputLayers.end()) {
    auto nonDefaultScaleFactor = [&fp32eq](InferenceEngine::CNNLayerPtr& inputLayer) {
        auto quantParams = InferenceEngine::getInjectedData<QuantizedLayerParams>(inputLayer);
        return !fp32eq(quantParams->_dst_quant.scale, 1.0f);
    };
    sourceLayerIt = std::find_if(inputLayers.begin(), inputLayers.end(), nonDefaultScaleFactor);
}

std::set<size_t> concatIdxToUpdate;
if (sourceLayerIt != inputLayers.end()) {
    auto quantParams = InferenceEngine::getInjectedData<QuantizedLayerParams>(*sourceLayerIt);
    auto scaleFactor = quantParams->_dst_quant.scale;
    sourceQuantParams = quantParams;

    for (auto it = inputLayers.begin(); it != inputLayers.end(); ++it) {
        auto quantParamsIn = InferenceEngine::getInjectedData<QuantizedLayerParams>(*it);
        if (fp32eq(quantParamsIn->_dst_quant.scale, scaleFactor)) {
            continue;
        }

        // possible case when some of the concat inputs are free to select scale ex: const->concat<-affine
        if (!fp32eq(quantParamsIn->_dst_quant.scale, 1.0f) && !LayerInfo(*it).isActivation()) {
            concatIdxToUpdate.insert(std::distance(inputLayers.begin(), it));
        }

        quantParamsIn->_weights_quant = quantParams->_dst_quant;
        quantParamsIn->_dst_quant = quantParams->_dst_quant;
    }
}

auto updatedScaleFactor = InferenceEngine::getInjectedData<QuantizedLayerParams>(in0)->_dst_quant.scale;
auto equalScaleFactor = [updatedScaleFactor, &fp32eq](InferenceEngine::CNNLayerPtr& inputLayer) {
    auto quantParams = InferenceEngine::getInjectedData<QuantizedLayerParams>(inputLayer);
    return fp32eq(quantParams->_dst_quant.scale, updatedScaleFactor);
};

auto layerIt = std::find_if_not(inputLayers.begin() + 1, inputLayers.end(), equalScaleFactor);
if (layerIt != inputLayers.end()) {
    THROW_GNA_EXCEPTION << "layers entered into concat have different scale factors: " << concatLayer->name;
}

quantData->_dst_quant.scale = sourceQuantParams->_dst_quant.scale;
quantData->_src_quant.scale = sourceQuantParams->_dst_quant.scale;

if (layerIt == inputLayers.end() && concatIdxToUpdate.empty()) {
    return true;
}

for (auto& layerIdToUpdate : concatIdxToUpdate) {
    auto destinationQuantParams = InferenceEngine::getInjectedData<QuantizedLayerParams>(*concatLayer);
    destinationQuantParams->_dst_quant.scale = sourceQuantParams->_dst_quant.scale;

    InferenceEngine::CNNLayerPtr restartedLayer;
    // making a link activation possible without extra layer if first input to concat is not a parent / indirect parent of second input
    // using ufs - upper first search
    gnalog() << "[UFS] searching for quantizeable layer prior: " << concatLayer->name << ", via " << layerIdToUpdate << "\n";

    CNNNetDFS(InferenceEngine::CNNLayerPtr(concatLayer, [](InferenceEngine::CNNLayer*) {}),
        [&restartedLayer, concatLayer](InferenceEngine::CNNLayerPtr layer) {
            gnalog() << "[UFS] from : " << concatLayer->name << " reached: " << layer->name;
            // found that direct input to concat is an indirect parent of align filter - so no link required
            auto info = LayerInfo(layer);
            if (!info.isWeightable() && !info.isActivation()) {
                gnalog() << "... skipped\n";
                return;
            }
            restartedLayer = layer;
            gnalog() << "... OK, need requantize\n";
        }, true, [&restartedLayer, &concatLayer, &layerIdToUpdate](InferenceEngine::CNNLayer* from) {
            // aborting UFS once found functional layer, and using only specified input of concat
            return make_upstream_order(restartedLayer == nullptr ? from : nullptr,
                from == concatLayer ? layerIdToUpdate : -1);
        });

    if (restartedLayer == nullptr) {
        THROW_GNA_EXCEPTION << "cannot requantize input " << layerIdToUpdate << " to concat: " << concatLayer->name;
    }

    auto quantDataForConCatInput = InferenceEngine::getInjectedData<QuantizedLayerParams>(*restartedLayer);

    auto restarLayerInfo = LayerInfo(restartedLayer);
    if (restarLayerInfo.isActivation()) {
        // requantize activation by just changing its output scale factor
        quantDataForConCatInput->_dst_quant.scale = sourceQuantParams->_dst_quant.scale;
    }

    result = ScaleFactorUpdateResult(restartedLayer.get());
}
return true;
}
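Stripped of the plugin data structures, the selection logic above boils down to: trivial propagation when every input already carries the same scale factor; otherwise prefer a non-activation input with a non-1 scale factor as the source, fall back to the first input with a non-1 scale factor, and queue any mismatching input that is neither free to rescale (scale of 1) nor an activation for requantization. A standalone sketch of that decision order (a hypothetical helper, with plain floats instead of QuantizedLayerParams):

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <set>
    #include <utility>
    #include <vector>

    // same near-equality test as the fp32eq lambda in the patch
    static bool fp32eq(float p1, float p2) {
        return std::abs(p1 - p2) <= 0.00001f * std::min(std::abs(p1), std::abs(p2));
    }

    // each input is (scale factor, isActivation); returns the source index or -1,
    // filling toRequantize with inputs that need a restarted quantization pass
    static int pickSourceScale(const std::vector<std::pair<float, bool>>& inputs,
                               std::set<std::size_t>& toRequantize) {
        int source = -1;
        // 1st candidate - non-activation layer with non-1 scale factor
        for (std::size_t i = 0; i < inputs.size(); ++i) {
            if (!inputs[i].second && !fp32eq(inputs[i].first, 1.0f)) {
                source = static_cast<int>(i);
                break;
            }
        }
        // 2nd candidate - 1st layer with non-1 scale factor
        if (source < 0) {
            for (std::size_t i = 0; i < inputs.size(); ++i) {
                if (!fp32eq(inputs[i].first, 1.0f)) {
                    source = static_cast<int>(i);
                    break;
                }
            }
        }
        if (source < 0) return -1;
        float sourceScale = inputs[static_cast<std::size_t>(source)].first;
        for (std::size_t i = 0; i < inputs.size(); ++i) {
            if (fp32eq(inputs[i].first, sourceScale)) continue;
            // inputs free to choose a scale (scale == 1 or activations) are simply
            // overwritten; the rest must be requantized, as in the loop above
            if (!fp32eq(inputs[i].first, 1.0f) && !inputs[i].second) toRequantize.insert(i);
        }
        return source;
    }

The alternating std::reverse driven by restarted_counter is what breaks ties between equally good source candidates on successive restarts, which is how the patch avoids the endless requantization loop mentioned in the commit message.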

inference-engine/src/gna_plugin/gna_graph_compiler.cpp

@@ -110,6 +110,12 @@ void GNAGraphCompiler::fillConcatConnections(InferenceEngine::CNNLayerPtr layer)
InferenceEngine::details::product(begin(dataInput->getDims()),
end(dataInput->getDims())) * dataInput->getPrecision().size();
// concat align layer can have additional padding, so the size of layer needs to be calculated
// based on original number of rows
if (ptrConcatLayerInput->CheckParamPresence("original_num_rows")) {
layer_size = ptrConcatLayerInput->GetParamAsInt("original_num_rows") * dataInput->getPrecision().size();
}
layerInfoItem.concatInputLayers.emplace_back(GNAConcatLayer::ConcatConnectedLayerInfo{ptrConcatLayerInput->name, concat_size, layer_size});
concat_size += layer_size;
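The added original_num_rows branch matters when a concat aligning layer has padded an input: the concat buffer must reserve space for the original rows, not the padded ones. A toy computation with assumed numbers (10 rows padded to 16, int16 precision):

    #include <cassert>

    int main() {
        const int originalNumRows = 10;  // what the "original_num_rows" param would carry (assumed value)
        const int paddedNumRows = 16;    // rows after alignment padding (assumed value)
        const int precisionSize = 2;     // bytes per element for int16

        // before the fix: the padded size would be booked into the concat buffer
        const int paddedLayerSize = paddedNumRows * precisionSize;  // 32 bytes
        // after the fix: the size is computed from the original number of rows
        const int layerSize = originalNumRows * precisionSize;      // 20 bytes

        assert(layerSize == 20 && paddedLayerSize == 32);
        return 0;
    }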
@@ -848,7 +854,7 @@ void GNAGraphCompiler::CropPrimitive(InferenceEngine::CNNLayerPtr layer) {
size_t cropOffset = offset.front() * cropLayer->precision.size();
size_t cropOutputSize = dim.front() * cropLayer->precision.size();
if (!LayerInfo(cropLayer).isCropAffined()) {
// leave crop as it is
GNAPluginNS::GNACropLayer cropLayerInfoItem(layer);
std::string& id = layer->name;

inference-engine/src/gna_plugin/layers/gna_layer_info.hpp

@@ -260,10 +260,11 @@ class LayerInfo {
bool isCropAffined() const noexcept {
auto cropLayer = dynamic_cast<InferenceEngine::CropLayer *> (layer);
if (cropLayer != nullptr && !cropLayer->offset.empty()) {
// currently crop layer only supports 2 bytes in int16 and int8 mode.
// In fp32 mode this is not necessary but is useful for testing
auto bytesPerCropElement = 2;
size_t cropOffset = cropLayer->offset.back() * bytesPerCropElement;
return (ALIGN64(cropOffset) != cropOffset);
}
return false;
}
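The rewritten check hinges on 64-byte alignment: GNA buffers must start on 64-byte boundaries, so a crop whose byte offset is unaligned cannot be executed in place and is lowered to an affine layer instead. A self-contained illustration of the same arithmetic (align64 here is a stand-in for the plugin's ALIGN64 macro):

    #include <cstddef>
    #include <cstdio>
    #include <initializer_list>

    // round x up to the next multiple of 64, as the plugin's ALIGN64 macro does
    static std::size_t align64(std::size_t x) { return ((x + 63) / 64) * 64; }

    int main() {
        const std::size_t bytesPerCropElement = 2;  // int16 element size, as assumed above
        for (std::size_t offsetElems : { 32, 96, 100 }) {
            std::size_t cropOffset = offsetElems * bytesPerCropElement;
            bool cropIsAffined = align64(cropOffset) != cropOffset;
            // 32 and 96 elements land on 64-byte boundaries; 100 elements (200 bytes) does not
            std::printf("offset of %zu elements = %zu bytes -> %s\n", offsetElems, cropOffset,
                        cropIsAffined ? "affined crop" : "plain crop");
        }
        return 0;
    }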

inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp

@@ -703,14 +703,10 @@ void InsertCopyLayerPass::run() {
if (LayerInfo(l).isConcat() && LayerInfo(prevIndirectLayer).isCrop()) { bInsert = true; }
if (bInsert) {
if (LayerInfo(prevIndirectLayer).isCrop()) {
if (LayerInfo(prevIndirectLayer).isCropAffined()) {
    // The crop will be replaced by affine.
    // Copy layer insertion is not required
    continue;
}
auto prevLayer = CNNNetPrevLayer(l, i);
InsertCopyLayer(prevLayer, l, i, getPassManager());
@@ -788,8 +784,9 @@ void InsertConcatAligningFilterPass::run() {
size_t num_rows_out = num_rows_padded + num_rows_in;
// encodes offset to beginning of split layer input
size_t bytesOffset = (aligned64_offset / bytesPerConcatElement) * (quantized ? bytesPerConcatElement : 4);
concatAligningFilter->params["output_offset"] = std::to_string(bytesOffset);
// for padded rows we cannot use copy layer - TBD how to implement
concatAligningFilter->params["num_rows_padded"] = std::to_string(num_rows_padded);
@@ -843,84 +840,92 @@ void ReorderConcatInputsPass::run() {
}
int numOfLinkLayers = 0;
for (auto& l : *pLayers) {
    // 1st stage locate concat
    LayerInfo info(l);
    if (!info.isConcat()) {
        continue;
    }

    // 2nd stage locate first input in concat
    if (l->insData.size() < 2) {
        THROW_GNA_EXCEPTION << "Concat layer has unsupported number of incoming layers: " << l->name;
    }

    auto concatLayer = info.as<ConcatLayer*>();
    auto getLayerByIndex = [&concatLayer](int idx) {
        auto input = concatLayer->insData[idx];
        auto lockedInput = input.lock();
        if (!lockedInput) {
            THROW_GNA_EXCEPTION << "cannot get insdata : " << idx << " for layer: " << concatLayer->name;
        }
        return lockedInput;
    };

    for (auto input_idx = 1; input_idx != concatLayer->insData.size(); input_idx++) {
        auto concatInput = getLayerByIndex(input_idx);
        auto currConcatLayer = getCreatorLayer(concatInput).lock();

        LayerInfo infoConcatInput(currConcatLayer);
        if (!infoConcatInput.isConcatAlignFilter()) {
            continue;
        }

        auto inputsToConcatPrev = CNNNetGetPrevLayersSkip(l, [](CNNLayerPtr origin) {
            return !LayerInfo(origin).isNonFunctional() && !LayerInfo(origin).isSplit();
        }, input_idx - 1);

        if (inputsToConcatPrev.empty()) {
            THROW_GNA_EXCEPTION << "cannot locate previous input into concat layer: " << currConcatLayer;
        }

        auto prevInputToConcat = inputsToConcatPrev.front().first;
        // previous input is the concat align filter itself - no need to reorder it
        if (prevInputToConcat == currConcatLayer) {
            continue;
        }

        bool bFinish = false;
        // making a link activation possible without extra layer if first input to concat is not a parent / indirect parent of second input
        // using ufs - upper first search
        gnalog() << "[UFS] searching for: " << prevInputToConcat->name << "\n";

        CNNNetDFS(currConcatLayer, [&currConcatLayer, &prevInputToConcat, &bFinish](CNNLayerPtr layer) {
            gnalog() << "[UFS] from : " << currConcatLayer->name << " reached: " << layer->name << "\n";
            // found that direct input to concat is an indirect parent of align filter - so no link required
            if (layer.get() == prevInputToConcat.get() || LayerInfo(prevInputToConcat).isInput()) {
                gnalog() << "[UFS] copy layer insertion needed\n";
                bFinish = true;
            }
        }, true, [&bFinish](InferenceEngine::CNNLayer* from) {
            // aborting UFS once link not needed
            return make_upstream_order(!bFinish ? from : nullptr);
        });

        auto linkName = std::string("link_") + std::to_string(numOfLinkLayers++);
        auto linkWithoutQuant = std::make_shared<CNNLayer>(LayerParams({ linkName, "link", Precision::FP32 }));
        auto link = quantized ?
            InferenceEngine::injectData<QuantizedLayerParams>(linkWithoutQuant) :
            linkWithoutQuant;

        auto linkOutData = std::make_shared<Data>(linkName,
            TensorDesc(Precision::FP32,
                SizeVector({ 1 }),
                Layout::C));
        getCreatorLayer(linkOutData) = link;

        link->outData.push_back(linkOutData);
        link->insData.push_back(currConcatLayer->outData.front());

        getInputTo(linkOutData)[prevInputToConcat->name + ".via.link"] = prevInputToConcat;
        prevInputToConcat->insData.push_back(linkOutData);

        getInputTo(currConcatLayer->outData.front())[linkName] = link;
    }
}
}

inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/concat_multi_input.cpp

@@ -0,0 +1,43 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "subgraph_tests/concat_multi_input.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
namespace {
std::vector<std::vector<std::vector<size_t>>> inShapes = {
{{1, 8}, {1, 8}},
{{1, 3}, {1, 3}, {1, 3}},
{{1, 16}, {1, 16}, {1, 16}},
{{1, 16}, {1, 16}, {1, 16}, {1, 16}},
{{1, 32}, {1, 32}, {1, 32}, {1, 32}},
{{1, 16}, {1, 32}, {1, 16}, {1, 32}, {1, 16}, {1, 32}},
};
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16,
};
std::map<std::string, std::string> additional_config = {
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_COMPACT_MODE", "NO"},
{"GNA_SCALE_FACTOR_0", "2048"},
{"GNA_PRECISION", "I16"},
};
INSTANTIATE_TEST_CASE_P(concat_multi_input, ConcatMultiInput,
::testing::Combine(
::testing::ValuesIn(inShapes),
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::Values(additional_config)),
ConcatMultiInput::getTestCaseName);
} // namespace

inference-engine/tests/functional/plugin/shared/include/subgraph_tests/concat_multi_input.hpp

@@ -0,0 +1,34 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <tuple>
#include <vector>
#include <string>
#include <memory>
#include "functional_test_utils/layer_test_utils.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "ngraph_functions/builders.hpp"
typedef std::tuple<
std::vector<std::vector<size_t>>, // Input shapes
InferenceEngine::Precision, // Network Precision
std::string, // Target Device
std::map<std::string, std::string> // Config
> concatMultiParams;
namespace LayerTestsDefinitions {
class ConcatMultiInput : public testing::WithParamInterface<concatMultiParams>,
virtual public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(testing::TestParamInfo<concatMultiParams> obj);
protected:
void SetUp() override;
};
} // namespace LayerTestsDefinitions

inference-engine/tests/functional/plugin/shared/src/subgraph_tests/concat_multi_input.cpp

@@ -0,0 +1,88 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <memory>
#include <tuple>
#include <vector>
#include <string>
#include <ie_core.hpp>
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/layer_test_utils.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "ngraph_functions/pass/convert_prc.hpp"
#include "subgraph_tests/concat_multi_input.hpp"
namespace LayerTestsDefinitions {
std::string ConcatMultiInput::getTestCaseName(testing::TestParamInfo<concatMultiParams> obj) {
std::vector<std::vector<size_t>> inputShapes;
InferenceEngine::Precision netPrecision;
std::string targetDevice;
std::map<std::string, std::string> additional_config;
std::tie(inputShapes, netPrecision, targetDevice, additional_config) = obj.param;
std::ostringstream result;
result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
result << "netPRC=" << netPrecision.name() << "_";
result << "targetDevice=" << targetDevice;
return result.str();
}
void ConcatMultiInput::SetUp() {
std::vector<std::vector<size_t>> inputShapes;
InferenceEngine::Precision netPrecision;
std::map<std::string, std::string> additional_config;
std::tie(inputShapes, netPrecision, targetDevice, additional_config) = this->GetParam();
configuration.insert(additional_config.begin(), additional_config.end());
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
std::vector<size_t> paramSize = { 1, 0 };
for (const auto& val : inputShapes) {
paramSize[1] += val[1];
}
auto params = ngraph::builder::makeParams(ngPrc, { paramSize });
auto stride = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{ 2 }, std::vector<int64_t>{ 1, 1 });
std::vector<int64_t> newAxis = { 0, 0 };
std::vector<int64_t> begin_mask = { 0, 0 };
std::vector<int64_t> end_mask = { 0, 0 };
std::vector<std::shared_ptr<ngraph::opset1::StridedSlice>> ssArray;
ngraph::OutputVector concatInput;
auto relu = std::make_shared<ngraph::opset1::Relu>(params[0]);
std::vector<int64_t> startOffset = { 0, 0 };
for (size_t i = 0; i < inputShapes.size(); ++i) {
std::vector<int64_t> shape = { static_cast<int64_t>(inputShapes[i][0]),
static_cast<int64_t>(inputShapes[i][1]) };
std::vector<int64_t> endoffset = { static_cast<int64_t>(inputShapes[i][0]) + startOffset[0],
static_cast<int64_t>(inputShapes[i][1]) + startOffset[1]};
auto begin = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{ 2 }, startOffset);
auto end = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{ 2 }, endoffset);
auto ss = std::make_shared<ngraph::opset1::StridedSlice>(relu, begin, end, stride, begin_mask, end_mask, newAxis);
ssArray.push_back(ss);
concatInput.push_back(ssArray[i]);
startOffset[1] += shape[1];
}
auto concat = std::make_shared<ngraph::opset1::Concat>(concatInput, 1);
ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(concat) };
function = std::make_shared<ngraph::Function>(results, params, "ConcatMultiInput");
}
TEST_P(ConcatMultiInput, CompareWithRefImpl) {
Run();
};
} // namespace LayerTestsDefinitions