Fix cases when inserted identities are separated by non-functional layers (#2771)
parent 6a2c209e58
commit 3f4d8b49ff
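
In short: before inserting an identity activation, the pass now walks upstream from the insertion candidate, skipping non-functional layers (reshapes and the like), and remembers the last visited layer (`true_layer`) as the insertion point; if the chain cannot be followed (no parent, or the layer fans out), it falls back to the original candidate. Below is a minimal, standalone sketch of that search, using a hypothetical `Layer` struct and `findFunctionalParent` helper rather than the plugin's actual `CNNLayerPtr`/`LayerInfo`/`CNNNetPrevLayer` API.

#include <iostream>
#include <memory>
#include <string>

// Hypothetical, simplified layer type for illustration only; the real pass
// works on InferenceEngine::CNNLayerPtr and queries LayerInfo.
struct Layer {
    std::string name;
    bool functional;              // false for non-functional layers (e.g. reshape)
    std::shared_ptr<Layer> prev;  // single upstream parent, if any
    size_t numOutputs;
};
using LayerPtr = std::shared_ptr<Layer>;

// Walk upstream from `start` past non-functional layers. If the chain cannot
// be followed (no parent, or more than one output), fall back to `start`,
// mirroring the fallback in the pass change below.
LayerPtr findFunctionalParent(LayerPtr start) {
    auto prev = start;
    while (prev && !prev->functional) {
        if (prev->prev && prev->numOutputs == 1) {
            prev = prev->prev;
        } else {
            std::cerr << "Could not find functional parent for " << start->name
                      << ", using original layer\n";
            return start;
        }
    }
    return prev;
}

int main() {
    auto conv = std::make_shared<Layer>(Layer{"conv", true, nullptr, 1});
    auto reshape = std::make_shared<Layer>(Layer{"reshape", false, conv, 1});
    std::cout << findFunctionalParent(reshape)->name << "\n";  // prints "conv"
    return 0;
}
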
@@ -631,11 +631,25 @@ void InsertIdentityLayerPass::run() {
     auto quantized = InferenceEngine::getInjectedData<QuantizedLayerParams>(pLayers->front());
     for (auto & l : *pLayers) {
         for (auto && prev : getCandidatesForIdentityInsertion(l)) {
+            // Do an upstream search until Functional layer is found
+            auto original_prev_layer = prev;
+            auto true_layer = l;
+            while (LayerInfo(prev).isNonFunctional()) {
+                if (CNNNetHasPrevLayer(prev.get()) && prev->outData.size() == 1) {
+                    true_layer = prev;
+                    prev = CNNNetPrevLayer(prev);
+                } else {
+                    gnawarn() << "Could not find Functional parent for " << original_prev_layer->name << ", using original layer";
+                    prev = original_prev_layer;
+                    true_layer = l;
+                    break;
+                }
+            }
             int numOfIdentityLayers = this->getPassManager()->getIntVar(identityLayersCounterName)++;
             // actual insertion
             auto activationName = std::string("identity_") + std::to_string(numOfIdentityLayers);
 
-            gnalog() << "Inserted "<< activationName << " between: " << prev->name << " and " << l->name << "\n" << std::flush;
+            gnalog() << "Inserted "<< activationName << " between: " << prev->name << " and " << true_layer->name << "\n" << std::flush;
 
             CNNLayerPtr activationLayer =
                 std::make_shared<GenericLayer>(LayerParams({activationName, "identity", Precision::FP32}));
@@ -643,17 +657,17 @@ void InsertIdentityLayerPass::run() {
             // TODO: why index is 0 ? - better use direct indexing in getCandidateFunction
             // detecting ins-data-idx
             size_t insDataIdx = std::numeric_limits<size_t>::max();
-            for (size_t i = 0; i != l->insData.size(); i++) {
-                if (getCreatorLayer(l->insData[i].lock()).lock() == prev) {
+            for (size_t i = 0; i != true_layer->insData.size(); i++) {
+                if (getCreatorLayer(true_layer->insData[i].lock()).lock() == prev) {
                     insDataIdx = i;
                     break;
                 }
             }
             if (insDataIdx == std::numeric_limits<size_t>::max()) {
-                THROW_GNA_EXCEPTION << "cannot insert identity layer after" << prev->name << " and before " << l->name;
+                THROW_GNA_EXCEPTION << "cannot insert identity layer after" << prev->name << " and before " << true_layer->name;
             }
 
-            auto inputData = l->insData[insDataIdx].lock();
+            auto inputData = true_layer->insData[insDataIdx].lock();
 
             auto dataPtr = std::make_shared<Data>("identity_data_" + std::to_string(numOfIdentityLayers), inputData->getTensorDesc());
             auto activationLayerWithQuant = quantized ?
@@ -681,7 +695,7 @@ void InsertIdentityLayerPass::run() {
                 activationLayerWithQuant->params["original_num_rows"] = prev->params["original_num_rows"];
             }
 
-            CNNNetworkInsertLayer(prev, notAll ? l : CNNLayerPtr(nullptr), activationLayerWithQuant);
+            CNNNetworkInsertLayer(prev, notAll ? true_layer : CNNLayerPtr(nullptr), activationLayerWithQuant);
         }
     }
 }
@@ -0,0 +1,35 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+#include <subgraph_tests/memory_eltwise_reshape_concat.hpp>
+#include "common_test_utils/test_constants.hpp"
+
+namespace SubgraphTestsDefinitions {
+namespace {
+std::vector<size_t> input_multiples = {
+    1,
+    7,
+    5,
+    8
+};
+
+std::vector<size_t> concat_sizes = {
+    32,
+    64
+};
+
+std::map<std::string, std::string> additional_config = {
+    {"GNA_COMPACT_MODE", "NO"},
+    {"GNA_DEVICE_MODE", "GNA_SW_FP32"},
+    {"GNA_SCALE_FACTOR_0", "1638.4"},
+};
+}  // namespace
+
+INSTANTIATE_TEST_CASE_P(smoke_MemoryEltwiseReshapeConcatTest, MemoryEltwiseReshapeConcatTest,
+                        ::testing::Combine(
+                            ::testing::Values(CommonTestUtils::DEVICE_GNA),
+                            ::testing::Values(InferenceEngine::Precision::FP32),
+                            ::testing::ValuesIn(input_multiples),
+                            ::testing::ValuesIn(concat_sizes),
+                            ::testing::Values(additional_config)),
+                        MemoryEltwiseReshapeConcatTest::getTestCaseName);
+}  // namespace SubgraphTestsDefinitions
@@ -0,0 +1,37 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+#pragma once
+
+#include "common_test_utils/test_common.hpp"
+#include "functional_test_utils/layer_test_utils.hpp"
+#include <ie_core.hpp>
+
+namespace SubgraphTestsDefinitions {
+typedef std::tuple<
+    std::string,                        // Target device name
+    InferenceEngine::Precision,         // Network precision
+    size_t,                             // Multiples of concat size to be used as input size
+    size_t,                             // Concat size
+    std::map<std::string, std::string>  // Configuration
+> memoryEltwiseReshapeConcatParams;
+
+class MemoryEltwiseReshapeConcatTest : public LayerTestsUtils::LayerTestsCommon,
+    public testing::WithParamInterface<memoryEltwiseReshapeConcatParams> {
+private:
+    void initTestModel();
+    // memory layers have to be replaced since ngraph does not support them
+    void initNgraphFriendlyModel();
+
+    // since we are switching models, we need to generate and save these values in SetUp
+    size_t inputSize;
+    size_t concatSize;
+    ngraph::element::Type ngPrc;
+    std::vector<float> memory_init;
+    std::vector<float> concat_vals;
+protected:
+    void SetUp() override;
+    void Run() override;
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<memoryEltwiseReshapeConcatParams> &obj);
+};
+}  // namespace SubgraphTestsDefinitions
@@ -0,0 +1,150 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include <memory>
+#include <functional>
+
+#include "ie_core.hpp"
+
+#include "common_test_utils/common_utils.hpp"
+#include "functional_test_utils/blob_utils.hpp"
+#include "functional_test_utils/precision_utils.hpp"
+#include "functional_test_utils/plugin_cache.hpp"
+#include "functional_test_utils/skip_tests_config.hpp"
+#include "ngraph_functions/utils/ngraph_helpers.hpp"
+#include "ngraph_functions/builders.hpp"
+
+#include <transformations/op_conversions/lstm_cell_decomposition.hpp>
+#include "subgraph_tests/memory_eltwise_reshape_concat.hpp"
+
+namespace SubgraphTestsDefinitions {
+
+std::string MemoryEltwiseReshapeConcatTest::getTestCaseName(const testing::TestParamInfo<memoryEltwiseReshapeConcatParams> &obj) {
+    std::string targetDevice;
+    InferenceEngine::Precision netPrecision;
+    size_t inputSize;
+    size_t concatSize;
+    std::map<std::string, std::string> config;
+    std::tie(targetDevice, netPrecision, inputSize, concatSize, config) = obj.param;
+    std::ostringstream result;
+
+    result << "netPrecision=" << netPrecision.name() << "_";
+    result << "IS=" << inputSize << "_";
+    result << "CS=" << concatSize << "_";
+    result << "targetDevice=" << targetDevice;
+    return result.str();
+}
+
+void MemoryEltwiseReshapeConcatTest::SetUp() {
+    InferenceEngine::Precision netPrecision;
+    std::map<std::string, std::string> config;
+    std::tie(targetDevice, netPrecision, inputSize, concatSize, config) = this->GetParam();
+    configuration.insert(config.begin(), config.end());
+    ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+
+    const int seed = 0;
+    std::mt19937 gen(static_cast<float>(seed));
+
+    auto generateFloatNumbers = [gen](std::size_t vec_len, float min, float max) mutable {
+        std::vector<float> res;
+
+        std::uniform_real_distribution<float> dist(min, max);
+        for (int i = 0; i < vec_len; i++)
+            res.emplace_back(static_cast<float>(dist(gen)));
+
+        return res;
+    };
+
+    memory_init = generateFloatNumbers(inputSize * concatSize, -1.0f, 1.0f);
+    concat_vals = generateFloatNumbers(concatSize, 12.0f, 14.0f);
+}
+
+void MemoryEltwiseReshapeConcatTest::initTestModel() {
+    InferenceEngine::SizeVector input_dims = {1, inputSize * concatSize};
+    auto input_parameter = ngraph::builder::makeParams(ngPrc, {input_dims});
+
+    auto memory_constant = ngraph::builder::makeConstant<float>(ngPrc, input_dims, memory_init);
+    memory_constant->set_friendly_name("memory_constant");
+    auto memory_read = std::make_shared<ngraph::op::ReadValue>(memory_constant, "memory");
+    memory_read->set_friendly_name("memory_read");
+
+    auto mul = ngraph::builder::makeEltwise(input_parameter[0], memory_read, ngraph::helpers::EltwiseTypes::MULTIPLY);
+    mul->set_friendly_name("multiplication");
+
+    auto memory_write = std::make_shared<ngraph::op::Assign>(mul, "memory");
+    memory_write->set_friendly_name("memory_write");
+
+    auto reshape_1_pattern = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{2}, std::vector<size_t>({inputSize, concatSize}));
+    reshape_1_pattern->set_friendly_name("reshape_pattern");
+    auto reshape_1 = std::make_shared<ngraph::op::v1::Reshape>(mul, reshape_1_pattern, false);
+    reshape_1->set_friendly_name("reshape");
+
+    auto concat_constant = ngraph::builder::makeConstant(ngPrc, {1, concatSize}, concat_vals);
+    concat_constant->set_friendly_name("concat_constant");
+
+    auto concat = ngraph::builder::makeConcat({concat_constant, reshape_1}, 0);
+
+    memory_write->add_control_dependency(memory_read);
+    concat->add_control_dependency(memory_write);
+
+    auto final_reshape_pattern = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{4},
+                                                                        std::vector<size_t>({1, 1, inputSize + 1, concatSize}));
+    auto final_reshape = std::make_shared<ngraph::op::v1::Reshape>(concat, final_reshape_pattern, false);
+
+    function = std::make_shared<ngraph::Function>(final_reshape, input_parameter, "memory_multiply_reshape_concat");
+}
+
+void MemoryEltwiseReshapeConcatTest::initNgraphFriendlyModel() {
+    InferenceEngine::SizeVector input_dims = {1, inputSize * concatSize};
+    auto input_parameter = ngraph::builder::makeParams(ngPrc, {input_dims});
+
+    auto memory_constant = ngraph::builder::makeConstant<float>(ngPrc, input_dims, memory_init);
+    memory_constant->set_friendly_name("memory_constant");
+
+    auto mul = ngraph::builder::makeEltwise(input_parameter[0], memory_constant, ngraph::helpers::EltwiseTypes::MULTIPLY);
+    mul->set_friendly_name("multiplication");
+
+    auto reshape_pattern = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{3}, std::vector<size_t>({1, inputSize, concatSize}));
+    reshape_pattern->set_friendly_name("reshape_pattern");
+    auto reshape = std::make_shared<ngraph::op::v1::Reshape>(mul, reshape_pattern, false);
+    reshape->set_friendly_name("reshape");
+
+    auto squeeze_const = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{1}, 0);
+    squeeze_const->set_friendly_name("squeeze_const");
+    auto squeeze = std::make_shared<ngraph::op::Squeeze>(reshape, squeeze_const);
+    squeeze->set_friendly_name("squeeze");
+
+    auto concat_constant = ngraph::builder::makeConstant(ngPrc, {1, concatSize}, concat_vals);
+    concat_constant->set_friendly_name("concat_constant");
+
+    auto concat = ngraph::builder::makeConcat({concat_constant, squeeze}, 0);
+
+    function = std::make_shared<ngraph::Function>(concat, input_parameter, "memory_multiply_reshape_concat");
+}
+
+void MemoryEltwiseReshapeConcatTest::Run() {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    initTestModel();
+    LoadNetwork();
+
+    InferenceEngine::TensorDesc state_description(InferenceEngine::Precision::FP32,
+                                                  InferenceEngine::SizeVector({1, inputSize * concatSize}),
+                                                  InferenceEngine::Layout::NC);
+
+    auto states = executableNetwork.QueryState();
+    auto state_values_blob = FuncTestUtils::createAndFillBlobWithFloatArray(state_description,
+                                                                            memory_init.data(), memory_init.size());
+    states[0].SetState(state_values_blob);
+    Infer();
+    initNgraphFriendlyModel();
+    Validate();
+}
+
+TEST_P(MemoryEltwiseReshapeConcatTest, CompareWithRefs) {
+    Run();
+};
+}  // namespace SubgraphTestsDefinitions