Fixed CNNNetwork iterator for some corner cases with Memory (#4084)
* Fixed CNNNetwork iterator for some corner cases with Memory
* Fixed tests
This commit is contained in:
@@ -32,7 +32,6 @@ class INFERENCE_ENGINE_INTERNAL("Migrate to IR v10 and work with ngraph::Functio
|
||||
CNNNetworkIterator {
|
||||
IE_SUPPRESS_DEPRECATED_START
|
||||
|
||||
std::unordered_set<CNNLayer*> visited {};
|
||||
std::list<CNNLayerPtr> nextLayersToVisit {};
|
||||
InferenceEngine::CNNLayerPtr currentLayer = nullptr;
|
||||
const ICNNNetwork* network = nullptr;
|
||||
@@ -56,6 +55,7 @@ CNNNetworkIterator {
|
||||
}
|
||||
return consumers;
|
||||
};
|
||||
std::unordered_set<CNNLayer*> visited;
|
||||
auto bfs = [&](const CNNLayerPtr& start_node, bool traverse_via_outputs = false) {
|
||||
if (!start_node || visited.count(start_node.get())) return;
|
||||
std::deque<CNNLayerPtr> q;
|
||||
@@ -98,16 +98,31 @@ CNNNetworkIterator {
|
||||
}
|
||||
};
|
||||
|
||||
// First we run bfs starting from outputs that provides deterministic graph traverse
|
||||
for (const auto & output : outputs) {
|
||||
bfs(getCreatorLayer(output.second).lock());
|
||||
// Find all outputLayers
|
||||
std::vector<CNNLayerPtr> outputLayers;
|
||||
const auto* networkImpl = dynamic_cast<const CNNNetworkImpl*>(network);
|
||||
if (networkImpl) {
|
||||
for (const auto & node : networkImpl->allLayers()) {
|
||||
if (get_consumers(node.second).empty())
|
||||
outputLayers.emplace_back(node.second);
|
||||
}
|
||||
} else {
|
||||
// For backward compatibility
|
||||
for (const auto& out : outputs) {
|
||||
outputLayers.emplace_back(getCreatorLayer(out.second).lock());
|
||||
}
|
||||
}
|
||||
|
||||
// For cases when graph has no outputs we start bfs from inputs to ensure topological sort
|
||||
for (const auto & input : inputs) {
|
||||
const auto data_ptr = input.second->getInputData();
|
||||
for (const auto & consumer : getInputTo(data_ptr))
|
||||
bfs(consumer.second, true);
|
||||
// First we run bfs starting from outputs that provides deterministic graph traverse
|
||||
for (const auto & output : outputLayers) {
|
||||
bfs(output);
|
||||
}
|
||||
if (!networkImpl) {
|
||||
// For cases when graph has no outputs we start bfs from inputs to ensure topological sort
|
||||
for (const auto & input : inputs) {
|
||||
const auto data_ptr = input.second->getInputData();
|
||||
for (const auto & consumer : getInputTo(data_ptr))
|
||||
bfs(consumer.second, true);
|
||||
}
|
||||
}
|
||||
currentLayer = nextLayersToVisit.front();
|
||||
}
|
||||
|
||||
@@ -12,12 +12,14 @@
|
||||
#include <transformations/common_optimizations/common_optimizations.hpp>
|
||||
#include <legacy/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.hpp>
|
||||
#include <legacy/transformations/convert_opset1_to_legacy/convert_prior_to_ie_prior.hpp>
|
||||
#include <legacy/details/ie_cnn_network_iterator.hpp>
|
||||
#include <transformations/opset_conversions/convert_opset2_to_opset1.hpp>
|
||||
#include <transformations/opset_conversions/convert_opset3_to_opset2.hpp>
|
||||
#include <transformations/convert_precision.hpp>
|
||||
#include <ngraph/function.hpp>
|
||||
#include <ngraph/opsets/opset1.hpp>
|
||||
#include <ngraph/opsets/opset4.hpp>
|
||||
#include <ngraph/opsets/opset6.hpp>
|
||||
#include <ngraph_ops/convolution_ie.hpp>
|
||||
#include <transformations/init_node_info.hpp>
|
||||
#include <legacy/convert_function_to_cnn_network.hpp>
|
||||
@@ -389,3 +391,77 @@ TEST(ConvertFunctionToCNNNetworkTests, NonUniqueNamesParametersNegative) {
|
||||
EXPECT_THAT(e.what(), testing::HasSubstr(std::string("Detected two output operations with the same name:")));
|
||||
}
|
||||
}
|
||||
|
||||
// Checks that the legacy CNNNetworkIterator visits both Memory layers
// (ReadValue + Assign) of a stateful graph that also has a regular Result output.
TEST(ConvertFunctionToCNNNetworkTests, IteratorForMemoryLayers) {
    std::shared_ptr<ngraph::Function> f(nullptr);
    {
        // State read side: Constant initializer -> ReadValue bound to "buffer_1".
        auto constReadVal = ngraph::opset6::Constant::create(ngraph::element::f32, {1, 37632}, {0});
        constReadVal->set_friendly_name("const");
        auto readVal = std::make_shared<ngraph::opset6::ReadValue>(constReadVal, "buffer_1");
        readVal->set_friendly_name("readVal_Buf1");

        // Split the state tensor into two uneven chunks along axis 1.
        auto constVarSplit1 = ngraph::opset6::Constant::create(ngraph::element::i64, {}, {1});
        constVarSplit1->set_friendly_name("varSplitConst1");
        auto constVarSplit2 = ngraph::opset6::Constant::create(ngraph::element::i64, {2}, {5376, 32256});
        constVarSplit2->set_friendly_name("varSplitConst2");
        auto varSplit = std::make_shared<ngraph::opset6::VariadicSplit>(readVal, constVarSplit1, constVarSplit2);

        // First split output feeds the network Result.
        auto param1 = std::make_shared<ngraph::opset6::Parameter>(ngraph::element::f32, ngraph::Shape{1, 5376});
        auto varConcat = std::make_shared<ngraph::opset6::Concat>(ngraph::OutputVector{varSplit->output(0), param1}, 1);
        auto result = std::make_shared<ngraph::opset6::Result>(varConcat);

        // Second split output feeds the state write side (Assign to the same "buffer_1").
        auto param2 = std::make_shared<ngraph::opset6::Parameter>(ngraph::element::f32, ngraph::Shape{1, 5376});
        auto varConcat2 = std::make_shared<ngraph::opset6::Concat>(ngraph::OutputVector{varSplit->output(1), param2}, 1);
        auto assign = std::make_shared<ngraph::opset6::Assign>(varConcat2, "buffer_1");

        f = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                               ngraph::SinkVector{assign},
                                               ngraph::ParameterVector{param1, param2});
    }

    InferenceEngine::CNNNetwork nGraphImpl(f);
    nGraphImpl = CNNNetwork(InferenceEngine::details::convertFunctionToICNNNetwork(f, nGraphImpl));

    // Walk the converted network and count layers of type "Memory";
    // ReadValue and Assign must both be reachable by the iterator.
    int memory_count(0);
    IE_SUPPRESS_DEPRECATED_START
    for (details::CNNNetworkIterator itLayer{nGraphImpl}; itLayer != details::CNNNetworkIterator(); itLayer++) {
        if ((*itLayer)->type == "Memory")
            memory_count++;
    }
    IE_SUPPRESS_DEPRECATED_END
    ASSERT_EQ(2, memory_count);
}
|
||||
|
||||
// Same scenario as IteratorForMemoryLayers, but the function has NO Result
// operations at all (empty ResultVector) — the graph is terminated only by the
// Assign sink. The iterator must still reach both Memory layers.
TEST(ConvertFunctionToCNNNetworkTests, IteratorForMemoryLayers2) {
    std::shared_ptr<ngraph::Function> f(nullptr);
    {
        // State read side: Constant initializer -> ReadValue bound to "buffer_1".
        auto constReadVal = ngraph::opset6::Constant::create(ngraph::element::f32, {1, 37632}, {0});
        constReadVal->set_friendly_name("const");
        auto readVal = std::make_shared<ngraph::opset6::ReadValue>(constReadVal, "buffer_1");
        readVal->set_friendly_name("readVal_Buf1");

        // Split the state tensor into two uneven chunks along axis 1.
        auto constVarSplit1 = ngraph::opset6::Constant::create(ngraph::element::i64, {}, {1});
        constVarSplit1->set_friendly_name("varSplitConst1");
        auto constVarSplit2 = ngraph::opset6::Constant::create(ngraph::element::i64, {2}, {5376, 32256});
        constVarSplit2->set_friendly_name("varSplitConst2");
        auto varSplit = std::make_shared<ngraph::opset6::VariadicSplit>(readVal, constVarSplit1, constVarSplit2);

        // Only the state-write branch exists; there is no Result consumer.
        auto param2 = std::make_shared<ngraph::opset6::Parameter>(ngraph::element::f32, ngraph::Shape{1, 5376});
        auto varConcat2 = std::make_shared<ngraph::opset6::Concat>(ngraph::OutputVector{varSplit->output(1), param2}, 1);
        auto assign = std::make_shared<ngraph::opset6::Assign>(varConcat2, "buffer_1");

        f = std::make_shared<ngraph::Function>(ngraph::ResultVector{},
                                               ngraph::SinkVector{assign},
                                               ngraph::ParameterVector{param2});
    }

    InferenceEngine::CNNNetwork nGraphImpl(f);
    nGraphImpl = CNNNetwork(InferenceEngine::details::convertFunctionToICNNNetwork(f, nGraphImpl));

    // Count "Memory" layers seen by the legacy iterator; both ReadValue and
    // Assign must be visited even though the graph has no classic outputs.
    int memory_count(0);
    IE_SUPPRESS_DEPRECATED_START
    for (details::CNNNetworkIterator itLayer{nGraphImpl}; itLayer != details::CNNNetworkIterator(); itLayer++) {
        if ((*itLayer)->type == "Memory")
            memory_count++;
    }
    IE_SUPPRESS_DEPRECATED_END
    ASSERT_EQ(2, memory_count);
}
|
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user