[IE][VPU]: Move Myriad QueryNetwork to a separate file (#3687)

* Move MyriadQueryNetwork implementation to a separate file
Anton Dudchenko 2021-01-12 12:46:52 +03:00 committed by GitHub
parent 943e511c58
commit f90b6e85b8
5 changed files with 195 additions and 160 deletions

View File

@@ -0,0 +1,16 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ngraph/ngraph.hpp>
#include <ie_icnn_network.hpp>
namespace vpu {
InferenceEngine::QueryNetworkResult getQueryNetwork(const InferenceEngine::ICNNNetwork::Ptr& convertedNetwork,
const std::shared_ptr<const ngraph::Function>& function,
const std::string& pluginName, const std::set<std::string>& supportedLayers);
} // namespace vpu
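
For orientation, a minimal sketch of how a caller could drive the relocated helper. This is a hypothetical wrapper (queryWithHelper is not part of this commit); it mirrors the myriad_plugin change further down, which converts the ngraph function into a legacy CNNNetwork before querying:

// Hypothetical caller sketch, assuming the declaration above and the
// FrontEnd::convertNetwork helper shown later in this diff.
#include <vpu/ngraph/query_network.hpp>
#include <vpu/frontend/frontend.hpp>
#include <set>
#include <string>

InferenceEngine::QueryNetworkResult queryWithHelper(
        InferenceEngine::ICNNNetwork& network,
        const std::string& pluginName,
        const std::set<std::string>& supportedLayers) {
    InferenceEngine::QueryNetworkResult res;
    if (auto function = network.getFunction()) {
        // Legalize the ngraph function into a legacy CNNNetwork first,
        // as the plugin code below does (it additionally clones the
        // network before converting).
        auto convertedNetwork = vpu::FrontEnd::convertNetwork(network);
        res = vpu::getQueryNetwork(convertedNetwork, function, pluginName, supportedLayers);
    }
    return res;
}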

View File

@@ -0,0 +1,169 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "vpu/ngraph/query_network.hpp"
#include "ngraph/op/util/op_types.hpp"
#include "ngraph/opsets/opset3.hpp"
#include <transformations/rt_info/fused_names_attribute.hpp>
#include <legacy/details/ie_cnn_network_iterator.hpp>
#include <ie_algorithm.hpp>
namespace vpu {
InferenceEngine::QueryNetworkResult getQueryNetwork(const InferenceEngine::ICNNNetwork::Ptr& convertedNetwork,
const std::shared_ptr<const ngraph::Function>& function,
const std::string& pluginName, const std::set<std::string>& supportedLayers) {
InferenceEngine::QueryNetworkResult res;
std::unordered_set<std::string> originalOps;
for (auto& node : function->get_ops()) {
originalOps.emplace(node->get_friendly_name());
}
std::unordered_set<std::string> supported;
std::unordered_set<std::string> unsupported;
std::unordered_set<std::string> splitNames;
std::unordered_set<std::string> concatNames;
ngraph::NodeVector splits;
ngraph::NodeVector concats;
const auto isLayerSupported = [&supportedLayers, &splitNames, &concatNames, &concats, &splits]
(InferenceEngine::details::CNNNetworkIterator& layer) -> bool {
auto node = (*layer)->getNode();
if (std::dynamic_pointer_cast<const ::ngraph::opset3::Split>(node) != nullptr) {
splitNames.emplace(node->get_friendly_name());
splits.push_back(node);
return false;
} else if (std::dynamic_pointer_cast<const ::ngraph::opset3::Concat>(node) != nullptr) {
concatNames.emplace(node->get_friendly_name());
concats.push_back(node);
return false;
} else {
return supportedLayers.count((*layer)->name) != 0;
}
};
for (InferenceEngine::details::CNNNetworkIterator itLayer{convertedNetwork.get()};
itLayer != InferenceEngine::details::CNNNetworkIterator();
itLayer++) {
const auto fusedNode = (*itLayer)->getNode();
if (fusedNode == nullptr) {
continue;
}
for (auto& fusedLayerName : ngraph::getFusedNamesVector(fusedNode)) {
if (InferenceEngine::details::contains(originalOps, fusedLayerName)) {
if (isLayerSupported(itLayer)) {
supported.emplace(fusedLayerName);
} else {
unsupported.emplace(fusedLayerName);
}
}
}
}
// Drop every layer that any fused sub-layer reported as unsupported.
// Iterating unsupported avoids erasing from the set being traversed,
// which would invalidate the range-for iterator.
for (const auto& layerName : unsupported) {
supported.erase(layerName);
}
unsupported.clear();
std::function<void(std::shared_ptr<ngraph::Node>)> markParentSplitAsUnsupported = [&markParentSplitAsUnsupported, &supported, &splitNames]
(const std::shared_ptr<ngraph::Node>& split) {
const auto inputs = split->inputs();
for (const auto& input : inputs) {
const auto& parentName = input.get_source_output().get_node()->get_friendly_name();
if (InferenceEngine::details::contains(supported, parentName) &&
InferenceEngine::details::contains(splitNames, parentName)) {
markParentSplitAsUnsupported(input.get_source_output().get_node_shared_ptr());
}
}
const auto& name = split->get_friendly_name();
if (InferenceEngine::details::contains(supported, name)) {
supported.erase(name);
}
};
for (const auto& split : splits) {
// Mark the Split as supported only if all of its consumers are supported
bool is_supported = true;
const auto outputs = split->outputs();
for (const auto& output : outputs) {
for (const auto& consumer : output.get_target_inputs()) {
const auto& name = consumer.get_node()->get_friendly_name();
if (!InferenceEngine::details::contains(supported, name) &&
!InferenceEngine::details::contains(concatNames, name) &&
!InferenceEngine::details::contains(splitNames, name)) {
is_supported = false;
break;
}
}
}
if (is_supported) {
supported.emplace(split->get_friendly_name());
} else {
// If the Split is unsupported and its parent is also a Split, mark the parent as unsupported too
markParentSplitAsUnsupported(split);
}
}
for (const auto& concat : concats) {
// Mark the Concat as supported only if all of its parent layers are supported
bool is_supported = true;
const auto inputs = concat->inputs();
for (const auto& input : inputs) {
const auto& name = input.get_source_output().get_node()->get_friendly_name();
if (!InferenceEngine::details::contains(supported, name) &&
!InferenceEngine::details::contains(concatNames, name)) {
is_supported = false;
break;
}
}
if (is_supported) {
supported.emplace(concat->get_friendly_name());
}
}
for (const auto& node : function->get_ops()) {
if (InferenceEngine::details::contains(supported, node->get_friendly_name())) {
for (const auto& inputNodeOutput : node->input_values()) {
if (ngraph::op::is_constant(inputNodeOutput.get_node()) || ngraph::op::is_parameter(inputNodeOutput.get_node())) {
supported.emplace(inputNodeOutput.get_node()->get_friendly_name());
}
}
for (const auto& outputs : node->outputs()) {
for (const auto& outputNodeInput : outputs.get_target_inputs()) {
if (ngraph::op::is_output(outputNodeInput.get_node())) {
supported.emplace(outputNodeInput.get_node()->get_friendly_name());
}
}
}
}
if (ngraph::op::is_constant(node) || ngraph::op::is_parameter(node)) {
if (!InferenceEngine::details::contains(supported, node->output(0).get_target_inputs().begin()->get_node()->get_friendly_name())) {
supported.erase(node->get_friendly_name());
}
} else if (ngraph::op::is_output(node)) {
if (!InferenceEngine::details::contains(supported, node->input_values().begin()->get_node()->get_friendly_name())) {
supported.erase(node->get_friendly_name());
}
}
}
for (const auto& layerName : supported) {
res.supportedLayersMap.emplace(layerName, pluginName);
}
return res;
}
} // namespace vpu
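
The reconciliation of fused names above is plain set arithmetic; the veto step can be illustrated with a self-contained toy (hypothetical layer names, standard containers only):

// Toy illustration of the veto step: one unsupported occurrence of a
// fused original op removes it from the supported set.
#include <iostream>
#include <string>
#include <unordered_set>

int main() {
    std::unordered_set<std::string> supported{"conv1", "relu1", "add1"};
    std::unordered_set<std::string> unsupported{"add1"};
    for (const auto& name : unsupported) {
        supported.erase(name);  // safe: we iterate the other set
    }
    for (const auto& name : supported) {
        std::cout << name << "\n";  // prints conv1 and relu1 (any order)
    }
    return 0;
}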

View File

@@ -189,7 +189,6 @@ public:
static CustomLayer::Ptr getSuitableCustomLayer(const std::vector<CustomLayer::Ptr>& customLayers, const ie::CNNLayerPtr& cnnLayer);
static ie::ICNNNetwork::Ptr convertNetwork(ie::ICNNNetwork& network);
bool isLayerSupported(const std::string& type);
private:
Data getVpuData(const ie::DataPtr& ieData) const;

View File

@@ -153,10 +153,6 @@ ModelPtr FrontEnd::buildInitialModel(const ie::ICNNNetwork& network) {
return runCommonPasses(network);
}
bool FrontEnd::isLayerSupported(const std::string& type) {
return parsers.count(type) != 0;
}
ie::ICNNNetwork::Ptr FrontEnd::convertNetwork(ie::ICNNNetwork& network) {
// disable transformations for some cases
const auto transformationsPredicate = [](const std::shared_ptr<const ngraph::Node>& node) -> bool {

View File

@@ -17,10 +17,8 @@
#include <vpu/frontend/frontend.hpp>
#include <vpu/utils/profiling.hpp>
#include <vpu/utils/error.hpp>
#include <vpu/ngraph/query_network.hpp>
#include <transformations/common_optimizations/common_optimizations.hpp>
#include <transformations/rt_info/fused_names_attribute.hpp>
#include <ngraph/op/util/op_types.hpp>
#include <ngraph/opsets/opset3.hpp>
#include <ngraph/pass/manager.hpp>
#include "generic_ie.hpp"
@@ -82,168 +80,25 @@ QueryNetworkResult Engine::QueryNetwork(
VPU_THROW_UNLESS(std::find(deviceIDs.begin(), deviceIDs.end(), deviceName) != deviceIDs.end(), "Myriad device: {} not found.", deviceName);
}
if (auto function = network.getFunction()) {
std::unordered_set<std::string> originalOps;
for (auto& node : function->get_ops()) {
originalOps.emplace(node->get_friendly_name());
}
auto clonedNetwork = cloneNetwork(network);
auto convertedNetwork = vpu::FrontEnd::convertNetwork(*clonedNetwork);
std::unordered_set<std::string> supported;
std::unordered_set<std::string> unsupported;
std::unordered_set<std::string> splitNames;
std::unordered_set<std::string> concatNames;
ngraph::NodeVector splits;
ngraph::NodeVector concats;
const auto isLayerSupported = [this, &splitNames, &concatNames, &concats, &splits](InferenceEngine::details::CNNNetworkIterator& layer) -> bool {
auto node = (*layer)->getNode();
if (std::dynamic_pointer_cast<const ::ngraph::opset3::Split>(node) != nullptr) {
splitNames.emplace(node->get_friendly_name());
splits.push_back(node);
return false;
} else if (std::dynamic_pointer_cast<const ::ngraph::opset3::Concat>(node) != nullptr) {
concatNames.emplace(node->get_friendly_name());
concats.push_back(node);
return false;
} else {
auto stageBuilder = std::make_shared<StageBuilder>();
auto frontEnd = std::make_shared<FrontEnd>(stageBuilder, GetCore());
return frontEnd->isLayerSupported((*layer)->type);
}
};
for (InferenceEngine::details::CNNNetworkIterator itLayer{convertedNetwork.get()};
itLayer != InferenceEngine::details::CNNNetworkIterator();
itLayer++) {
const auto fusedNode = (*itLayer)->getNode();
if (fusedNode == nullptr) {
continue;
}
for (auto& fusedLayerName : ngraph::getFusedNamesVector(fusedNode)) {
if (InferenceEngine::details::contains(originalOps, fusedLayerName)) {
if (isLayerSupported(itLayer)) {
supported.emplace(fusedLayerName);
} else {
unsupported.emplace(fusedLayerName);
}
}
}
}
// Drop every layer that any fused sub-layer reported as unsupported.
// Iterating unsupported avoids erasing from the set being traversed.
for (const auto& layerName : unsupported) {
supported.erase(layerName);
}
unsupported.clear();
std::function<void(std::shared_ptr<ngraph::Node>)> markParentSplitAsUnsupported = [&markParentSplitAsUnsupported, &supported, &splitNames]
(const std::shared_ptr<ngraph::Node>& split) {
const auto inputs = split->inputs();
for (const auto& input : inputs) {
const auto& parentName = input.get_source_output().get_node()->get_friendly_name();
if (InferenceEngine::details::contains(supported, parentName) &&
InferenceEngine::details::contains(splitNames, parentName)) {
markParentSplitAsUnsupported(input.get_source_output().get_node_shared_ptr());
}
}
const auto& name = split->get_friendly_name();
if (InferenceEngine::details::contains(supported, name)) {
supported.erase(name);
}
};
for (const auto& split : splits) {
// Mark the Split as supported only if all of its consumers are supported
bool is_supported = true;
const auto outputs = split->outputs();
for (const auto& output : outputs) {
for (const auto& consumer : output.get_target_inputs()) {
const auto& name = consumer.get_node()->get_friendly_name();
if (!InferenceEngine::details::contains(supported, name) &&
!InferenceEngine::details::contains(concatNames, name) &&
!InferenceEngine::details::contains(splitNames, name)) {
is_supported = false;
break;
}
}
}
if (is_supported) {
supported.emplace(split->get_friendly_name());
} else {
// If the Split is unsupported and its parent is also a Split, mark the parent as unsupported too
markParentSplitAsUnsupported(split);
}
}
for (const auto& concat : concats) {
// Mark the Concat as supported only if all of its parent layers are supported
bool is_supported = true;
const auto inputs = concat->inputs();
for (const auto& input : inputs) {
const auto& name = input.get_source_output().get_node()->get_friendly_name();
if (!InferenceEngine::details::contains(supported, name) &&
!InferenceEngine::details::contains(concatNames, name)) {
is_supported = false;
break;
}
}
if (is_supported) {
supported.emplace(concat->get_friendly_name());
}
}
for (const auto& node : function->get_ops()) {
if (InferenceEngine::details::contains(supported, node->get_friendly_name())) {
for (const auto& inputNodeOutput : node->input_values()) {
if (ngraph::op::is_constant(inputNodeOutput.get_node()) || ngraph::op::is_parameter(inputNodeOutput.get_node())) {
supported.emplace(inputNodeOutput.get_node()->get_friendly_name());
}
}
for (const auto& outputs : node->outputs()) {
for (const auto& outputNodeInput : outputs.get_target_inputs()) {
if (ngraph::op::is_output(outputNodeInput.get_node())) {
supported.emplace(outputNodeInput.get_node()->get_friendly_name());
}
}
}
}
if (ngraph::op::is_constant(node) || ngraph::op::is_parameter(node)) {
if (!InferenceEngine::details::contains(supported, node->output(0).get_target_inputs().begin()->get_node()->get_friendly_name())) {
supported.erase(node->get_friendly_name());
}
} else if (ngraph::op::is_output(node)) {
if (!InferenceEngine::details::contains(supported, node->input_values().begin()->get_node()->get_friendly_name())) {
supported.erase(node->get_friendly_name());
}
}
}
for (const auto& layerName : supported) {
res.supportedLayersMap.emplace(layerName, GetName());
}
} else {
const auto log = std::make_shared<Logger>(
"GraphCompiler",
parsedConfigCopy.logLevel(),
defaultOutput(parsedConfigCopy.compilerLogFilePath()));
const auto supportedLayers = getSupportedLayers(
network,
static_cast<Platform>(parsedConfigCopy.platform()),
parsedConfigCopy.compileConfig(),
log,
GetCore());
if (auto function = network.getFunction()) {
auto clonedNetwork = cloneNetwork(network);
auto convertedNetwork = vpu::FrontEnd::convertNetwork(*clonedNetwork);
res = getQueryNetwork(convertedNetwork, function, GetName(), supportedLayers);
} else {
for (const auto& layerName : supportedLayers) {
res.supportedLayersMap.insert({ layerName, GetName() });
}
}