Removed legacy from hetero plugin (#3398)

Co-authored-by: apankratovantonp <anton.pankratov@intel.com>
Co-authored-by: Alexander Zhogov <alexander.zhogov@intel.com>
This commit is contained in:
Ilya Lavrenov 2020-11-30 07:08:31 +03:00 committed by GitHub
parent ac8a39da87
commit fb1b03752d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
10 changed files with 42 additions and 817 deletions

View File

@ -16,8 +16,8 @@ ie_faster_build(${TARGET_NAME}
UNITY
)
target_link_libraries(${TARGET_NAME} PRIVATE ade pugixml inference_engine
inference_engine_legacy ${NGRAPH_LIBRARIES} inference_engine_transformations)
target_link_libraries(${TARGET_NAME} PRIVATE pugixml inference_engine
${NGRAPH_LIBRARIES} inference_engine_transformations)
ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME})

View File

@ -1,68 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "hetero_ade_util.hpp"
#include <unordered_map>
#include <utility>
#include <ie_icnn_network.hpp>
#include <legacy/ie_util_internal.hpp>
#include <legacy/ie_layers.h>
#include <ade/util/algorithm.hpp>
#include <ade/graph.hpp>
#include <ade/typed_graph.hpp>
namespace InferenceEngine {
namespace {
using VisitedLayersMap = std::unordered_map<CNNLayer::Ptr, ade::NodeHandle>;
using TGraph = ade::TypedGraph<CNNLayerMetadata>;
// Recursively adds 'layer' (and, depth-first, every layer reachable through
// its output data) to the typed graph 'gr', linking each new node to its
// predecessor. 'visited' maps already-translated layers to their graph nodes
// so layers with multiple producers get an extra edge instead of a duplicate
// node. Fix: removed a stray second semicolon after the first assert.
void translateVisitLayer(VisitedLayersMap& visited,
                TGraph& gr,
                const ade::NodeHandle& prevNode,
                const CNNLayer::Ptr& layer) {
    assert(nullptr != layer);
    assert(!ade::util::contains(visited, layer));
    auto node = gr.createNode();
    gr.metadata(node).set(CNNLayerMetadata{layer});
    if (nullptr != prevNode) {
        gr.link(prevNode, node);
    }
    visited.insert({layer, node});
    for (auto&& data : layer->outData) {
        for (auto&& layerIt : getInputTo(data)) {
            auto nextLayer = layerIt.second;
            auto it = visited.find(nextLayer);
            if (visited.end() == it) {
                // First time this consumer is seen: translate it recursively.
                translateVisitLayer(visited, gr, node, nextLayer);
            } else {
                // Already translated: only add the missing edge.
                gr.link(node, it->second);
            }
        }
    }
}
} // namespace
// Builds an ade graph mirror of the given network: one node per CNN layer,
// one edge per producer/consumer data link. Traversal starts from the root
// data objects and walks every layer reachable from them.
void translateNetworkToAde(ade::Graph& gr, ICNNNetwork& network) {
    TGraph typedGraph(gr);
    VisitedLayersMap translated;
    for (auto& rootData : getRootDataObjects(network)) {
        assert(nullptr != rootData);
        for (auto& consumer : getInputTo(rootData)) {
            auto consumerLayer = consumer.second;
            assert(nullptr != consumerLayer);
            if (ade::util::contains(translated, consumerLayer)) {
                continue;  // already reached via another root
            }
            translateVisitLayer(translated, typedGraph, nullptr, consumerLayer);
        }
    }
}
// Unique metadata type name used by ade::TypedGraph to key this metadata kind.
const char* CNNLayerMetadata::name() {
    return "CNNLayerMetadata";
}
} // namespace InferenceEngine

View File

@ -1,24 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
namespace ade {
class Graph;
} // namespace ade
namespace InferenceEngine {
// Per-node metadata attached to the ade graph: the CNN layer a node represents.
struct CNNLayerMetadata {
    CNNLayerPtr layer;
    // Type name used by ade::TypedGraph to identify this metadata kind.
    static const char* name();
};
class ICNNNetwork;
// Builds an ade graph from 'network': one node per layer, one edge per
// producer/consumer data link.
void translateNetworkToAde(ade::Graph& gr, ICNNNetwork& network);
}  // namespace InferenceEngine

View File

@ -5,8 +5,6 @@
#include "ie_metric_helpers.hpp"
#include "hetero_executable_network.hpp"
#include "hetero_async_infer_request.hpp"
#include <legacy/ie_util_internal.hpp>
#include "hetero_graph_splitter.hpp"
#include "hetero_itt.hpp"
#include "xml_parse_utils.h"
#include <caseless.hpp>
@ -44,283 +42,19 @@ using namespace HeteroPlugin;
using namespace InferenceEngine::PluginConfigParams;
using namespace InferenceEngine::HeteroConfigParams;
namespace {
// Default BFS expansion step: appends every non-null consumer of the layer's
// output data objects to the traversal queue.
void forward(const CNNLayerPtr& layer, std::deque<InferenceEngine::CNNLayerPtr>& layers) {
    for (const auto& outData : layer->outData) {
        for (const auto& consumer : getInputTo(outData)) {
            if (consumer.second != nullptr) {
                layers.emplace_back(consumer.second);
            }
        }
    }
}
template<class T>
void traverse(T& inputs,
std::function<void(InferenceEngine::CNNLayerPtr& layer)> apply,
std::function<void(const InferenceEngine::CNNLayerPtr& layer, std::deque<InferenceEngine::CNNLayerPtr>& layers)> expand = forward) {
std::unordered_set<InferenceEngine::CNNLayerPtr> visitedObjects;
std::deque<InferenceEngine::CNNLayerPtr> layersToCheck;
layersToCheck.insert(layersToCheck.end(), inputs.begin(), inputs.end());
while (!layersToCheck.empty()) {
auto& layer = layersToCheck.front();
if (visitedObjects.insert(layer).second) {
apply(layer);
expand(layer, layersToCheck);
}
layersToCheck.pop_front();
}
}
// Breadth-first traversal over all layers of 'network', seeded with the
// layers directly consuming the network inputs.
void traverse(InferenceEngine::ICNNNetwork& network,
              std::function<void(InferenceEngine::CNNLayerPtr& layer)> apply,
              std::function<void(const InferenceEngine::CNNLayerPtr& layer,
                                 std::deque<InferenceEngine::CNNLayerPtr>& layers)> expand = forward) {
    InferenceEngine::InputsDataMap inputs;
    network.getInputsInfo(inputs);
    std::vector<InferenceEngine::CNNLayerPtr> seeds;
    for (const auto& input : inputs) {
        for (const auto& consumer : getInputTo(input.second->getInputData())) {
            assert(nullptr != consumer.second);
            seeds.emplace_back(consumer.second);
        }
    }
    traverse(seeds, apply, expand);
}
// Collects the distinct affinity strings assigned to the network's layers,
// preserving the order in which they are first encountered by the traversal.
std::vector<std::string> getAffinities(InferenceEngine::ICNNNetwork &network) {
    std::vector<std::string> ordered;
    std::unordered_set<std::string> seen;
    traverse(network,
             [&](const InferenceEngine::CNNLayerPtr &layer) {
                 assert(nullptr != layer);
                 if (seen.insert(layer->affinity).second) {
                     ordered.push_back(layer->affinity);
                 }
             });
    return ordered;
}
// Writes 'network' to 'stream' in graphviz .dot format, coloring each layer
// by the index of the subgraph it belongs to and printing its subgraph index
// and device affinity as node properties.
void dumpGraph(InferenceEngine::ICNNNetwork &network,
               const std::vector<LayersSet> &subgraphs,
               std::ostream &stream) {
    // One color per subgraph index; subgraphs beyond the palette reuse the
    // last color.
    static const std::array<const char *, 9> colors{{"#FFC405",
                                                     "#20F608",
                                                     "#F1F290",
                                                     "#C405FF",
                                                     "#BCFF05",
                                                     "#05FFC4",
                                                     "#FFC405",
                                                     "#5A5DF0",
                                                     "#FF2E05"}};
    auto split_color = [subgraphs](const CNNLayerPtr layer,
                                   ordered_properties &printed_properties,
                                   ordered_properties &node_properties) {
        // Locate the subgraph containing this layer (matched by name).
        for (size_t i = 0; i < subgraphs.size(); i++) {
            for (auto s : subgraphs[i]) {
                if (s->name == layer->name) {
                    node_properties.emplace_back(
                        "fillcolor",
                        colors[std::min(i, colors.size() - 1)]);
                    printed_properties.insert(printed_properties.begin(),
                                              std::pair<std::string, std::string>("subgraph#", std::to_string(i)));
                    printed_properties.insert(printed_properties.begin(),
                                              std::pair<std::string, std::string>("device", layer->affinity));
                    return;
                }
            }
        }
    };
    saveGraphToDot(network, stream, split_color);
}
} // namespace
// Legacy (CNNNetwork / IR v7) path of executable-network construction:
// clones the network, resolves per-layer affinities (applying the
// TARGET_FALLBACK policy when none were assigned), splits the network into
// per-device subgraphs and loads each subgraph on its target device.
void HeteroExecutableNetwork::InitCNNImpl(const InferenceEngine::CNNNetwork& network_) {
    auto networkPtr = cloneNet(network_);
    auto& network = *networkPtr;

    // going over all network, if all layers are not assigned to devices, apply the default fallback policy
    details::CNNNetworkIterator i(&network);
    bool allEmpty = true;
    while (i != details::CNNNetworkIterator()) {
        CNNLayer::Ptr layer = *i;
        if (!layer->affinity.empty()) {
            allEmpty = false;
            break;
        }
        i++;
    }

    auto itDumpDotFile = _config.find(HETERO_CONFIG_KEY(DUMP_GRAPH_DOT));
    bool dumpDotFile = itDumpDotFile != _config.end() ? itDumpDotFile->second == YES : false;
#ifndef NDEBUG
    dumpDotFile = true;  // always dump graphs in debug builds
#endif

    if (allEmpty) {
        // No manual affinities: TARGET_FALLBACK is mandatory for the
        // automatic assignment.
        auto it = _config.find("TARGET_FALLBACK");
        if (it != _config.end()) {
            _heteroPlugin->SetAffinity(InferenceEngine::CNNNetwork(networkPtr), _config);
        } else {
            THROW_IE_EXCEPTION << "The 'TARGET_FALLBACK' option was not defined for heterogeneous plugin";
        }
    } else {
        if (dumpDotFile) {
            // Dump user-provided affinities for debugging.
            std::unordered_set<std::string> devicesSet;
            details::CNNNetworkIterator i(&network);
            while (i != details::CNNNetworkIterator()) {
                CNNLayer::Ptr layer = *i;
                if (!layer->affinity.empty()) {
                    devicesSet.insert(layer->affinity);
                }
                i++;
            }
            std::vector<std::string> devices{std::begin(devicesSet), std::end(devicesSet)};
            std::stringstream stream(std::stringstream::out);
            stream << "hetero_affinity_" << network.getName() << ".dot";
            std::ofstream file(stream.str().c_str());
            saveGraphToDot(network, file, HeteroLayerColorer{devices});
        }
    }

    // Verify that every non-input layer received an affinity.
    details::CNNNetworkIterator el(&network);
    bool someEmptyAffinity = false;
    CNNLayer::Ptr layerEmptyAffinity = nullptr;
    while (el != details::CNNNetworkIterator()) {
        CNNLayer::Ptr layer = *el;
        if (!CaselessEq<std::string>()(layer->type, "input") &&
            layer->affinity.empty()) {
            someEmptyAffinity = true;
            layerEmptyAffinity = layer;
            break;
        }
        el++;
    }

    if (allEmpty && someEmptyAffinity) {
        THROW_IE_EXCEPTION << "Hetero plugin used default fallback policy, but some layers eg: \n(Name:" <<
            layerEmptyAffinity->name << ", Type: " << layerEmptyAffinity->type <<
            ") were not able to be assigned on any pointed device.\n" <<
            "It happened because these layers are not supported in plugins by default.\n" <<
            "You need to implement custom layers to support them.";
    } else if (someEmptyAffinity) {
        THROW_IE_EXCEPTION << "Network passed to LoadNetwork has affinity assigned, but some layers eg: \n(Name:" <<
            layerEmptyAffinity->name << ", Type: " << layerEmptyAffinity->type <<
            ") were not assigned to any device.\n" <<
            "It might happen if you assigned layers manually and missed some layers or\n" <<
            "if you used some automatic assigning mode which decided that these layers are not\n" <<
            "supported by any plugin";
    }

    InputsDataMap externalInputsData;
    network.getInputsInfo(externalInputsData);

    OutputsDataMap externalOutputsData;
    network.getOutputsInfo(externalOutputsData);

    // Split into per-affinity subgraphs and order them topologically.
    auto subgraphs = splitGraph(network, getAffinities(network));
    sortSubgraphs(subgraphs);

    if (dumpDotFile) {
        std::stringstream stream(std::stringstream::out);
        stream << "hetero_subgraphs_" << network.getName() << ".dot";
        std::ofstream file(stream.str().c_str());
        dumpGraph(network, subgraphs, file);
    }

    // Clone each subgraph into a standalone network with adjusted I/O.
    std::vector<NetworkDesc> descs;
    std::vector<CNNLayerPtr> tempLayers;
    for (auto &&subgraph : subgraphs) {
        auto affinity = (*subgraph.begin())->affinity;
        tempLayers.assign(subgraph.begin(), subgraph.end());
        auto tempNetwork = cloneNet(tempLayers);
        auto name = network.getName() + "_" + std::to_string(std::distance(subgraphs.data(), &subgraph));
        tempNetwork->setName(name);
        // restoring some outputs from original net if they are not marked as output automatically
        // this might happen if output was set manually for origin network and
        // it doesn't go to next subgraph
        for (auto il : tempLayers) {
            if (externalOutputsData.find(il->name) != externalOutputsData.end()) {
                tempNetwork->addOutput(il->name);
            }
        }
        // update of pre-processing info
        InputsDataMap clonedInputs;
        tempNetwork->getInputsInfo(clonedInputs);
        for (auto &&it : externalInputsData) {
            auto inp = clonedInputs.find(it.first);
            if (inp != clonedInputs.end() && nullptr != inp->second) {
                inp->second->setPrecision(it.second->getPrecision());
                inp->second->getPreProcess() = it.second->getPreProcess();
            }
        }
        // go over all inputs/outputs and right now
        // set precision for intermediate data (not for external) to FP32
        for (auto &&it : clonedInputs) {
            if (externalInputsData.find(it.first) == externalInputsData.end()) {
                it.second->setPrecision(Precision::FP32);
            }
        }
        OutputsDataMap tmpOutputs;
        tempNetwork->getOutputsInfo(tmpOutputs);
        for (auto &&o : tmpOutputs) {
            if (externalOutputsData.find(o.first) == externalOutputsData.end()) {
                o.second->setPrecision(Precision::FP32);
            }
        }
        NetworkDesc desc;
        desc._device = affinity;
        desc._clonedNetwork = CNNNetwork{tempNetwork};
        descs.emplace_back(std::move(desc));
    }

    // Load each cloned subgraph on its device.
    for (auto &&d : descs) {
        IExecutableNetwork::Ptr ret;
        // Tell the device plugin whether this subnetwork consumes any of the
        // original network inputs.
        auto subnetworkInputs = d._clonedNetwork.getInputsInfo();
        bool isInputSubnetwork = (subnetworkInputs.end() != std::find_first_of(
            subnetworkInputs.begin(), subnetworkInputs.end(),
            externalInputsData.begin(), externalInputsData.end(),
            [] (const InputsDataMap::value_type& lhs, const InputsDataMap::value_type& rhs) {
                return lhs.first == rhs.first;
            }));
        auto cfg = _config;
        cfg[PluginConfigInternalParams::KEY_SUBNETWORK_WITH_NETWORK_INPUTS] =
            isInputSubnetwork ? CONFIG_VALUE(YES) : CONFIG_VALUE(NO);
        auto deviceName = d._device;
        auto metaDevices = _heteroPlugin->GetDevicePlugins(deviceName, cfg);
        assert(metaDevices.size() == 1);
        auto loadConfig = metaDevices[deviceName];
        d._network = _heteroPlugin->GetCore()->LoadNetwork(d._clonedNetwork, deviceName, loadConfig);
    }

    networks = std::move(descs);
}
template<typename T>
using NodeMap = std::unordered_map<ngraph::Node*, T>;
void HeteroExecutableNetwork::InitNgraph(const InferenceEngine::CNNNetwork& network_) {
auto function = network_.getFunction();
HeteroExecutableNetwork::HeteroExecutableNetwork(const InferenceEngine::CNNNetwork& network,
const Engine::Configs& config,
Engine* plugin):
InferenceEngine::ExecutableNetworkThreadSafeDefault(
nullptr, std::make_shared<InferenceEngine::ImmediateExecutor>()),
_heteroPlugin{plugin},
_name{network.getName()},
_config{config} {
auto function = network.getFunction();
IE_ASSERT(function != nullptr);
auto clonedFunction = ngraph::clone_function(*function);
auto itDumpDotFile = _config.find(HETERO_CONFIG_KEY(DUMP_GRAPH_DOT));
bool dumpDotFile = itDumpDotFile != _config.end() ? (itDumpDotFile->second == YES) : false;
@ -346,7 +80,7 @@ void HeteroExecutableNetwork::InitNgraph(const InferenceEngine::CNNNetwork& netw
if (queryNetworkResult.supportedLayersMap.empty()) {
auto it = _config.find("TARGET_FALLBACK");
if (it != _config.end()) {
queryNetworkResult = _heteroPlugin->QueryNetwork(network_, _config);
queryNetworkResult = _heteroPlugin->QueryNetwork(network, _config);
} else {
THROW_IE_EXCEPTION << "The 'TARGET_FALLBACK' option was not defined for heterogeneous plugin";
}
@ -380,7 +114,6 @@ void HeteroExecutableNetwork::InitNgraph(const InferenceEngine::CNNNetwork& netw
std::unordered_set<std::string> devices;
NodeMap<std::string> affinities;
// Check that all nodes has user or plugin defined affinities
std::shared_ptr<InferenceEngine::details::CNNNetworkImpl> convertedNetwork;
for (auto&& node : orderedOps) {
auto itAffinity = queryNetworkResult.supportedLayersMap.find(node->get_friendly_name());
if (itAffinity != queryNetworkResult.supportedLayersMap.end()) {
@ -631,8 +364,8 @@ void HeteroExecutableNetwork::InitNgraph(const InferenceEngine::CNNNetwork& netw
std::move(std::begin(nextSubgraphs), std::end(nextSubgraphs), std::back_inserter(orderedSubgraphs));
} while (!allSubgraphs.empty());
InputsDataMap externalInputsData = network_.getInputsInfo();
OutputsDataMap externalOutputsData = network_.getOutputsInfo();
InputsDataMap externalInputsData = network.getInputsInfo();
OutputsDataMap externalOutputsData = network.getOutputsInfo();
networks.resize(orderedSubgraphs.size());
std::vector<std::shared_ptr<ngraph::Function>> subFunctions(orderedSubgraphs.size());
std::vector<bool> isInputSubnetwork(orderedSubgraphs.size());
@ -689,21 +422,6 @@ void HeteroExecutableNetwork::InitNgraph(const InferenceEngine::CNNNetwork& netw
}
}
// Builds a HETERO executable network: dispatches to the legacy CNNNetwork
// path or to the nGraph path depending on the network representation.
HeteroExecutableNetwork::HeteroExecutableNetwork(const InferenceEngine::CNNNetwork& network,
                                                 const Engine::Configs& config,
                                                 Engine* plugin):
    InferenceEngine::ExecutableNetworkThreadSafeDefault(
        nullptr, std::make_shared<InferenceEngine::ImmediateExecutor>()),
    _heteroPlugin{plugin},
    _name{network.getName()},
    _config{config} {
    if (network.getFunction() == nullptr) {
        InitCNNImpl(network);   // legacy (IR v7) representation
    } else {
        InitNgraph(network);    // nGraph function representation
    }
}
HeteroExecutableNetwork::HeteroExecutableNetwork(std::istream& heteroModel,
const std::map<std::string, std::string>& configs,
Engine* heteroPlugin) :
@ -818,7 +536,7 @@ HeteroExecutableNetwork::HeteroExecutableNetwork(std::istream&
descs.emplace_back(NetworkDesc{
deviceName,
loaded ? CNNNetwork{cloneNet(static_cast<InferenceEngine::ICNNNetwork&>(cnnnetwork))} : CNNNetwork{},
loaded ? cnnnetwork : CNNNetwork{},
executableNetwork,
});
}
@ -842,38 +560,27 @@ void HeteroExecutableNetwork::ExportImpl(std::ostream& heteroModel) {
}
auto subnetworksNode = heteroNode.append_child("subnetworks");
std::map<std::shared_ptr<const ngraph::Function>, ::CNNNetwork> convertedNetworks;
for (auto&& subnetwork : networks) {
auto subnet = subnetwork._clonedNetwork;
if (subnet.getFunction()) {
subnet = convertedNetworks[subnet.getFunction()] =
InferenceEngine::CNNNetwork(
std::make_shared<InferenceEngine::details::CNNNetworkImpl>(subnetwork._clonedNetwork));
}
auto subnetFunction = subnetwork._clonedNetwork.getFunction();
IE_ASSERT(subnetFunction != nullptr);
auto subnetworkNode = subnetworksNode.append_child("subnetwork");
subnetworkNode.append_attribute("device").set_value(subnetwork._device.c_str());
auto subnetworkInputsNode = subnetworkNode.append_child("inputs");
auto inputInfo = subnet.getInputsInfo();
for (auto&& input : inputInfo) {
for (auto&& parameter : subnetFunction->get_parameters()) {
auto inputNode = subnetworkInputsNode.append_child("input");
inputNode.append_attribute("name").set_value(input.first.c_str());
inputNode.append_attribute("precision").set_value(input.second->getPrecision().name());
inputNode.append_attribute("name").set_value(parameter->get_friendly_name().c_str());
inputNode.append_attribute("precision").set_value(parameter->get_output_element_type(0).get_type_name().c_str());
}
auto subnetworkOutputsNode = subnetworkNode.append_child("outputs");
auto outputInfo = subnet.getOutputsInfo();
for (auto&& output : outputInfo) {
for (auto&& result : subnetFunction->get_results()) {
auto outputNode = subnetworkOutputsNode.append_child("output");
auto creator = getCreatorLayer(output.second).lock();
outputNode.append_attribute("creatorName").set_value(creator->name.c_str());
outputNode.append_attribute("name").set_value(output.first.c_str());
outputNode.append_attribute("precision").set_value(output.second->getPrecision().name());
auto& outDatas = creator->outData;
auto itData = std::find_if(std::begin(outDatas), std::end(outDatas), [&] (const DataPtr& data) {
return output.first == data->getName();
});
IE_ASSERT(outDatas.end() != itData);
std::uint64_t index = std::distance(std::begin(outDatas), itData);
outputNode.append_attribute("index").set_value(std::to_string(index).c_str());
auto sourceOutput = result->input_value(0);
outputNode.append_attribute("creatorName").set_value(sourceOutput.get_node()->get_friendly_name().c_str());
outputNode.append_attribute("name").set_value(
(sourceOutput.get_node()->get_friendly_name() +
((sourceOutput.get_node()->get_output_size() == 0) ? "" : std::to_string(sourceOutput.get_index()))).c_str());
outputNode.append_attribute("precision").set_value(result->get_input_element_type(0).get_type_name().c_str());
outputNode.append_attribute("index").set_value(std::to_string(sourceOutput.get_index()).c_str());
}
}
@ -899,9 +606,6 @@ void HeteroExecutableNetwork::ExportImpl(std::ostream& heteroModel) {
#else
pugi::xml_document doc;
auto subnet = subnetwork._clonedNetwork;
if (subnet.getFunction()) {
subnet = convertedNetworks[subnet.getFunction()];
}
auto dataSize = static_cast<std::uint64_t>(InferenceEngine::Serialization::FillXmlDoc(subnet, doc));
doc.save(heteroModel, nullptr, pugi::format_raw);
heteroModel << std::endl;

View File

@ -20,7 +20,6 @@
#include "hetero_infer_request.hpp"
#include "ie_icore.hpp"
#include <legacy/cnn_network_impl.hpp>
#include "hetero_async_infer_request.hpp"
namespace HeteroPlugin {

View File

@ -1,236 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "hetero_graph_splitter.hpp"
#include "hetero_ade_util.hpp"
#include <cassert>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include <string>
#include <ade/typed_graph.hpp>
#include <ade/helpers/subgraphs.hpp>
#include <ade/util/filter_range.hpp>
#include <ade/util/iota_range.hpp>
namespace InferenceEngine {
namespace {
// Strategy interface that decides which of the candidate subgraphs produced
// for the current affinity should be accepted by splitGraph().
class ISplitChecker {
public:
    struct GraphSelectionResult final {
        // Sentinel meaning "no subgraph selected".
        static const constexpr std::size_t NoGraph
            = static_cast<std::size_t>(-1);
        std::size_t selectedGraph = NoGraph;  // index into the candidate list
        bool continueSelect = false;          // keep selecting for the same affinity
    };
    virtual ~ISplitChecker() = default;
    // Returns the index of the candidate to accept (or NoGraph).
    virtual GraphSelectionResult selectSubgraph(
        const std::vector<LayersSet>& subgraphs) = 0;
};
// Default selection policy: accepts the largest candidate subgraph and asks
// splitGraph() to keep selecting for the same affinity.
class DefaultSplitChecker : public ISplitChecker {
public:
    // ISplitChecker interface
    GraphSelectionResult selectSubgraph(const std::vector<LayersSet>& subgraphs) override;
};
} // namespace
// Splits 'network' into connected subgraphs of layers that share the same
// affinity, walking the 'plugins' (affinity) list in priority order.
// Throws when nodes remain after all plugins were tried.
std::vector<LayersSet> splitGraph(ICNNNetwork& network,
                                  const std::vector<std::string>& plugins) {
    assert(!plugins.empty());
    ade::Graph gr;
    ade::TypedGraph<CNNLayerMetadata> tgr(gr);
    std::vector<LayersSet> tempSubgraphs;
    LayersSet tempSet1;
    // NOTE(review): tempSet2 appears unused — candidate for removal.
    LayersSet tempSet2;
    // Mirror the CNN network into an ade graph so ade's subgraph machinery
    // can be reused.
    translateNetworkToAde(gr, network);
    std::size_t currentChecker = 0;

    DefaultSplitChecker checker;
    auto getChecker = [&]() {
        assert(currentChecker < plugins.size());
        return &checker;
    };
    // Affinity string of the plugin currently being processed.
    auto getAffinity = [&]()->const std::string& {
        assert(currentChecker < plugins.size());
        return plugins[currentChecker];
    };

    auto nodes = gr.nodes();
    ade::subgraphs::NodesSet availableNodes(nodes.begin(), nodes.end());
    std::vector<LayersSet> finalSubgraphs;
    ade::SubgraphSelfReferenceChecker cycleChecker(nodes);
    while (!availableNodes.empty()) {
        auto subgraphs = ade::selectSubgraphs(
            // Candidate nodes: still unassigned and matching the current
            // plugin's affinity.
            ade::util::filter(ade::util::toRange(availableNodes),
                              [&](const ade::NodeHandle& node) {
            assert(nullptr != node);
            auto layer = tgr.metadata(node).get<CNNLayerMetadata>().layer;
            assert(nullptr != layer);
            return layer->affinity == getAffinity();
        }),
            // Merge across an edge only when both endpoints are available
            // and share the same affinity.
            [&](
            const ade::EdgeHandle& edge,
            ade::SubgraphMergeDirection dir) {
            assert(nullptr != edge);
            auto dstNode = ade::getDstMergeNode(edge, dir);
            assert(nullptr != dstNode);
            if (!ade::util::contains(availableNodes, dstNode)) {
                return false;
            }
            auto srcNode = ade::getSrcMergeNode(edge, dir);
            assert(nullptr != srcNode);
            auto srcLayer = tgr.metadata(srcNode).get<CNNLayerMetadata>().layer;
            auto dstLayer = tgr.metadata(dstNode).get<CNNLayerMetadata>().layer;
            assert(nullptr != srcLayer);
            assert(nullptr != dstLayer);
            return srcLayer->affinity == dstLayer->affinity;
        },
            // Reject merges that would create a cycle between subgraphs.
            [&](
            const ade::subgraphs::NodesSet& acceptedNodes,
            const ade::subgraphs::NodesSet& rejectedNodes) {
            if (cycleChecker(acceptedNodes, rejectedNodes)) {
                return false;
            }
            return true;
        });

        if (!subgraphs.empty()) {
            if (plugins.size() == currentChecker) {
                THROW_IE_EXCEPTION << "Some nodes weren't assigned to plugin";
            }
            // Convert node sets back into layer sets before asking the
            // checker which candidate to accept.
            tempSubgraphs.clear();
            for (auto&& subgraph : subgraphs) {
                assert(!subgraph.empty());
                tempSet1.clear();
                for (auto&& node : subgraph) {
                    assert(nullptr != node);
                    auto layer = tgr.metadata(node).get<CNNLayerMetadata>().layer;
                    assert(nullptr != layer);
                    tempSet1.insert(layer);
                }
                tempSubgraphs.emplace_back(std::move(tempSet1));
            }
            auto result = getChecker()->selectSubgraph(tempSubgraphs);
            const auto selected = result.selectedGraph;
            if (ISplitChecker::GraphSelectionResult::NoGraph !=
                selected) {
                assert(selected < subgraphs.size());
                finalSubgraphs.emplace_back(std::move(tempSubgraphs[selected]));
                // Accepted nodes are no longer available for other plugins.
                for (auto&& node : subgraphs[selected]) {
                    availableNodes.erase(node);
                }
                if (result.continueSelect) {
                    continue;  // keep selecting for the same affinity
                }
            }
        }
        // Nothing (more) to select for this affinity: try the next plugin.
        ++currentChecker;
    }
    return finalSubgraphs;
}
// Selects the largest of the candidate subgraphs and requests that selection
// continue with the remaining candidates for the same affinity.
ISplitChecker::GraphSelectionResult DefaultSplitChecker::selectSubgraph(
        const std::vector<LayersSet>& subgraphs) {
    assert(!subgraphs.empty());
    std::size_t index = 0;
    auto maxSize = subgraphs[0].size();
    for (auto i : ade::util::iota(std::size_t(1), subgraphs.size())) {
        auto size = subgraphs[i].size();
        if (size > maxSize) {
            // Fix: remember the index of the largest subgraph ('i'); the
            // previous code assigned the constant 1 here, so any largest
            // subgraph at index > 1 was never selected.
            index = i;
            maxSize = size;
        }
    }
    GraphSelectionResult ret;
    ret.selectedGraph = index;
    ret.continueSelect = true;
    return ret;
}
namespace {
// Book-keeping for the topological sort of subgraphs.
struct SubgraphDesc {
    std::size_t topoIndex = static_cast<std::size_t>(-1);  // unassigned
    std::unordered_set<std::size_t> dependsOn;             // indices of prerequisite subgraphs
};

// Depth-first post-order visit: assigns 'subgraph' a topological index only
// after every one of its dependencies has received a (smaller) index.
void topoVisitSubgraph(std::vector<SubgraphDesc>& subgraphs,
                       SubgraphDesc& subgraph,
                       std::size_t& topoIndex) {
    constexpr auto kUnassigned = static_cast<std::size_t>(-1);
    if (subgraph.topoIndex != kUnassigned) {
        assert(subgraph.topoIndex < topoIndex);  // placed by an earlier visit
        return;
    }
    for (auto&& dependency : subgraph.dependsOn) {
        topoVisitSubgraph(subgraphs, subgraphs[dependency], topoIndex);
    }
    subgraph.topoIndex = topoIndex++;
}
}  // namespace
// Reorders 'subgraphs' in place so that every subgraph appears after all
// subgraphs that produce data it consumes. Behavior is undefined when there
// are circular references between subgraphs.
void sortSubgraphs(std::vector<LayersSet>& subgraphs) {
    std::vector<SubgraphDesc> descs(subgraphs.size());
    // Build dependency sets: subgraph i depends on subgraph j when some layer
    // of i consumes data produced by a layer of j.
    for (auto i : ade::util::iota(subgraphs.size())) {
        auto& subgraph = subgraphs[i];
        assert(!subgraph.empty());
        for (auto&& layer : subgraph) {
            assert(nullptr != layer);
            for (auto&& dataIt : layer->insData) {
                auto data = dataIt.lock();
                assert(nullptr != data);
                auto prevLayer = getCreatorLayer(data).lock();
                if (nullptr != prevLayer) {
                    for (auto j : ade::util::iota(subgraphs.size())) {
                        if (i != j) {
                            if (ade::util::contains(subgraphs[j], prevLayer)) {
                                descs[i].dependsOn.insert(j);
                                break;
                            }
                        }
                    }
                }
            }
        }
    }
    {
        // Assign topological indices by depth-first visits of dependencies.
        std::size_t topoIndex = 0;
        for (auto&& desc : descs) {
            topoVisitSubgraph(descs, desc, topoIndex);
        }
        assert(subgraphs.size() == topoIndex);
    }
    // Permute the subgraphs into their topological positions.
    std::vector<LayersSet> ret(subgraphs.size());
    for (auto i : ade::util::iota(subgraphs.size())) {
        assert(i < descs.size());
        auto& desc = descs[i];
        auto topoIndex = desc.topoIndex;
        assert(topoIndex != static_cast<std::size_t>(-1));
        assert(topoIndex < ret.size());
        assert(!subgraphs[i].empty());
        ret[topoIndex] = std::move(subgraphs[i]);
    }
    subgraphs = std::move(ret);
}
} // namespace InferenceEngine

View File

@ -1,39 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_blob.h>
#include <legacy/ie_layers.h>
#include <string>
#include <functional>
#include <unordered_set>
#include <vector>
#include <utility>
namespace InferenceEngine {
class ICNNNetwork;
// A subgraph is an (unordered) set of layers.
using LayersSet = std::unordered_set<CNNLayerPtr>;
/// Split network on subgraphs based on layer affinity
///
/// @param network - source network
/// @param plugins - list of supported plugins (affinity names, in priority order)
///
/// @return list of subgraphs
std::vector<LayersSet>
splitGraph(ICNNNetwork& network,
           const std::vector<std::string>& plugins);
/// Sort subgraphs topologically, behaviour is undefined if there are circular
/// references between subgraphs
///
/// @param subgraphs - list of subgraphs
void
sortSubgraphs(std::vector<LayersSet>& subgraphs);
}  // namespace InferenceEngine

View File

@ -5,7 +5,6 @@
#include "hetero_infer_request.hpp"
#include "hetero_itt.hpp"
#include <ie_blob.h>
#include <legacy/ie_util_internal.hpp>
#include <description_buffer.hpp>
#include <ie_layouts.h>
#include <ie_algorithm.hpp>

View File

@ -57,29 +57,12 @@ InferenceEngine::ExecutableNetworkInternal::Ptr Engine::LoadExeNetworkImpl(const
}
DeviceMetaInformationMap metaDevices = GetDevicePlugins(it->second, tconfig);
if (network.getFunction()) {
auto allSupportsNgraph =
std::all_of(std::begin(metaDevices), std::end(metaDevices),
[&] (const DeviceMetaInformationMap::value_type& metaDevice) -> bool {
auto& deviceName = metaDevice.first;
try { GetCore()->QueryNetwork(network, deviceName, metaDevice.second); }
catch (const InferenceEngine::details::InferenceEngineException & ex) {
std::string message = ex.what();
return message.find(NOT_IMPLEMENTED_str) == std::string::npos;
}
return true;
});
if (!allSupportsNgraph) {
auto cnnNetworkImpl = std::make_shared<details::CNNNetworkImpl>(network);
IE_ASSERT(cnnNetworkImpl != nullptr);
return std::make_shared<HeteroExecutableNetwork>(
InferenceEngine::CNNNetwork(cnnNetworkImpl), mergeConfigs(_config, config), this);
} else {
return std::make_shared<HeteroExecutableNetwork>(network, mergeConfigs(_config, config), this);
}
} else {
return std::make_shared<HeteroExecutableNetwork>(network, mergeConfigs(_config, config), this);
auto function = network.getFunction();
if (function == nullptr) {
THROW_IE_EXCEPTION << "HETERO plugin supports just ngraph network representation";
}
return std::make_shared<HeteroExecutableNetwork>(network, mergeConfigs(_config, config), this);
}
ExecutableNetwork Engine::ImportNetworkImpl(std::istream& heteroModel, const Configs& config) {
@ -141,59 +124,6 @@ void Engine::SetConfig(const Configs &configs) {
}
}
// Assigns each device a color from a fixed palette, cycling through the
// palette in the order the devices are listed.
HeteroLayerColorer::HeteroLayerColorer(const std::vector<std::string>& devices) {
    static const std::vector<std::string> colors = {"#5A5DF0", "#20F608", "#F1F290", "#11F110"};
    for (auto&& device : devices) {
        // Fix: distance must be measured from the start of the vector to the
        // current element. The previous operand order
        // (std::distance(&device, devices.data())) produced zero/negative
        // offsets that wrapped around in the unsigned modulo, cycling the
        // palette in reverse instead of forward.
        deviceColorMap[device] = colors[std::distance(devices.data(), &device) % colors.size()];
    }
}
// Annotates a layer node for .dot output: prints its device affinity as the
// first property and fills the node with that device's palette color.
void HeteroLayerColorer::operator()(const CNNLayerPtr layer,
                                    ordered_properties &printed_properties,
                                    ordered_properties &node_properties) {
    auto affinity = layer->affinity;
    printed_properties.insert(printed_properties.begin(), std::make_pair("device", affinity));
    node_properties.emplace_back("fillcolor", deviceColorMap[affinity]);
}
// Runs QueryNetwork over the configured fallback devices and writes the
// resulting device name into each supported layer's 'affinity' attribute.
// Optionally dumps the colored affinity graph when DUMP_GRAPH_DOT is set.
void Engine::SetAffinity(const InferenceEngine::CNNNetwork &network, const Configs &config) {
    QueryNetworkResult qr = QueryNetwork(network, config);

    details::CNNNetworkIterator i(network);
    while (i != details::CNNNetworkIterator()) {
        CNNLayer::Ptr layer = *i;
        auto it = qr.supportedLayersMap.find(layer->name);
        if (it != qr.supportedLayersMap.end()) {
            layer->affinity = it->second;
        }
        i++;
    }

    // Dumping is enabled when requested either in the per-call config or in
    // the global plugin config.
    auto dumpDot = [](const Configs & config) {
        auto it = config.find(HETERO_CONFIG_KEY(DUMP_GRAPH_DOT));
        return it != config.end() ? it->second == YES : false;
    };

    if (dumpDot(config) || dumpDot(_config)) {
        std::unordered_set<std::string> devicesSet;
        details::CNNNetworkIterator i(network);
        while (i != details::CNNNetworkIterator()) {
            CNNLayer::Ptr layer = *i;
            if (!layer->affinity.empty()) {
                devicesSet.insert(layer->affinity);
            }
            i++;
        }
        std::vector<std::string> devices{std::begin(devicesSet), std::end(devicesSet)};
        std::stringstream stream(std::stringstream::out);
        stream << "hetero_affinity_" << network.getName() << ".dot";
        std::ofstream file(stream.str());
        saveGraphToDot(static_cast<const InferenceEngine::ICNNNetwork&>(network),
                       file, HeteroLayerColorer{devices});
    }
}
QueryNetworkResult Engine::QueryNetwork(const CNNNetwork &network, const Configs& config) const {
QueryNetworkResult qr;
@ -210,40 +140,15 @@ QueryNetworkResult Engine::QueryNetwork(const CNNNetwork &network, const Configs
std::string fallbackDevicesStr = it->second;
DeviceMetaInformationMap metaDevices = GetDevicePlugins(fallbackDevicesStr, tconfig);
std::map<std::string, QueryNetworkResult> queryResults;
auto queryNetwork = [&] (const InferenceEngine::CNNNetwork & networkObject) {
// go over devices and call query network
for (auto&& metaDevice : metaDevices) {
auto& deviceName = metaDevice.first;
queryResults[deviceName] = GetCore()->QueryNetwork(networkObject, deviceName, metaDevice.second);
}
return queryResults;
};
auto function = network.getFunction();
if (function == nullptr) {
THROW_IE_EXCEPTION << "HETERO plugin supports just ngraph network representation";
}
if (network.getFunction()) {
auto allSupportsNgraph =
std::all_of(std::begin(metaDevices), std::end(metaDevices),
[&] (const DeviceMetaInformationMap::value_type& metaDevice) -> bool {
auto& deviceName = metaDevice.first;
try { GetCore()->QueryNetwork(network, deviceName, metaDevice.second); }
catch (const InferenceEngine::details::InferenceEngineException & ex) {
std::string message = ex.what();
return message.find(NOT_IMPLEMENTED_str) == std::string::npos;
}
return true;
});
if (!allSupportsNgraph) {
if (contains(tconfig, CONFIG_KEY_INTERNAL(AGGREGATED_PLUGIN))) {
THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str;
} else {
auto cnnNetworkImpl = std::make_shared<details::CNNNetworkImpl>(network);
queryNetwork(InferenceEngine::CNNNetwork(cnnNetworkImpl));
}
} else {
queryNetwork(network);
}
} else {
queryNetwork(network);
std::map<std::string, QueryNetworkResult> queryResults;
for (auto&& metaDevice : metaDevices) {
auto& deviceName = metaDevice.first;
queryResults[deviceName] = GetCore()->QueryNetwork(network, deviceName, metaDevice.second);
}
// WARNING: Here is devices with user set priority

View File

@ -13,7 +13,6 @@
#include <unordered_map>
#include <vector>
#include <utility>
#include <legacy/ie_util_internal.hpp>
namespace HeteroPlugin {
@ -40,24 +39,10 @@ public:
ExecutableNetwork ImportNetworkImpl(std::istream& heteroModel, const Configs& config) override;
void SetAffinity(const InferenceEngine::CNNNetwork& network, const Configs &config);
DeviceMetaInformationMap GetDevicePlugins(const std::string& targetFallback,
const Configs & localConfig) const;
private:
Configs GetSupportedConfig(const Configs& config, const std::string & deviceName) const;
};
// Graphviz dot-dump helper: colors every layer node according to the device
// its affinity points to.
struct HeteroLayerColorer {
    explicit HeteroLayerColorer(const std::vector<std::string>& devices);
    // Adds the "device" property and fill color for the given layer node.
    void operator() (const CNNLayerPtr layer,
                     ordered_properties &printed_properties,
                     ordered_properties &node_properties);
    // Device name -> palette color, filled by the constructor.
    std::unordered_map<std::string, std::string> deviceColorMap;
};
} // namespace HeteroPlugin