Fix query network for hetero plugin (#10556)

* Fix query network for hetero plugin

* Apply comments

* Fix1

* Add tests

* Apply comments 2

* Apply comments 3
This commit is contained in:
Oleg Pipikin
2022-03-31 07:24:46 +03:00
committed by GitHub
parent d107cec39f
commit 88e20199f0
9 changed files with 645 additions and 29 deletions

View File

@@ -335,6 +335,17 @@ protected:
void SetExeNetworkInfo(const std::shared_ptr<IExecutableNetworkInternal>& exeNetwork,
const std::shared_ptr<const ov::Model>& function);
/**
* @brief Returns set of nodes which were removed after transformation.
* If originalFunction contains node1 and transformedFunction does not
* contain node1 in the ops list, node1 will be returned.
* @param originalFunction Original network
* @param transformedFunction Transformed network
* @return Set of strings which contains removed node names
*/
std::unordered_set<std::string> GetRemovedNodes(const std::shared_ptr<const ov::Model>& originalFunction,
const std::shared_ptr<const ov::Model>& transformedFunction) const;
std::string _pluginName; //!< A device name that plugins enables
std::map<std::string, std::string> _config; //!< A map config keys -> values
std::weak_ptr<ov::ICore> _core; //!< A pointer to ICore interface

View File

@@ -20,6 +20,7 @@
#include "cnn_network_ngraph_impl.hpp"
#include "cpp/ie_cnn_network.h"
#include "exec_graph_info.hpp"
#include "ie_algorithm.hpp"
#include "ie_api.h"
#include "ie_icore.hpp"
#include "ie_iextension.h"
@@ -300,6 +301,26 @@ void IInferencePlugin::SetExeNetworkInfo(const std::shared_ptr<IExecutableNetwor
exeNetwork->SetPointerToPlugin(shared_from_this());
}
std::unordered_set<std::string> IInferencePlugin::GetRemovedNodes(
    const std::shared_ptr<const ov::Model>& originalFunction,
    const std::shared_ptr<const ov::Model>& transformedFunction) const {
    // Gather every name still present in the transformed model, including the
    // original-layer names that fusion transformations recorded on each node.
    std::unordered_set<std::string> presentNames;
    for (auto&& transformedNode : transformedFunction->get_ops()) {
        presentNames.emplace(transformedNode->get_friendly_name());
        for (auto&& fusedName : ngraph::getFusedNamesVector(transformedNode))
            presentNames.emplace(fusedName);
    }
    // Any original node whose friendly name survived nowhere was removed
    // by the transformation pipeline.
    std::unordered_set<std::string> removedNames;
    for (auto&& originalNode : originalFunction->get_ops()) {
        const auto& name = originalNode->get_friendly_name();
        if (!InferenceEngine::details::contains(presentNames, name))
            removedNames.emplace(name);
    }
    return removedNames;
}
void SetExeNetworkInfo(const std::shared_ptr<IExecutableNetworkInternal>& exeNetwork,
const std::shared_ptr<const ov::Model>& function,
bool new_api) {

View File

@@ -915,28 +915,49 @@ QueryNetworkResult Engine::QueryNetwork(const CNNNetwork& network, const std::ma
}
auto clonedNetwork = InferenceEngine::details::cloneNetwork(network);
auto clonnedFunction = clonedNetwork.getFunction();
const auto& lptProp = config.find(InferenceEngine::PluginConfigInternalParams::KEY_LP_TRANSFORMS_MODE);
const bool enableLPT = (lptProp != config.end() && lptProp->second == PluginConfigParams::YES) /* enabled in the orig_config*/
|| Config::LPTransformsMode::On == engConfig.lpTransformsMode /* or already enabled */;
const bool enableSnippets = !(conf.cache_dir.empty() || conf.enableDynamicBatch || (conf.enforceBF16
&& dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core)));
Transformation(clonedNetwork, enableLPT, enableSnippets, isLegacyAPI());
auto ops = clonedNetwork.getFunction()->get_ordered_ops();
std::unordered_set<std::string> supported;
auto ops = clonnedFunction->get_ordered_ops();
// Mark removed nodes as supported
std::unordered_set<std::string> supported = GetRemovedNodes(function, clonnedFunction);;
std::unordered_set<std::string> unsupported;
for (auto op : ops) {
auto layerIsSupported = [&] {
std::unique_ptr<Node> ptr;
try {
ptr.reset(Node::factory().create(op, {mkldnn::engine::kind::cpu, 0}, extensionManager, fake_w_cache));
} catch (InferenceEngine::Exception&) {
return false;
auto layerIsSupported = [&](const std::shared_ptr<ngraph::Node>& op) {
std::unique_ptr<Node> ptr;
try {
ptr.reset(Node::factory().create(op, {mkldnn::engine::kind::cpu, 0}, extensionManager, fake_w_cache));
} catch (const InferenceEngine::Exception&) {
return false;
}
return true;
};
for (auto&& op : ops) {
bool isSupported = false;
bool wasNodeAlreadyChecked = false;
if (InferenceEngine::details::contains(originalOps, op->get_friendly_name())) {
isSupported = layerIsSupported(op);
wasNodeAlreadyChecked = true;
if (isSupported) {
supported.emplace(op->get_friendly_name());
} else {
unsupported.emplace(op->get_friendly_name());
}
return true;
} ();
}
for (auto&& fusedLayerName : ngraph::getFusedNamesVector(op)) {
if (InferenceEngine::details::contains(originalOps, fusedLayerName)) {
if (layerIsSupported) {
if (!wasNodeAlreadyChecked) {
isSupported = layerIsSupported(op);
wasNodeAlreadyChecked = true;
}
if (isSupported) {
supported.emplace(fusedLayerName);
} else {
unsupported.emplace(fusedLayerName);

View File

@@ -398,10 +398,14 @@ QueryNetworkResult Plugin::QueryNetwork(const CNNNetwork& network,
auto clonedNetwork = CloneAndTransformNetwork(network, conf);
auto func = clonedNetwork.getFunction();
auto ops = func->get_ordered_ops();
std::unordered_set<std::string> supported;
// Mark removed nodes as supported
std::unordered_set<std::string> supported = GetRemovedNodes(function, func);;
std::unordered_set<std::string> unsupported;
std::unordered_set<std::string> constantsNames;
std::unordered_set<std::string> supportedNotOriginal;
std::unordered_set<std::string> unsupportedNotOriginal;
std::vector<std::shared_ptr<ngraph::Node>> constants;
std::map<std::string, ngraph::PartialShape> shapes;
@@ -442,34 +446,46 @@ QueryNetworkResult Plugin::QueryNetwork(const CNNNetwork& network,
return false;
}
if (ngraph::is_type<const ngraph::op::v0::Constant>(node)) {
constantsNames.emplace(node->get_friendly_name());
constants.push_back(node);
return false;
}
return prog.IsOpSupported(network, node) &&
!ngraph::op::is_parameter(node) &&
!ngraph::op::is_output(node);
return prog.IsOpSupported(network, node) ||
ngraph::op::is_parameter(node) ||
ngraph::op::is_output(node);
};
// Get ops after transformations and check if it's supported
// Transformations might lead to the situation when single node is merged to multiple operations,
// so we mark original op as supported only if all nodes that it was merged into are supported
bool wasNodeAlreadyChecked = false;
bool isSupported = false;
for (auto&& op : ops) {
wasNodeAlreadyChecked = false;
isSupported = false;
bool isSupported = layerIsSupported(op);
if (InferenceEngine::details::contains(originalOpNames, op->get_friendly_name())) {
if (isSupported) {
supported.emplace(op->get_friendly_name());
} else {
unsupported.emplace(op->get_friendly_name());
}
} else {
if (isSupported) {
supportedNotOriginal.emplace(op->get_friendly_name());
} else {
unsupportedNotOriginal.emplace(op->get_friendly_name());
}
}
for (auto&& fusedLayerName : ngraph::getFusedNamesVector(op)) {
if (InferenceEngine::details::contains(originalOpNames, fusedLayerName)) {
if (!wasNodeAlreadyChecked) {
isSupported = layerIsSupported(op);
wasNodeAlreadyChecked = true;
}
if (isSupported) {
supported.emplace(fusedLayerName);
} else {
unsupported.emplace(fusedLayerName);
}
} else {
if (isSupported) {
supportedNotOriginal.emplace(fusedLayerName);
} else {
unsupportedNotOriginal.emplace(fusedLayerName);
}
}
}
}
@@ -481,22 +497,35 @@ QueryNetworkResult Plugin::QueryNetwork(const CNNNetwork& network,
}
unsupported.clear();
for (auto&& layerName : unsupportedNotOriginal) {
if (InferenceEngine::details::contains(supportedNotOriginal, layerName)) {
supportedNotOriginal.erase(layerName);
}
}
unsupportedNotOriginal.clear();
// 1. Constants are marked as supported when all outputs can be offloaded to GPU
for (const auto& op : constants) {
bool is_supported = true;
for (size_t i = 0; i < op->get_output_size(); i++) {
auto outTensors = op->get_output_target_inputs(i);
for (auto& t : outTensors) {
auto output = t.get_node();
const auto& name = output->get_friendly_name();
if (!InferenceEngine::details::contains(supported, name)) {
if (!InferenceEngine::details::contains(supported, name) &&
!InferenceEngine::details::contains(supportedNotOriginal, name)) {
is_supported = false;
break;
}
}
}
if (is_supported) {
supported.emplace(op->get_friendly_name());
if (InferenceEngine::details::contains(originalOpNames, op->get_friendly_name()))
supported.emplace(op->get_friendly_name());
for (auto&& fusedLayerName : ngraph::getFusedNamesVector(op))
if (InferenceEngine::details::contains(originalOpNames, fusedLayerName))
supported.emplace(fusedLayerName);
}
}

View File

@@ -47,6 +47,7 @@ InferenceEngine::QueryNetworkResult getQueryNetwork(const InferenceEngine::CNNNe
}
};
for (InferenceEngine::details::CNNNetworkIterator itLayer{convertedNetwork};
itLayer != InferenceEngine::details::CNNNetworkIterator();
itLayer++) {
@@ -55,9 +56,25 @@ InferenceEngine::QueryNetworkResult getQueryNetwork(const InferenceEngine::CNNNe
continue;
}
bool isSupported = false;
bool wasNodeAlreadyChecked = false;
if (InferenceEngine::details::contains(originalOps, fusedNode->get_friendly_name())) {
isSupported = isLayerSupported(itLayer);
wasNodeAlreadyChecked = true;
if (isSupported) {
supported.emplace(fusedNode->get_friendly_name());
} else {
unsupported.emplace(fusedNode->get_friendly_name());
}
}
for (auto& fusedLayerName : ngraph::getFusedNamesVector(fusedNode)) {
if (InferenceEngine::details::contains(originalOps, fusedLayerName)) {
if (isLayerSupported(itLayer)) {
if (!wasNodeAlreadyChecked) {
isSupported = isLayerSupported(itLayer);
wasNodeAlreadyChecked = true;
}
if (isSupported) {
supported.emplace(fusedLayerName);
} else {
unsupported.emplace(fusedLayerName);

View File

@@ -180,9 +180,16 @@ QueryNetworkResult Engine::QueryNetwork(
if (auto function = supportedNetwork.getFunction()) {
auto clonedNetwork = cloneNetwork(supportedNetwork);
auto clonedFunction = clonedNetwork.getFunction();
auto convertedNetwork = vpu::FrontEnd::convertNetwork(clonedNetwork);
QueryNetworkResult supportedRes = getQueryNetwork(clonedNetwork, function, GetName(), supportedLayers);
auto removedNodeNames = GetRemovedNodes(function, clonedFunction);
for (const auto& layer : removedNodeNames) {
res.supportedLayersMap.emplace(layer, GetName());
}
for (const auto& layer : supportedRes.supportedLayersMap) {
res.supportedLayersMap.insert(layer);
}

View File

@@ -0,0 +1,22 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/plugin/hetero_query_network.hpp"
using namespace HeteroTests;
namespace HeteroTests {
TEST_P(HeteroQueryNetworkTest, HeteroSinglePlugin) {
std::string deviceName = GetParam();
RunTest(deviceName);
}
INSTANTIATE_TEST_CASE_P(
HeteroCpu,
HeteroQueryNetworkTest,
::testing::Values(
std::string("HETERO:CPU")));
} // namespace HeteroTests

View File

@@ -0,0 +1,22 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/plugin/hetero_query_network.hpp"
using namespace HeteroTests;
namespace HeteroTests {
TEST_P(HeteroQueryNetworkTest, HeteroSinglePlugin) {
std::string deviceName = GetParam();
RunTest(deviceName);
}
INSTANTIATE_TEST_CASE_P(
HeteroGpu,
HeteroQueryNetworkTest,
::testing::Values(
std::string("HETERO:GPU")));
} // namespace HeteroTests

View File

@@ -0,0 +1,466 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "common_test_utils/test_common.hpp"
#include <ngraph/opsets/opset8.hpp>
#include <ie/ie_core.hpp>
using namespace InferenceEngine;
namespace HeteroTests {
// Parameterized behavior test: the parameter is a device name such as
// "HETERO:CPU". RunTest loads a fixed IR subgraph, calls QueryNetwork on the
// given device and checks that every original layer is reported as supported.
class HeteroQueryNetworkTest : public ::testing::TestWithParam<std::string> {
public:
// Loads the embedded IR (a subgraph of the OMZ "ctpn" model), fills its
// constant weights, runs Core::QueryNetwork for deviceName and verifies the
// supported-layers map covers all 27 layers of the original network.
void RunTest(std::string& deviceName) {
ASSERT_GT(deviceName.size(), 0);
//this model is a subgraph of "ctpn" model from omz
std::string model = R"V0G0N(
<net name="Network" version="11">
<layers>
<layer id="0" name="input" type="Parameter" version="opset1">
<data shape="1,37,370,2" element_type="F32" />
<output>
<port id="0" precision="FP32" names="input">
<dim>1</dim>
<dim>37</dim>
<dim>370</dim>
<dim>2</dim>
</port>
</output>
</layer>
<layer id="338" name="rpn_cls_prob/Transpose7580/value758213165" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="0" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="339" name="rpn_cls_prob/Transpose7580" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>37</dim>
<dim>370</dim>
<dim>2</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>37</dim>
<dim>370</dim>
</port>
</output>
</layer>
<layer id="340" name="rpn_cls_prob/Transpose/value756213066" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="341" name="rpn_cls_prob/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>37</dim>
<dim>370</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="rpn_cls_prob:0">
<dim>1</dim>
<dim>37</dim>
<dim>370</dim>
<dim>2</dim>
</port>
</output>
</layer>
<layer id="342" name="Shape_2" type="ShapeOf" version="opset3">
<data output_type="i32" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>2</dim>
<dim>37</dim>
<dim>370</dim>
</port>
</input>
<output>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="343" name="Shape_2/GatherNCHWtoNHWC_input_port_1/value778413036" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="64" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="344" name="Shape_2/GatherNCHWtoNHWC_input_port_2/value778613297" type="Const" version="opset1">
<data element_type="i64" shape="" offset="80" size="8" />
<output>
<port id="0" precision="I64" />
</output>
</layer>
<layer id="345" name="Shape_2/GatherNCHWtoNHWC" type="Gather" version="opset8">
<data batch_dims="0" />
<input>
<port id="0" precision="I32">
<dim>4</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
<port id="2" precision="I64" />
</input>
<output>
<port id="3" precision="I32" names="Shape_2:0">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="346" name="strided_slice_6/stack" type="Const" version="opset1">
<data element_type="i64" shape="1" offset="88" size="8" />
<output>
<port id="0" precision="I64" names="strided_slice_6/stack:0">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="347" name="strided_slice_6/stack_1" type="Const" version="opset1">
<data element_type="i64" shape="1" offset="96" size="8" />
<output>
<port id="0" precision="I64" names="strided_slice_6/stack_1:0">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="348" name="strided_slice_6/stack_2" type="Const" version="opset1">
<data element_type="i64" shape="1" offset="104" size="8" />
<output>
<port id="0" precision="I64" names="strided_slice_6/stack_2:0">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="349" name="strided_slice_6" type="StridedSlice" version="opset1">
<data begin_mask="0" end_mask="0" new_axis_mask="0" shrink_axis_mask="1" ellipsis_mask="0" />
<input>
<port id="0" precision="I32">
<dim>4</dim>
</port>
<port id="1" precision="I64">
<dim>1</dim>
</port>
<port id="2" precision="I64">
<dim>1</dim>
</port>
<port id="3" precision="I64">
<dim>1</dim>
</port>
</input>
<output>
<port id="4" precision="I32" names="strided_slice_6:0" />
</output>
</layer>
<layer id="350" name="Reshape_2/shape/Unsqueeze_input_port_1/value" type="Const" version="opset1">
<data element_type="i64" shape="1" offset="112" size="8" />
<output>
<port id="0" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="351" name="Reshape_2/shape/Unsqueeze" type="Unsqueeze" version="opset1">
<input>
<port id="0" precision="I32" />
<port id="1" precision="I64">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="352" name="strided_slice_7/stack" type="Const" version="opset1">
<data element_type="i64" shape="1" offset="120" size="8" />
<output>
<port id="0" precision="I64" names="strided_slice_7/stack:0">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="353" name="strided_slice_7/stack_1" type="Const" version="opset1">
<data element_type="i64" shape="1" offset="128" size="8" />
<output>
<port id="0" precision="I64" names="strided_slice_7/stack_1:0">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="354" name="strided_slice_7/stack_2" type="Const" version="opset1">
<data element_type="i64" shape="1" offset="136" size="8" />
<output>
<port id="0" precision="I64" names="strided_slice_7/stack_2:0">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="355" name="strided_slice_7" type="StridedSlice" version="opset1">
<data begin_mask="0" end_mask="0" new_axis_mask="0" shrink_axis_mask="1" ellipsis_mask="0" />
<input>
<port id="0" precision="I32">
<dim>4</dim>
</port>
<port id="1" precision="I64">
<dim>1</dim>
</port>
<port id="2" precision="I64">
<dim>1</dim>
</port>
<port id="3" precision="I64">
<dim>1</dim>
</port>
</input>
<output>
<port id="4" precision="I32" names="strided_slice_7:0" />
</output>
</layer>
<layer id="356" name="Reshape_2/shape/Unsqueeze531_input_port_1/value" type="Const" version="opset1">
<data element_type="i64" shape="1" offset="144" size="8" />
<output>
<port id="0" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="357" name="Reshape_2/shape/Unsqueeze531" type="Unsqueeze" version="opset1">
<input>
<port id="0" precision="I32" />
<port id="1" precision="I64">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="358" name="Reshape_2/shape/Unsqueeze533" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="152" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="359" name="Reshape_2/shape/Unsqueeze535" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="156" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="360" name="Reshape_2/shape" type="Concat" version="opset1">
<data axis="0" />
<input>
<port id="0" precision="I32">
<dim>1</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
<port id="2" precision="I32">
<dim>1</dim>
</port>
<port id="3" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="4" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="361" name="Reshape_2/Cast_1" type="Convert" version="opset1">
<data destination_type="i64" />
<input>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="1" precision="I64" names="Reshape_2/shape:0">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="362" name="Reshape_2" type="Reshape" version="opset1">
<data special_zero="false" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>37</dim>
<dim>370</dim>
<dim>2</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="Reshape_2,Reshape_2:0">
<dim>1</dim>
<dim>37</dim>
<dim>37</dim>
<dim>20</dim>
</port>
</output>
</layer>
<layer id="363" name="Reshape_2:0" type="Result" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>37</dim>
<dim>37</dim>
<dim>20</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="339" to-port="0" />
<edge from-layer="338" from-port="0" to-layer="339" to-port="1" />
<edge from-layer="339" from-port="2" to-layer="341" to-port="0" />
<edge from-layer="339" from-port="2" to-layer="342" to-port="0" />
<edge from-layer="340" from-port="0" to-layer="341" to-port="1" />
<edge from-layer="341" from-port="2" to-layer="362" to-port="0" />
<edge from-layer="342" from-port="1" to-layer="345" to-port="0" />
<edge from-layer="343" from-port="0" to-layer="345" to-port="1" />
<edge from-layer="344" from-port="0" to-layer="345" to-port="2" />
<edge from-layer="345" from-port="3" to-layer="355" to-port="0" />
<edge from-layer="345" from-port="3" to-layer="349" to-port="0" />
<edge from-layer="346" from-port="0" to-layer="349" to-port="1" />
<edge from-layer="347" from-port="0" to-layer="349" to-port="2" />
<edge from-layer="348" from-port="0" to-layer="349" to-port="3" />
<edge from-layer="349" from-port="4" to-layer="351" to-port="0" />
<edge from-layer="350" from-port="0" to-layer="351" to-port="1" />
<edge from-layer="351" from-port="2" to-layer="360" to-port="0" />
<edge from-layer="352" from-port="0" to-layer="355" to-port="1" />
<edge from-layer="353" from-port="0" to-layer="355" to-port="2" />
<edge from-layer="354" from-port="0" to-layer="355" to-port="3" />
<edge from-layer="355" from-port="4" to-layer="357" to-port="0" />
<edge from-layer="356" from-port="0" to-layer="357" to-port="1" />
<edge from-layer="357" from-port="2" to-layer="360" to-port="1" />
<edge from-layer="358" from-port="0" to-layer="360" to-port="2" />
<edge from-layer="359" from-port="0" to-layer="360" to-port="3" />
<edge from-layer="360" from-port="4" to-layer="361" to-port="0" />
<edge from-layer="361" from-port="1" to-layer="362" to-port="1" />
<edge from-layer="362" from-port="2" to-layer="363" to-port="0" />
</edges>
</net>
)V0G0N";
InferenceEngine::Core ie;
// 160-byte weight buffer backing every Const layer above; each layer reads
// the byte range given by its offset/size attributes in the IR.
Blob::Ptr weights;
weights = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {160}, InferenceEngine::Layout::C));
weights->allocate();
// The single buffer is deliberately viewed through two pointer types:
// dataI64[i] addresses byte 8*i, dataI32[i] addresses byte 4*i.
auto *dataI64 = weights->buffer().as<int64_t *>();
auto *dataI32 = weights->buffer().as<int32_t *>();
// offset 0 (i64 x4): permutation for rpn_cls_prob/Transpose7580
dataI64[0] = 0;
dataI64[1] = 3;
dataI64[2] = 1;
dataI64[3] = 2;
// offset 32 (i64 x4): permutation for rpn_cls_prob/Transpose
dataI64[4] = 0;
dataI64[5] = 2;
dataI64[6] = 3;
dataI64[7] = 1;
// offset 64 (i32 x4): indices for Shape_2/GatherNCHWtoNHWC
dataI32[16] = 0;
dataI32[17] = 2;
dataI32[18] = 3;
dataI32[19] = 1;
// offset 80 (i64 scalar): Gather axis
dataI64[10] = 0;
// offsets 88/96/104 (i64 x1 each): strided_slice_6 begin/end/stride stacks
dataI64[11] = 0;
dataI64[12] = 1;
dataI64[13] = 1;
// offset 112 (i64 x1): Reshape_2/shape/Unsqueeze axis
dataI64[14] = 0;
// offsets 120/128/136 (i64 x1 each): strided_slice_7 begin/end/stride stacks
dataI64[15] = 1;
dataI64[16] = 2;
dataI64[17] = 1;
// offset 144 (i64 x1): Reshape_2/shape/Unsqueeze531 axis
dataI64[18] = 0;
// offsets 152/156 (i32 x1 each): trailing reshape dims (-1 and 20)
dataI32[38] = 0xFFFFFFFF;
dataI32[39] = 20;
auto network = ie.ReadNetwork(model, weights);
QueryNetworkResult result;
OV_ASSERT_NO_THROW(result = ie.QueryNetwork(network, deviceName));
// All 27 layers of the original IR must be reported (including nodes the
// plugin's transformations may have removed or fused).
ASSERT_EQ(27, result.supportedLayersMap.size());
std::set<std::string> checkNames = {"input",
"rpn_cls_prob/Transpose7580/value758213165",
"rpn_cls_prob/Transpose7580",
"rpn_cls_prob/Transpose/value756213066",
"rpn_cls_prob/Transpose",
"Shape_2",
"Shape_2/GatherNCHWtoNHWC_input_port_1/value778413036",
"Shape_2/GatherNCHWtoNHWC_input_port_2/value778613297",
"Shape_2/GatherNCHWtoNHWC",
"strided_slice_6/stack",
"strided_slice_6/stack_1",
"strided_slice_6/stack_2",
"strided_slice_6",
"Reshape_2/shape/Unsqueeze_input_port_1/value",
"Reshape_2/shape/Unsqueeze",
"strided_slice_7/stack",
"strided_slice_7/stack_1",
"strided_slice_7/stack_2",
"strided_slice_7",
"Reshape_2/shape/Unsqueeze531_input_port_1/value",
"Reshape_2/shape/Unsqueeze531",
"Reshape_2/shape/Unsqueeze533",
"Reshape_2/shape/Unsqueeze535",
"Reshape_2/shape",
"Reshape_2/Cast_1",
"Reshape_2",
"Reshape_2:0"};
// Every original layer name must be present in the supported-layers map.
for (auto&& name : checkNames)
EXPECT_NE(result.supportedLayersMap.find(name), result.supportedLayersMap.end());
}
};
} // namespace HeteroTests