Remove implicit conversion from getInputTo, getCreatorLayer (#1274)
* Added ctor for CNNNetworkImpl to convert from ngraphImpl
* Re-use in all places instead of manual conversion
* Hide convertToCNNNetworkImpl usage
* Removed conversion from getCreatorLayer
* Fixes 2
* Fixes 3
* Fixes 4
* Fixed ieFuncTests
* Fixed more tests
* Fixed LPT tests
* Remove useless test
* Fixed GNA
* Fixed Gleb's comments
* Fixed Core integration tests
* Trying to fix python
* Fixed GPU tests
* Small fixes
* Fixed QueryNetwork after removing implicit conversion
* Fixed Core integration tests for QueryNetwork
* Fixed python; MULTI device QueryNetwork
* Fixed MULTI QueryNetwork
* Removed unused methods
* Enabled LPT FullyConnectedTestModel test
* Fixed typo in python
This commit is contained in:
parent: 930d687ed9
commit: 949fee3cfc
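The recurring pattern in this change is an explicit, caller-side conversion: a network backed by an ngraph::Function is converted once through the new CNNNetworkImpl constructor, instead of being converted implicitly from inside getInputTo/getCreatorLayer. A minimal sketch of the call-site pattern, assuming only what the diff below shows (the helper name processLegacy is hypothetical; the body mirrors GNAPlugin::LoadNetwork from this commit):

    #include <memory>
    #include <cnn_network_impl.hpp>

    // Sketch of the explicit-conversion pattern used across this commit.
    // If the network holds an ngraph::Function, build the legacy
    // CNNLayer-based representation once; otherwise use the input as-is.
    void processLegacy(InferenceEngine::ICNNNetwork & _network) {  // hypothetical helper
        std::shared_ptr<InferenceEngine::details::CNNNetworkImpl> convertedNetwork;
        if (_network.getFunction()) {
            convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(_network);
        }
        InferenceEngine::ICNNNetwork & network = convertedNetwork ? *convertedNetwork : _network;
        // ... all further work uses `network`, which is guaranteed to be
        // the old CNNLayer-based representation at this point ...
    }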
@@ -57,6 +57,7 @@ cdef class IECore:

 cdef class DataPtr:
     cdef C.DataPtr _ptr
+    cdef C.IENetwork * _ptr_network


 cdef class CDataPtr:
     cdef C.CDataPtr _ptr

@@ -69,6 +70,7 @@ cdef class TensorDesc:

 cdef class InputInfoPtr:
     cdef InputInfo.Ptr _ptr
+    cdef C.IENetwork * _ptr_network


 cdef class InputInfoCPtr:
     cdef InputInfo.CPtr _ptr
@@ -651,6 +651,7 @@ cdef class InputInfoPtr:
     def input_data(self):
         cdef C.DataPtr c_data_ptr = deref(self._ptr).getInputData()
         data_ptr = DataPtr()
+        data_ptr._ptr_network = self._ptr_network
         data_ptr._ptr = c_data_ptr
         return data_ptr

@@ -694,6 +695,10 @@ cdef class InputInfoCPtr:

 ## This class is the layer data representation.
 cdef class DataPtr:
+    ## Default constructor
+    def __init__(self):
+        self._ptr_network = NULL
+
     ## Name of the data object
     @property
     def name(self):

@@ -735,8 +740,13 @@ cdef class DataPtr:
     @property
     def creator_layer(self):
-        cdef C.CNNLayerWeakPtr _l_ptr = C.getCreatorLayer(self._ptr)
+        cdef C.CNNLayerWeakPtr _l_ptr
+        cdef IENetLayer creator_layer
+
+        if self._ptr_network != NULL:
+            deref(self._ptr_network).convertToOldRepresentation()
+        _l_ptr = C.getCreatorLayer(self._ptr)
+
         creator_layer = IENetLayer()
         if _l_ptr.lock() != NULL:
             creator_layer._ptr = _l_ptr.lock()

@@ -746,8 +756,13 @@ cdef class DataPtr:
     @property
     def input_to(self):
-        cdef map[string, C.CNNLayerPtr] _l_ptr_map = C.getInputTo(self._ptr)
+        cdef map[string, C.CNNLayerPtr] _l_ptr_map
+        cdef IENetLayer input_to
+
+        if self._ptr_network != NULL:
+            deref(self._ptr_network).convertToOldRepresentation()
+        _l_ptr_map = C.getInputTo(self._ptr)
+
         input_to_list = []
         for layer in _l_ptr_map:
             input_to = IENetLayer()

@@ -1496,6 +1511,7 @@ cdef class IENetwork:
         for input in c_inputs:
             input_info_ptr = InputInfoPtr()
             input_info_ptr._ptr = input.second
+            input_info_ptr._ptr_network = &self.impl
             inputs[input.first.decode()] = input_info_ptr
         return inputs

@@ -1514,6 +1530,7 @@ cdef class IENetwork:
         cdef DataPtr data_ptr
         for input in c_inputs:
             data_ptr = DataPtr()
+            data_ptr._ptr_network = &self.impl
             data_ptr._ptr = input.second
             inputs[input.first.decode()] = data_ptr
         return inputs

@@ -1526,6 +1543,7 @@ cdef class IENetwork:
         cdef DataPtr data_ptr
         for output in c_outputs:
             data_ptr = DataPtr()
+            data_ptr._ptr_network = &self.impl
             data_ptr._ptr = output.second
             outputs[output.first.decode()] = data_ptr
         return outputs
@@ -6,6 +6,7 @@
 #include "hetero/hetero_plugin_config.hpp"
 #include "ie_iinfer_request.hpp"
 #include "details/ie_cnn_network_tools.h"
+#include "cnn_network_impl.hpp"

 const std::string EXPORTED_NETWORK_NAME = "undefined";
 std::map <std::string, InferenceEngine::Precision> precision_map = {{"FP32", InferenceEngine::Precision::FP32},

@@ -208,8 +209,17 @@ void InferenceEnginePython::IENetwork::serialize(const std::string &path_to_xml,
     actual->serialize(path_to_xml, path_to_bin);
 }

+void InferenceEnginePython::IENetwork::convertToOldRepresentation() {
+    if (actual->getFunction()) {
+        // convert to old representation
+        auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(*actual);
+        actual = std::make_shared<InferenceEngine::CNNNetwork>(convertedNetwork);
+    }
+}
+
 const std::vector <InferenceEngine::CNNLayerPtr>
 InferenceEnginePython::IENetwork::getLayers() {
+    convertToOldRepresentation();
     IE_SUPPRESS_DEPRECATED_START
     std::vector<InferenceEngine::CNNLayerPtr> result;
     std::vector<InferenceEngine::CNNLayerPtr> sorted_layers = InferenceEngine::details::CNNNetSortTopologically(*actual);
@@ -72,6 +72,8 @@ struct IENetwork {
     IENetwork(PyObject* network);

     IENetwork() = default;
+
+    void convertToOldRepresentation();
 };
@@ -49,7 +49,6 @@ cdef extern from "<inference_engine.hpp>" namespace "InferenceEngine":
     ctypedef weak_ptr[Data] DataWeakPtr
     ctypedef shared_ptr[const Data] CDataPtr
-

     cdef cppclass InputInfo:
         ctypedef shared_ptr[InputInfo] Ptr
         ctypedef shared_ptr[const InputInfo] CPtr

@@ -194,6 +193,7 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
         void reshape(map[string, vector[size_t]] input_shapes) except +
         void load_from_buffer(const char*xml, size_t xml_size, uint8_t*bin, size_t bin_size) except +
         object getFunction() except +
+        void convertToOldRepresentation() except +

     cdef cppclass InferRequestWrap:
         double exec_time;
@@ -49,6 +49,24 @@ def test_input_to():
     assert len(input_to) == 1
     assert input_to[0].name == '27'

+def test_input_to_via_input_info():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    input_infos = net.input_info
+    assert len(input_infos) == 1
+    input_to = input_infos['data'].input_data.input_to
+    assert len(input_to) == 1
+    assert input_to[0].name == '19/Fused_Add_'
+
+
+def test_input_to_via_inputs():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    inputs = net.inputs
+    assert len(inputs) == 1
+    input_to = inputs['data'].input_to
+    assert len(input_to) == 1
+    assert input_to[0].name == '19/Fused_Add_'
+

 def test_creator_layer():
     ie = IECore()
     net = ie.read_network(model=test_net_xml, weights=test_net_bin)

@@ -39,7 +39,7 @@ def test_precision_setter(recwarn):
     assert recwarn.pop(DeprecationWarning)


-def test_affinuty_getter():
+def test_affinity_getter():
     ie = IECore()
     net = ie.read_network(model=test_net_xml, weights=test_net_bin)
     assert net.layers['27'].affinity == ""
@@ -265,6 +265,10 @@ void clDNNEngine::QueryNetwork(const ICNNNetwork& network, const std::map<std::s
     // Verify device id
     GetDeviceInfo(config);

+    if (network.getFunction()) {
+        THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str << " ngraph::Function is not supported natively";
+    }
+
     std::vector<CNNLayerPtr> sortedLayers = CNNNetSortTopologically(network);
     for (auto layer : sortedLayers) {
         if (CaselessEq<std::string>()(layer->type, "DetectionOutput")) {
@@ -8,7 +8,6 @@
 #include <map>
 #include <vector>

-#include <net_pass.h>
 #include <cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp>
 #include "gna_infer_request.hpp"
 #include "gna_plugin.hpp"

@@ -31,9 +30,6 @@ class GNAExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafe

     GNAExecutableNetwork(InferenceEngine::ICNNNetwork &network, std::shared_ptr<GNAPlugin> plg)
         : plg(plg) {
-        InferenceEngine::NetPass::ConvertPrecision(network, InferenceEngine::Precision::I64, InferenceEngine::Precision::I32);
-        InferenceEngine::NetPass::ConvertPrecision(network, InferenceEngine::Precision::U64, InferenceEngine::Precision::I32);
-        InferenceEngine::NetPass::ConvertPrecision(network, InferenceEngine::Precision::U32, InferenceEngine::Precision::I32);
         plg->LoadNetwork(network);
     }
@@ -20,6 +20,7 @@

 #include <low_precision_transformations/blob_transformation.hpp>
 #include <graph_tools.hpp>
+#include <net_pass.h>
 #include <debug.h>
 #include <gna/gna_config.hpp>
 #include "gna_plugin_config.hpp"

@@ -337,7 +338,17 @@ void GNAPlugin::InitGNADevice() {
     graphCompiler.setGNAMemoryPtr(gnamem);
 }

-void GNAPlugin::LoadNetwork(ICNNNetwork &network) {
+void GNAPlugin::LoadNetwork(ICNNNetwork & _network) {
+    std::shared_ptr<InferenceEngine::details::CNNNetworkImpl> convertedNetwork;
+    if (_network.getFunction()) {
+        convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(_network);
+    }
+    InferenceEngine::ICNNNetwork &network = convertedNetwork ? *convertedNetwork : _network;
+
+    NetPass::ConvertPrecision(network, Precision::I64, Precision::I32);
+    NetPass::ConvertPrecision(network, Precision::U64, Precision::I32);
+    NetPass::ConvertPrecision(network, Precision::U32, Precision::I32);
+
     // move blobs from Constant layers to Convolution, Deconvolution, FullyConnected layers attributes
     BlobTransformation blobsTransformation;
     blobsTransformation.transform(network, true);

@@ -1254,6 +1265,10 @@ void GNAPlugin::UpdateFieldsFromConfig() {
 void GNAPlugin::QueryNetwork(const InferenceEngine::ICNNNetwork& network,
                              const std::map<std::string, std::string>& config,
                              InferenceEngine::QueryNetworkResult& res) const {
+    if (network.getFunction()) {
+        THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str << " ngraph::Function is not supported natively";
+    }
+
     std::unordered_set<CNNLayer *> allLayers;
     InferenceEngine::InputsDataMap inputs;

@@ -1276,4 +1291,4 @@ void GNAPlugin::QueryNetwork(const InferenceEngine::ICNNNetwork& network,
             res.supportedLayersMap.insert({ layer->name, GetName() });
         }
-    }
+    }, false);
 }
@@ -35,7 +35,7 @@ public:
         updated_config.UpdateFromMap(config);
         auto plg = std::make_shared<GNAPlugin>(updated_config.key_config_map);
         plgPtr = plg;
-        return std::make_shared<GNAExecutableNetwork>(*cloneNet(network), plg);
+        return std::make_shared<GNAExecutableNetwork>(*cloneNetwork(network), plg);
     }

     void SetConfig(const std::map<std::string, std::string> &config) override {
@@ -437,13 +437,15 @@ void HeteroExecutableNetwork::InitNgraph(const InferenceEngine::ICNNNetwork& net
     std::unordered_set<std::string> devices;
     NodeMap<std::string> affinities;
     // Check that all nodes has user or plugin defined affinities
+    std::shared_ptr<InferenceEngine::details::CNNNetworkImpl> convertedNetwork;
     for (auto&& node : orderedOps) {
         auto itAffinity = queryNetworkResult.supportedLayersMap.find(node->get_friendly_name());
         if (itAffinity != queryNetworkResult.supportedLayersMap.end()) {
             affinities[node.get()] = itAffinity->second;
             if (dumpDotFile) {
                 devices.insert(itAffinity->second);
-                for (details::CNNNetworkIterator el(&network); el != details::CNNNetworkIterator(); el++) {
+                convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
+                for (details::CNNNetworkIterator el(convertedNetwork.get()); el != details::CNNNetworkIterator(); el++) {
                     CNNLayer::Ptr layer = *el;
                     if (CaselessEq<std::string>()(layer->name, node->get_friendly_name())) {
                         layer->affinity = itAffinity->second;

@@ -468,7 +470,7 @@ void HeteroExecutableNetwork::InitNgraph(const InferenceEngine::ICNNNetwork& net

     if (dumpDotFile) {
         std::ofstream ofstream{"hetero_affinity_" + _name + ".dot"};
-        saveGraphToDot(network, ofstream, HeteroLayerColorer{{devices.begin(), devices.end()}});
+        saveGraphToDot(*convertedNetwork, ofstream, HeteroLayerColorer{{devices.begin(), devices.end()}});
     }

     NodeMap<InputSet> nodeInputDependencies;

@@ -689,7 +691,7 @@ void HeteroExecutableNetwork::InitNgraph(const InferenceEngine::ICNNNetwork& net
     }
     if (dumpDotFile) {
         std::ofstream ofstream{"hetero_subgraphs_" + _name + ".dot"};
-        dumpGraph(network, subFunctions, ofstream);
+        dumpGraph(*convertedNetwork, subFunctions, ofstream);
     }
     for (auto&& network : networks) {
         auto cfg = _config;

@@ -854,18 +856,25 @@ void HeteroExecutableNetwork::ExportImpl(std::ostream& heteroModel) {
     }

     auto subnetworksNode = heteroNode.append_child("subnetworks");
+    std::map<std::shared_ptr<const ngraph::Function>, ::CNNNetwork> convertedNetworks;
     for (auto&& subnetwork : networks) {
+        auto subnet = subnetwork._clonedNetwork;
+        if (subnet.getFunction()) {
+            subnet = convertedNetworks[subnet.getFunction()] =
+                InferenceEngine::CNNNetwork(
+                    std::make_shared<InferenceEngine::details::CNNNetworkImpl>(subnetwork._clonedNetwork));
+        }
         auto subnetworkNode = subnetworksNode.append_child("subnetwork");
         subnetworkNode.append_attribute("device").set_value(subnetwork._device.c_str());
         auto subnetworkInputsNode = subnetworkNode.append_child("inputs");
-        auto inputInfo = subnetwork._clonedNetwork.getInputsInfo();
+        auto inputInfo = subnet.getInputsInfo();
         for (auto&& input : inputInfo) {
             auto inputNode = subnetworkInputsNode.append_child("input");
             inputNode.append_attribute("name").set_value(input.first.c_str());
             inputNode.append_attribute("precision").set_value(input.second->getPrecision().name());
         }
         auto subnetworkOutputsNode = subnetworkNode.append_child("outputs");
-        auto outputInfo = subnetwork._clonedNetwork.getOutputsInfo();
+        auto outputInfo = subnet.getOutputsInfo();
         for (auto&& output : outputInfo) {
             auto outputNode = subnetworkOutputsNode.append_child("output");
             auto creator = getCreatorLayer(output.second).lock();

@@ -895,14 +904,18 @@ void HeteroExecutableNetwork::ExportImpl(std::ostream& heteroModel) {
     for (auto&& subnetwork : networks) {
         try {
             subnetwork._network.Export(heteroModel);
-        } catch(InferenceEngine::details::InferenceEngineException& ie_ex) {
+        } catch (InferenceEngine::details::InferenceEngineException& ie_ex) {
             if (std::string::npos != std::string{ie_ex.what()}.find(NOT_IMPLEMENTED_str)) {
                 pugi::xml_document doc;
-                auto dataSize = static_cast<std::uint64_t>(InferenceEngine::Serialization::FillXmlDoc(subnetwork._clonedNetwork, doc));
+                auto subnet = subnetwork._clonedNetwork;
+                if (subnet.getFunction()) {
+                    subnet = convertedNetworks[subnet.getFunction()];
+                }
+                auto dataSize = static_cast<std::uint64_t>(InferenceEngine::Serialization::FillXmlDoc(subnet, doc));
                 doc.save(heteroModel, nullptr, pugi::format_raw);
                 heteroModel << std::endl;
                 heteroModel.write(reinterpret_cast<char*>(&dataSize), sizeof(dataSize));
-                InferenceEngine::Serialization::SerializeBlobs(heteroModel, subnetwork._clonedNetwork);
+                InferenceEngine::Serialization::SerializeBlobs(heteroModel, subnet);
             } else {
                 throw;
             }
@@ -57,20 +57,23 @@ InferenceEngine::ExecutableNetworkInternal::Ptr Engine::LoadExeNetworkImpl(const
     }
     DeviceMetaInformationMap metaDevices = GetDevicePlugins(it->second, tconfig);

-    if (auto function = network.getFunction()) {
-        auto anyDeviceDoNotSupportNgraph =
-            std::any_of(std::begin(metaDevices), std::end(metaDevices),
-                        [&] (const DeviceMetaInformationMap::value_type& metaDevice) {
+    if (network.getFunction()) {
+        auto allSupportsNgraph =
+            std::all_of(std::begin(metaDevices), std::end(metaDevices),
+                        [&] (const DeviceMetaInformationMap::value_type& metaDevice) -> bool {
                             auto& deviceName = metaDevice.first;
-                            auto clonedNetwork = cloneNetwork(network);
-                            GetCore()->QueryNetwork(*clonedNetwork, deviceName, metaDevice.second);
-                            return (clonedNetwork->getFunction() == nullptr);
+                            try { GetCore()->QueryNetwork(network, deviceName, metaDevice.second); }
+                            catch (const InferenceEngine::details::InferenceEngineException & ex) {
+                                std::string message = ex.what();
+                                return message.find(NOT_IMPLEMENTED_str) == std::string::npos;
+                            }
+                            return true;
                         });
-        if (anyDeviceDoNotSupportNgraph) {
+        if (!allSupportsNgraph) {
             auto cnnNetworkImpl = std::make_shared<details::CNNNetworkImpl>(network);
             return std::make_shared<HeteroExecutableNetwork>(
-                *cnnNetworkImpl,
-                mergeConfigs(_config, config), this);
+                *cnnNetworkImpl, mergeConfigs(_config, config), this);
         } else {
             return std::make_shared<HeteroExecutableNetwork>(*cloneNetwork(network), mergeConfigs(_config, config), this);
         }

@@ -205,11 +208,37 @@ void Engine::QueryNetwork(const ICNNNetwork &network, const Configs& config, Que
     DeviceMetaInformationMap metaDevices = GetDevicePlugins(fallbackDevicesStr, tconfig);

     std::map<std::string, QueryNetworkResult> queryResults;
-    // go over devices and call query network
-    for (auto&& metaDevice : metaDevices) {
-        auto& deviceName = metaDevice.first;
-        auto clonedNetwork = cloneNetwork(network);
-        queryResults[deviceName] = GetCore()->QueryNetwork(*clonedNetwork, deviceName, metaDevice.second);
+    auto queryNetwork = [&] (const InferenceEngine::ICNNNetwork & networkObject) {
+        // go over devices and call query network
+        for (auto&& metaDevice : metaDevices) {
+            auto& deviceName = metaDevice.first;
+            auto clonedNetwork = cloneNetwork(networkObject);
+            queryResults[deviceName] = GetCore()->QueryNetwork(*clonedNetwork, deviceName, metaDevice.second);
+        }
+        return queryResults;
+    };
+
+    if (network.getFunction()) {
+        auto allSupportsNgraph =
+            std::all_of(std::begin(metaDevices), std::end(metaDevices),
+                        [&] (const DeviceMetaInformationMap::value_type& metaDevice) -> bool {
+                            auto& deviceName = metaDevice.first;
+                            auto clonedNetwork = cloneNetwork(network);
+                            try { GetCore()->QueryNetwork(*clonedNetwork, deviceName, metaDevice.second); }
+                            catch (const InferenceEngine::details::InferenceEngineException & ex) {
+                                std::string message = ex.what();
+                                return message.find(NOT_IMPLEMENTED_str) == std::string::npos;
+                            }
+                            return true;
+                        });
+        if (!allSupportsNgraph) {
+            auto cnnNetworkImpl = std::make_shared<details::CNNNetworkImpl>(network);
+            queryNetwork(*cnnNetworkImpl);
+        } else {
+            queryNetwork(network);
+        }
+    } else {
+        queryNetwork(network);
     }

     // WARNING: Here is devices with user set priority
@@ -118,32 +118,17 @@ CNNLayerWeakPtr& InferenceEngine::getCreatorLayer(const DataPtr & data) {
 }

 std::map<std::string, CNNLayerPtr>& InferenceEngine::getInputTo(const DataPtr & data) {
-    if (auto ndata = std::dynamic_pointer_cast<details::NGraphData>(data)) {
-        return ndata->getInputTo();
-    } else {
-        return data->_impl->inputTo;
-    }
+    return data->_impl->inputTo;
 }

 std::map<std::string, CNNLayerPtr>& InferenceEngine::getInputTo(Data * data) {
-    if (auto ndata = dynamic_cast<details::NGraphData *>(data)) {
-        return ndata->getInputTo();
-    } else {
-        return data->_impl->inputTo;
-    }
+    return data->_impl->inputTo;
 }
-
-CNNLayerWeakPtr& details::NGraphData::getCreatorLayer() {
-    if (_impl->creatorLayer.lock() == nullptr && network != nullptr) {
-        network->convertToCNNNetworkImpl();
-    }
-    return _impl->creatorLayer;
-}
-
-std::map<std::string, CNNLayerPtr>& details::NGraphData::getInputTo() {
-    if (_impl->inputTo.empty() && network != nullptr) {
-        network->convertToCNNNetworkImpl();
-    }
-
-    return _impl->inputTo;
-}
@@ -6,6 +6,7 @@
 #include <cnn_network_ngraph_impl.hpp>
 #include <precision_utils.h>
 #include <cpp/ie_cnn_network.h>
+#include <cnn_network_impl.hpp>

 #include <limits>
 #include <cmath>

@@ -151,7 +152,8 @@ CNNLayer::Ptr NodeConverter<ngraph::op::TensorIterator>::createLayer(const std::
     std::map<std::string, std::vector<TensorDesc>> layer_name_to_tensor_desc;
     {
         auto tiBody = std::make_shared<details::TINGraphBody>(std::make_shared<ngraph::Function>(results, parameters));
-        CNNNetwork net(tiBody);
+        CNNNetwork ngraphNet(tiBody);
+        CNNNetwork net(std::make_shared<InferenceEngine::details::CNNNetworkImpl>(ngraphNet));
         // Paranoid check for cycles
         bool res = CNNNetForestDFS(
             CNNNetGetAllInputLayers(net), [](const CNNLayerPtr& layer) {}, false);
@@ -521,19 +521,49 @@ void MultiDeviceInferencePlugin::QueryNetwork(const ICNNNetwork&

     DeviceMap<DeviceInformation> metaDevices = ParseMetaDevices(priorities->second, fullConfig);
     std::unordered_set<std::string> supportedLayers;

+    auto allSupportsNgraph =
+        std::all_of(std::begin(metaDevices), std::end(metaDevices),
+                    [&] (const DeviceMap<DeviceInformation>::value_type & value) -> bool {
+                        auto& deviceName = value.first;
+                        auto& metaDevice = value.second;
+                        auto clonedNetwork = cloneNetwork(network);
+                        try { GetCore()->QueryNetwork(*clonedNetwork, deviceName, metaDevice.config); }
+                        catch (const InferenceEngine::details::InferenceEngineException & ex) {
+                            std::string message = ex.what();
+                            return message.find(NOT_IMPLEMENTED_str) == std::string::npos;
+                        }
+                        return true;
+                    });
+
     for (auto&& value : metaDevices) {
         auto& deviceName = value.first;
         auto& metaDevice = value.second;
-        auto clonedNetwork = cloneNetwork(network);
-        auto deviceQr = GetCore()->QueryNetwork(*clonedNetwork, deviceName, metaDevice.config);
-        std::unordered_set<std::string> deviceSupportedLayers;
-        for (auto&& layerQr : deviceQr.supportedLayersMap) {
-            deviceSupportedLayers.emplace(layerQr.first);
+
+        auto queryNetwork = [&] (const InferenceEngine::ICNNNetwork & networkObject) {
+            auto clonedNetwork = cloneNetwork(networkObject);
+            auto deviceQr = GetCore()->QueryNetwork(*clonedNetwork, deviceName, metaDevice.config);
+            std::unordered_set<std::string> deviceSupportedLayers;
+            for (auto&& layerQr : deviceQr.supportedLayersMap) {
+                deviceSupportedLayers.emplace(layerQr.first);
+            }
+            supportedLayers = supportedLayers.empty()
+                    ? deviceSupportedLayers : (deviceSupportedLayers.empty()
+                    ? supportedLayers : Intersection(supportedLayers, deviceSupportedLayers));
+        };
+
+        if (network.getFunction()) {
+            if (!allSupportsNgraph) {
+                auto cnnNetworkImpl = std::make_shared<details::CNNNetworkImpl>(network);
+                queryNetwork(*cnnNetworkImpl);
+            } else {
+                queryNetwork(network);
+            }
+        } else {
+            queryNetwork(network);
         }
-        supportedLayers = supportedLayers.empty()
-                ? deviceSupportedLayers : (deviceSupportedLayers.empty()
-                ? supportedLayers : Intersection(supportedLayers, deviceSupportedLayers));
     }

     for (auto&& supportedLayer : supportedLayers) {
         queryResult.supportedLayersMap[supportedLayer] = GetName();
     }
@@ -87,6 +87,10 @@ void Engine::QueryNetwork(
         VPU_THROW_UNLESS(!(std::find(deviceIDs.begin(), deviceIDs.end(), deviceName) == deviceIDs.end()), "Myriad device: {} not found.", deviceName);
     }

+    if (network.getFunction()) {
+        THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str << " ngraph::Function is not supported natively";
+    }
+
     const auto log = std::make_shared<Logger>(
         "GraphCompiler",
         parsedConfigCopy.logLevel(),
@@ -107,11 +107,13 @@ TEST(CNNNGraphImplTests, TestGetOutputAfterConvertNetwork) {

     InferenceEngine::CNNNetwork cnnNet(ngraph);
-    // convert to old representation
-    getCreatorLayer(cnnNet.getInputsInfo().begin()->second->getInputData());
-    cnnNet.addOutput(testLayerName);
+    InferenceEngine::CNNNetwork convertedNetwork(std::make_shared<details::CNNNetworkImpl>(cnnNet));
+    convertedNetwork.addOutput(testLayerName);

     InferenceEngine::OutputsDataMap outs = cnnNet.getOutputsInfo();
-    ASSERT_EQ(2, outs.size());
+    InferenceEngine::OutputsDataMap convertedOuts = convertedNetwork.getOutputsInfo();
+    ASSERT_EQ(1, outs.size());
+    ASSERT_EQ(2, convertedOuts.size());
 }

 TEST(CNNNGraphImplTests, TestSetCurrentBatch) {

@@ -178,7 +180,8 @@ TEST(CNNNGraphImplTests, TestSaveAffinity) {
     }

     InferenceEngine::CNNNetwork cnnNet(ngraph);
-    auto cnnLayer = CommonTestUtils::getLayerByName(cnnNet, "testReLU");
+    auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNet);
+    auto cnnLayer = CommonTestUtils::getLayerByName(convertedNetwork.get(), "testReLU");
     ASSERT_NE(nullptr, cnnLayer);
     ASSERT_EQ(cnnLayer->affinity, testAffinity);
 }

@@ -242,8 +245,8 @@ TEST(CNNNGraphImplTests, TestAddOutputFromConvertedNetwork) {
     ASSERT_NE(nullptr, cnnNet.getFunction());
     ASSERT_EQ(5, cnnNet.layerCount());
     // convert to old representation
-    getCreatorLayer(cnnNet.getInputsInfo().begin()->second->getInputData());
-    auto outputs = cnnNet.getOutputsInfo();
+    InferenceEngine::CNNNetwork convertedNetwork(std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNet));
+    auto outputs = convertedNetwork.getOutputsInfo();
     ASSERT_EQ(2, outputs.size());
     ASSERT_TRUE(outputs.find("relu2") != outputs.end());
     ASSERT_TRUE(outputs.find(testLayerName) != outputs.end());

@@ -267,9 +270,8 @@ TEST(CNNNGraphImplTests, ConstantAsInternalAndExternalLayer) {
     }

     InferenceEngine::CNNNetwork cnnNet(ngraph);
-    // convert to old representation
-    getCreatorLayer(cnnNet.getInputsInfo().begin()->second->getInputData());
-    ASSERT_EQ(4, cnnNet.layerCount());
+    auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNet);
+    ASSERT_EQ(4, convertedNetwork->layerCount());
 }

 TEST(CNNNGraphImplTests, SaveInputInfoAfterConversion) {

@@ -357,8 +359,9 @@ TEST(CNNNGraphImplTests, SavePrimitivesPriority) {

     auto network = ie.ReadNetwork(model, weights);
     auto inputInfo = network.getInputsInfo();
+    auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
     auto cnnLayer = getCreatorLayer(inputInfo.begin()->second->getInputData()).lock();
-    ASSERT_TRUE(cnnLayer);
+    ASSERT_NE(nullptr, cnnLayer);
     ASSERT_NE(cnnLayer->params.find("PrimitivesPriority"), cnnLayer->params.end());
     ASSERT_EQ("cpu:avx2", cnnLayer->params["PrimitivesPriority"]);
 }

@@ -449,16 +452,18 @@ TEST(CNNNGraphImplTests, CanChangeInputPrecision) {

         inputsInfo.at("input")->setPrecision(Precision::FP16);
     }
+    InferenceEngine::CNNNetwork convertedNetwork;
     {
         SCOPED_TRACE("Convert to old format");

-        // convert to old representation
-        getCreatorLayer(cnnNet.getInputsInfo().begin()->second->getInputData());
+        convertedNetwork = InferenceEngine::CNNNetwork(
+            std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNet));
     }
     {
         SCOPED_TRACE("After conversion");

-        const auto inputsInfo = cnnNet.getInputsInfo();
+        const auto inputsInfo = convertedNetwork.getInputsInfo();

         ASSERT_EQ(inputsInfo.at("input")->getPrecision(), Precision::FP16)
             << "Manually set presision should be left unchanged";

@@ -496,16 +501,18 @@ TEST(CNNNGraphImplTests, CanChangeInputLayout) {

         inputsInfo.at("input")->setLayout(Layout::NHWC);
     }
+    InferenceEngine::CNNNetwork convertedNetwork;
     {
         SCOPED_TRACE("Convert to old format");

-        // convert to old representation
-        getCreatorLayer(cnnNet.getInputsInfo().begin()->second->getInputData());
+        convertedNetwork = InferenceEngine::CNNNetwork(
+            std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNet));
     }
     {
         SCOPED_TRACE("After conversion");

-        const auto inputsInfo = cnnNet.getInputsInfo();
+        const auto inputsInfo = convertedNetwork.getInputsInfo();

         ASSERT_EQ(inputsInfo.at("input")->getLayout(), Layout::NHWC)
             << "Manually set layout should be left unchanged";

@@ -543,16 +550,18 @@ TEST(CNNNGraphImplTests, CanChangeOutputPrecision) {

         outputsInfo.at("output")->setPrecision(Precision::FP16);
     }
+    InferenceEngine::CNNNetwork convertedNetwork;
     {
         SCOPED_TRACE("Convert to old format");

-        // convert to old representation
-        getCreatorLayer(cnnNet.getInputsInfo().begin()->second->getInputData());
+        convertedNetwork = InferenceEngine::CNNNetwork(
+            std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNet));
     }
     {
         SCOPED_TRACE("After conversion");

-        const auto outputsInfo = cnnNet.getOutputsInfo();
+        const auto outputsInfo = convertedNetwork.getOutputsInfo();

         ASSERT_EQ(outputsInfo.at("output")->getPrecision(), Precision::FP16)
             << "Manually set presision should be left unchanged";

@@ -590,16 +599,18 @@ TEST(CNNNGraphImplTests, CanChangeOutputLayout) {

         outputsInfo.at("output")->setLayout(Layout::NHWC);
     }
+    InferenceEngine::CNNNetwork convertedNetwork;
     {
         SCOPED_TRACE("Convert to old format");

-        // convert to old representation
-        getCreatorLayer(cnnNet.getInputsInfo().begin()->second->getInputData());
+        convertedNetwork = InferenceEngine::CNNNetwork(
+            std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNet));
     }
     {
         SCOPED_TRACE("After conversion");

-        const auto outputsInfo = cnnNet.getOutputsInfo();
+        const auto outputsInfo = convertedNetwork.getOutputsInfo();

         ASSERT_EQ(outputsInfo.at("output")->getLayout(), Layout::NHWC)
             << "Manually set layout should be left unchanged";
@@ -61,8 +61,9 @@ protected:
     void compareWithRef(const InferenceEngine::CNNNetwork &network,
                         const std::vector<InferenceEngine::CNNLayerPtr> &refLayersVec) {
         IE_SUPPRESS_DEPRECATED_START
-        ASSERT_NO_THROW(FuncTestUtils::compareLayerByLayer<std::vector<InferenceEngine::CNNLayerPtr>>(
-                InferenceEngine::details::CNNNetSortTopologically(network),
+        auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
+        ASSERT_NO_THROW(FuncTestUtils::compareLayerByLayer(
+                InferenceEngine::details::CNNNetSortTopologically(*convertedNetwork),
                 refLayersVec, false));
         IE_SUPPRESS_DEPRECATED_END
     }

@@ -42,7 +42,8 @@ TEST_P(CNNNetworkSerializerTest, Serialize) {
     {
         IE_SUPPRESS_DEPRECATED_START
         // convert to old representation
-        getCreatorLayer(originalNetwork.getInputsInfo().begin()->second->getInputData());
+        originalNetwork = InferenceEngine::CNNNetwork(
+            std::make_shared<InferenceEngine::details::CNNNetworkImpl>(originalNetwork));
         IE_SUPPRESS_DEPRECATED_END
     }
     originalNetwork.getInputsInfo().begin()->second->setPrecision(_netPrc);

@@ -68,7 +69,8 @@ TEST_P(CNNNetworkSerializerTest, Serialize) {

 TEST_P(CNNNetworkSerializerTest, TopoSortResultUnique) {
     InferenceEngine::CNNNetwork network(ngraph::builder::subgraph::makeConvPoolRelu());
-    auto sorted = InferenceEngine::Serialization::TopologicalSort(network);
+    auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
+    auto sorted = InferenceEngine::Serialization::TopologicalSort(*convertedNetwork);

     std::vector<std::string> actualLayerNames;
     for (auto&& layer : sorted) {

@@ -77,8 +79,9 @@ TEST_P(CNNNetworkSerializerTest, TopoSortResultUnique) {
         IE_SUPPRESS_DEPRECATED_END
     }

-    std::vector<std::string> expectedLayerNames = {
-        "Param_1", "Const_1", "Reshape_1", "Conv_1", "Pool_1", "Relu_1", "Const_2", "Reshape_2"
+    const std::vector<std::string> expectedLayerNames = {
+        "Param_1", "Const_1", "Reshape_1", "Conv_1",
+        "Pool_1", "Relu_1", "Const_2", "Reshape_2"
     };

     ASSERT_EQ(expectedLayerNames, actualLayerNames);
@@ -258,6 +258,7 @@ TEST_F(NGraphReaderTests, ReadFQNetwork) {

     IE_SUPPRESS_DEPRECATED_START
     // convert to old representation
-    getCreatorLayer(cnn.getInputsInfo().begin()->second->getInputData());
+    auto convertedNetwork = std::make_shared<details::CNNNetworkImpl>(cnn);
+    (void)convertedNetwork;
     IE_SUPPRESS_DEPRECATED_END
 }

@@ -39,13 +39,17 @@ protected:
         auto network = ie.ReadNetwork(modelV10, weights);
         auto cnnNetwork = ie.ReadNetwork(oldModel, weights);

-        FuncTestUtils::compareCNNNetworks(network, cnnNetwork, false);
         IE_SUPPRESS_DEPRECATED_START
-        for (auto it = details::CNNNetworkIterator(network); it != details::CNNNetworkIterator(); it++) {
+        auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
+
+        FuncTestUtils::compareCNNNetworks(InferenceEngine::CNNNetwork(convertedNetwork), cnnNetwork, false);
+
+        for (auto it = details::CNNNetworkIterator(convertedNetwork.get()); it != details::CNNNetworkIterator(); it++) {
             InferenceEngine::CNNLayerPtr layer = *it;
             ASSERT_NE(nullptr, layer->getNode());
         }

         ASSERT_EQ(nullptr, cnnNetwork.getFunction());
         for (auto it = details::CNNNetworkIterator(cnnNetwork); it != details::CNNNetworkIterator(); it++) {
             InferenceEngine::CNNLayerPtr layer = *it;
             ASSERT_EQ(nullptr, layer->getNode());

@@ -223,6 +223,7 @@ TEST_F(NGraphReaderTests, ReadReLUScalarNetwork) {

     IE_SUPPRESS_DEPRECATED_START
     // convert to old representation
-    getCreatorLayer(cnn.getInputsInfo().begin()->second->getInputData());
+    auto convertedNetwork = std::make_shared<details::CNNNetworkImpl>(cnn);
+    (void)convertedNetwork;
     IE_SUPPRESS_DEPRECATED_END
 }
@@ -283,7 +283,8 @@ TEST_F(NGraphReshapeTests, ReshapeNewIRWithNewExtension1) {
     SizeVector outDims = output["activation"]->getTensorDesc().getDims();
     ASSERT_EQ(outDims, refAfterReshape);
     // Convert to CNNNetwork
-    auto layer = CommonTestUtils::getLayerByName(network, "activation");
+    auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
+    auto layer = CommonTestUtils::getLayerByName(convertedNetwork.get(), "activation");
     ASSERT_EQ("CustomTestLayer", layer->type);
 }

@@ -353,7 +354,8 @@ TEST_F(NGraphReshapeTests, ReshapeNewIRWithNewExtension2) {
     SizeVector outDims = output["activation"]->getTensorDesc().getDims();
     ASSERT_EQ(outDims, refAfterReshape);
     // Convert to CNNNetwork
-    auto layer = CommonTestUtils::getLayerByName(network, "activation");
+    auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
+    auto layer = CommonTestUtils::getLayerByName(convertedNetwork.get(), "activation");
     ASSERT_EQ("CustomTestLayer", layer->type);
     ASSERT_EQ("false", layer->params["test1"]);
     ASSERT_EQ("3", layer->params["test2"]);
@@ -52,8 +52,8 @@ InferenceEngine::details::LowPrecisionTransformations LayerTransformation::getLo
 }

 InferenceEngine::CNNNetwork LayerTransformation::transform(InferenceEngine::details::LayerTransformation::Params& params) {
-    auto net1 = InferenceEngine::CNNNetwork(function);
-    std::shared_ptr<InferenceEngine::ICNNNetwork> clonedNetwork = InferenceEngine::cloneNetwork(net1);
+    auto ngraphNetwork = InferenceEngine::CNNNetwork(function);
+    std::shared_ptr<InferenceEngine::ICNNNetwork> clonedNetwork = InferenceEngine::cloneNetwork(ngraphNetwork);

     if (clonedNetwork->getFunction()) {
         const auto transformations_callback = [](const std::shared_ptr<const ::ngraph::Node> &node) -> bool {

@@ -112,7 +112,9 @@ InferenceEngine::Precision LayerTransformation::getDeviceInternalPrecision(const
 }

 InferenceEngine::CNNNetwork LayerTransformation::transform(const InferenceEngine::details::LowPrecisionTransformations& transformations) {
-    InferenceEngine::details::CNNNetworkImplPtr cnnNetworkImp = cloneNet(InferenceEngine::CNNNetwork(function));
+    // convert to old representation
+    InferenceEngine::CNNNetwork ngraphNetwork(function);
+    auto cnnNetworkImp = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(ngraphNetwork);

     InferenceEngine::NetPass::ConvertPrecision(*cnnNetworkImp, InferenceEngine::Precision::I64, InferenceEngine::Precision::I32);
     InferenceEngine::NetPass::ConvertPrecision(*cnnNetworkImp, InferenceEngine::Precision::U64, InferenceEngine::Precision::I32);

@@ -58,8 +58,8 @@ InferenceEngine::details::LowPrecisionTransformations LayerTransformation::getLo
 }

 InferenceEngine::CNNNetwork LayerTransformation::transform(InferenceEngine::details::LayerTransformation::Params& params) {
-    auto net1 = InferenceEngine::CNNNetwork(function);
-    std::shared_ptr<InferenceEngine::ICNNNetwork> clonedNetwork = InferenceEngine::cloneNetwork(net1);
+    auto ngraphNetwork = InferenceEngine::CNNNetwork(function);
+    std::shared_ptr<InferenceEngine::ICNNNetwork> clonedNetwork = InferenceEngine::cloneNetwork(ngraphNetwork);

     if (clonedNetwork->getFunction()) {
         const auto transformations_callback = [](const std::shared_ptr<const ::ngraph::Node> &node) -> bool {

@@ -102,7 +102,9 @@ InferenceEngine::Precision LayerTransformation::getDeviceInternalPrecision(const
 }

 InferenceEngine::CNNNetwork LayerTransformation::transform(const InferenceEngine::details::LowPrecisionTransformations& transformations) {
-    InferenceEngine::details::CNNNetworkImplPtr cnnNetworkImp = cloneNet(InferenceEngine::CNNNetwork(function));
+    // convert to old representation
+    InferenceEngine::CNNNetwork ngraphNetwork(function);
+    auto cnnNetworkImp = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(ngraphNetwork);

     InferenceEngine::NetPass::ConvertPrecision(*cnnNetworkImp, InferenceEngine::Precision::FP16, InferenceEngine::Precision::FP32);
@@ -8,6 +8,7 @@
 #include "ngraph/function.hpp"

 #include "cpp/ie_cnn_network.h"
+#include "cnn_network_impl.hpp"
 #include "ie_common.h"

 #include "common_test_utils/test_common.hpp"

@@ -43,8 +44,8 @@ protected:

 private:
     void triggerConversionToCNNNetwork() {
-        // convert to old representation
-        getCreatorLayer(cnnNetwork.getInputsInfo().begin()->second->getInputData());
+        cnnNetwork = InferenceEngine::CNNNetwork(
+            std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNetwork));
     }

     static const char s_FriendlyName[];
@@ -10,6 +10,7 @@
 #include <ie_plugin_config.hpp>
 #include <memory>
 #include <fstream>
+#include <ngraph/variant.hpp>
 #include <hetero/hetero_plugin_config.hpp>
 #include <graph_tools.hpp>
 #include <functional_test_utils/plugin_cache.hpp>

@@ -21,6 +22,7 @@

 #include <functional_test_utils/skip_tests_config.hpp>
 #include <common_test_utils/common_utils.hpp>
+#include <common_test_utils/test_assertions.hpp>

 #ifdef ENABLE_UNICODE_PATH_SUPPORT
 #include <iostream>

@@ -104,33 +106,22 @@ public:
         }
     }
     void setHeteroNetworkAffinity(const std::string& targetDevice) {
-        InferenceEngine::InputsDataMap networkInputs = actualNetwork.getInputsInfo();
-
-        CNNLayerPtr layer;
-        for (auto input : networkInputs) {
-            InputInfo::Ptr q = input.second;
-            DataPtr p = q->getInputData();
-            layer = getInputTo(p).begin()->second;
-        }
-
-        std::map<std::string, std::string> deviceMapping = {
-            {"Convololution_4", targetDevice},
-            {"Convololution_7", CommonTestUtils::DEVICE_CPU},
+        const std::map<std::string, std::string> deviceMapping = {
+            {"Split_2", targetDevice},
+            {"Convolution_4", targetDevice},
+            {"Convolution_7", CommonTestUtils::DEVICE_CPU},
             {"Relu_5", CommonTestUtils::DEVICE_CPU},
             {"Relu_8", targetDevice},
             {"Concat_9", CommonTestUtils::DEVICE_CPU}
         };

-        CNNNetDFS(layer, [&](const CNNLayerPtr &layer) {
-            IE_SUPPRESS_DEPRECATED_START
-            auto it = deviceMapping.find(layer->name);
+        for (const auto & op : actualNetwork.getFunction()->get_ops()) {
+            auto it = deviceMapping.find(op->get_friendly_name());
             if (it != deviceMapping.end()) {
-                layer->affinity = it->second;
-            } else {
-                layer->affinity = CommonTestUtils::DEVICE_CPU;
+                std::string affinity = it->second;
+                op->get_rt_info()["affinity"] = std::make_shared<ngraph::VariantWrapper<std::string>>(affinity);
             }
-            IE_SUPPRESS_DEPRECATED_END
-        });
+        }
     }
 };

@@ -545,6 +536,7 @@ TEST_P(IEClassImportExportTestP, smoke_ExportUsingFileNameImportFromStreamNoThro
 //
 // QueryNetwork
 //
+
 TEST_P(IEClassNetworkTestP, QueryNetworkActualThrows) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
     Core ie;

@@ -554,7 +546,13 @@ TEST_P(IEClassNetworkTestP, QueryNetworkActualThrows) {
 TEST_P(IEClassNetworkTestP, QueryNetworkActualNoThrow) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
     Core ie;
-    ASSERT_NO_THROW(ie.QueryNetwork(actualNetwork, deviceName));
+
+    try {
+        ie.QueryNetwork(actualNetwork, deviceName);
+    } catch (const InferenceEngine::details::InferenceEngineException & ex) {
+        std::string message = ex.what();
+        ASSERT_STR_CONTAINS(message, "[NOT_IMPLEMENTED] ngraph::Function is not supported natively");
+    }
 }

 TEST_P(IEClassNetworkTestP, QueryNetworkHeteroActualNoThrow) {

@@ -1136,7 +1134,12 @@ TEST_P(IEClassQueryNetworkTest, QueryNetworkWithDeviceID) {
     Core ie;

     if (supportsDeviceID(ie, deviceName)) {
-        ASSERT_NO_THROW(ie.QueryNetwork(simpleNetwork, deviceName + ".0"));
+        try {
+            ie.QueryNetwork(simpleNetwork, deviceName + ".0");
+        } catch (const InferenceEngine::details::InferenceEngineException & ex) {
+            std::string message = ex.what();
+            ASSERT_STR_CONTAINS(message, "[NOT_IMPLEMENTED] ngraph::Function is not supported natively");
+        }
     } else {
         GTEST_SKIP();
     }

@@ -1325,10 +1328,9 @@ TEST_P(IEClassLoadNetworkTest, QueryNetworkHETEROwithMULTINoThrow_v7) {
             {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices},
            {"TARGET_FALLBACK", targetFallback}}));

+        auto convertedActualNetwork = std::make_shared<details::CNNNetworkImpl>(actualNetwork);
         for (auto &&layer : result.supportedLayersMap) {
-            // IE_SUPPRESS_DEPRECATED_START
-            EXPECT_NO_THROW(CommonTestUtils::getLayerByName(actualNetwork, layer.first));
-            // IE_SUPPRESS_DEPRECATED_END
+            EXPECT_NO_THROW(CommonTestUtils::getLayerByName(convertedActualNetwork.get(), layer.first));
         }
     } else {
         GTEST_SKIP();

@@ -1354,10 +1356,9 @@ TEST_P(IEClassLoadNetworkTest, QueryNetworkMULTIwithHETERONoThrowv7) {
             {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices},
             {"TARGET_FALLBACK", deviceName + "," + CommonTestUtils::DEVICE_CPU}}));

+        auto convertedActualNetwork = std::make_shared<details::CNNNetworkImpl>(actualNetwork);
         for (auto &&layer : result.supportedLayersMap) {
-            IE_SUPPRESS_DEPRECATED_START
-            EXPECT_NO_THROW(CommonTestUtils::getLayerByName(actualNetwork, layer.first));
-            IE_SUPPRESS_DEPRECATED_END
+            EXPECT_NO_THROW(CommonTestUtils::getLayerByName(convertedActualNetwork.get(), layer.first));
         }
     } else {
         GTEST_SKIP();
@@ -280,7 +280,7 @@ namespace FuncTestUtils {
             return l->name < r->name;
         });

-        compareLayerByLayer<std::vector<InferenceEngine::CNNLayerPtr>>(nodes_new, nodes_old, sameNetVersions);
+        compareLayerByLayer(nodes_new, nodes_old, sameNetVersions);

         auto get_map = [](
                 const std::vector<InferenceEngine::DataPtr> &data) -> std::map<std::string, InferenceEngine::DataPtr> {

@@ -313,7 +313,7 @@ namespace FuncTestUtils {
            THROW_IE_EXCEPTION << "CNNNetworks have different batch size! " << std::to_string(network.getBatchSize())
                               << " and " << std::to_string(refNetwork.getBatchSize());

-        compareLayerByLayer<InferenceEngine::CNNNetwork>(network, refNetwork, sameNetVersions);
+        compareLayerByLayer(network, refNetwork, sameNetVersions);
         InferenceEngine::InputsDataMap newInput = network.getInputsInfo();
         InferenceEngine::InputsDataMap oldInput = refNetwork.getInputsInfo();
         InferenceEngine::OutputsDataMap newOutput = network.getOutputsInfo();

@@ -322,4 +322,56 @@ namespace FuncTestUtils {
         compareInfo<InferenceEngine::OutputsDataMap>(newOutput, oldOutput, "CNNNetworks have different outputs!");
     }

+    IE_SUPPRESS_DEPRECATED_START
+
+    void compareLayerByLayer(const std::vector<InferenceEngine::CNNLayerPtr>& network,
+                             const std::vector<InferenceEngine::CNNLayerPtr>& refNetwork,
+                             bool sameNetVersions) {
+        auto iterator = network.begin();
+        auto refIterator = refNetwork.begin();
+        if (network.size() != refNetwork.size())
+            THROW_IE_EXCEPTION << "CNNNetworks have different number of layers: " <<
+                network.size() << " vs " << refNetwork.size();
+        for (; iterator != network.end() && refIterator != refNetwork.end(); iterator++, refIterator++) {
+            InferenceEngine::CNNLayerPtr layer = *iterator;
+            InferenceEngine::CNNLayerPtr refLayer = *refIterator;
+            compareCNNNLayers(layer, refLayer, sameNetVersions);
+        }
+    }
+
+    void compareLayerByLayer(const InferenceEngine::CNNNetwork& network,
+                             const InferenceEngine::CNNNetwork& refNetwork,
+                             bool sameNetVersions) {
+        InferenceEngine::details::CNNNetworkIterator iterator, refIterator, end;
+        std::shared_ptr<InferenceEngine::details::CNNNetworkImpl> convertedNetwork, convertedRefNetwork;
+
+        auto convertNetwork = [] (const InferenceEngine::CNNNetwork & net,
+                                  std::shared_ptr<InferenceEngine::details::CNNNetworkImpl> & convertedNet,
+                                  InferenceEngine::details::CNNNetworkIterator & it) {
+            if (net.getFunction()) {
+                convertedNet.reset(new InferenceEngine::details::CNNNetworkImpl(net));
+                it = InferenceEngine::details::CNNNetworkIterator(convertedNet.get());
+            } else {
+                it = InferenceEngine::details::CNNNetworkIterator(net);
+            }
+        };
+
+        convertNetwork(network, convertedNetwork, iterator);
+        convertNetwork(refNetwork, convertedRefNetwork, refIterator);
+
+        size_t layerCount = convertedNetwork ? convertedNetwork->layerCount() : network.layerCount();
+        size_t layerRefCount = convertedRefNetwork ? convertedRefNetwork->layerCount() : refNetwork.layerCount();
+
+        if (layerCount != layerRefCount)
+            THROW_IE_EXCEPTION << "CNNNetworks have different number of layers: " << layerCount << " vs " << layerRefCount;
+        for (; iterator != end && refIterator != end; iterator++, refIterator++) {
+            InferenceEngine::CNNLayerPtr layer = *iterator;
+            InferenceEngine::CNNLayerPtr refLayer = *refIterator;
+            compareCNNNLayers(layer, refLayer, sameNetVersions);
+        }
+        std::cout << std::endl;
+    }
+
+    IE_SUPPRESS_DEPRECATED_END
+
 }  // namespace FuncTestUtils
@@ -5,6 +5,7 @@
 #pragma once

 #include "cpp/ie_cnn_network.h"
+#include "cnn_network_impl.hpp"
 #include "details/ie_cnn_network_iterator.hpp"

 namespace FuncTestUtils {

@@ -13,37 +14,12 @@ void compareCNNNetworks(const InferenceEngine::CNNNetwork &network, const Infere

 void compareCNNNLayers(const InferenceEngine::CNNLayerPtr &layer, const InferenceEngine::CNNLayerPtr &refLayer, bool sameNetVersions);

-IE_SUPPRESS_DEPRECATED_START
-template <class T>
-inline void compareLayerByLayer(const T& network, const T& refNetwork, bool sameNetVersions = true) {
-    auto iterator = InferenceEngine::details::CNNNetworkIterator(network);
-    auto refIterator = InferenceEngine::details::CNNNetworkIterator(refNetwork);
-    auto end = InferenceEngine::details::CNNNetworkIterator();
-    if (network.layerCount() != refNetwork.layerCount())
-        THROW_IE_EXCEPTION << "CNNNetworks have different number of layers: " << network.layerCount() << " vs " << refNetwork.layerCount();
-    for (; iterator != end && refIterator != end; iterator++, refIterator++) {
-        InferenceEngine::CNNLayerPtr layer = *iterator;
-        InferenceEngine::CNNLayerPtr refLayer = *refIterator;
-        compareCNNNLayers(layer, refLayer, sameNetVersions);
-    }
-}
+void compareLayerByLayer(const InferenceEngine::CNNNetwork& network,
+                         const InferenceEngine::CNNNetwork& refNetwork,
+                         bool sameNetVersions = true);

-template <>
-inline void compareLayerByLayer(const std::vector<InferenceEngine::CNNLayerPtr>& network,
-                                const std::vector<InferenceEngine::CNNLayerPtr>& refNetwork,
-                                bool sameNetVersions) {
-    auto iterator = network.begin();
-    auto refIterator = refNetwork.begin();
-    if (network.size() != refNetwork.size())
-        THROW_IE_EXCEPTION << "CNNNetworks have different number of layers: " <<
-            network.size() << " vs " << refNetwork.size();
-    for (; iterator != network.end() && refIterator != refNetwork.end(); iterator++, refIterator++) {
-        InferenceEngine::CNNLayerPtr layer = *iterator;
-        InferenceEngine::CNNLayerPtr refLayer = *refIterator;
-        compareCNNNLayers(layer, refLayer, sameNetVersions);
-    }
-}
-
-IE_SUPPRESS_DEPRECATED_END
+void compareLayerByLayer(const std::vector<InferenceEngine::CNNLayerPtr>& network,
+                         const std::vector<InferenceEngine::CNNLayerPtr>& refNetwork,
+                         bool sameNetVersions = true);

 }  // namespace FuncTestUtils
@@ -402,7 +402,11 @@ protected:
             //        PluginConfigInternalParams::KEY_LP_TRANSFORMS_MODE,
             //        transformationsParams.transformationsInPluginEnabled ? PluginConfigParams::YES : PluginConfigParams::NO);

-            usedNetwork = cloneNet(network);
+            if (network.getFunction()) {
+                usedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
+            } else {
+                usedNetwork = cloneNet(network);
+            }
             ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.deviceName, config);
             InferRequest inferRequest = exeNetwork.CreateInferRequest();
             if (inputs.empty()) {

@@ -215,6 +215,7 @@ void SingleLayerTransformationsTest::SetUp() {
     const SingleLayerTransformationsTestParams p = ::testing::WithParamInterface<SingleLayerTransformationsTestParams>::GetParam();
     // TODO: ONNX enabling
     CNNNetwork network = createNetwork();
+    ASSERT_EQ(nullptr, network.getFunction());

     const auto inputsInfo = network.getInputsInfo();
     std::unordered_map<std::string, Blob::Ptr> inputBlobs;
@@ -18,6 +18,7 @@
 #endif

 #include <gtest/gtest.h>
+#include <cnn_network_impl.hpp>
 #include <nodes/list.hpp>
 #include <mkldnn_graph.h>
 #include <mkldnn_memory.h>

@@ -212,12 +213,24 @@ public:

     void CreateGraph(InferenceEngine::ICNNNetwork &network, const MKLDNNPlugin::MKLDNNExtensionManager::Ptr& extMgr,
                      MKLDNNPlugin::MKLDNNWeightsSharing::Ptr cache = {}) {
-        MKLDNNGraph::CreateGraph(network, extMgr, cache);
+        if (network.getFunction()) {
+            auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
+            MKLDNNGraph::CreateGraph(static_cast<InferenceEngine::ICNNNetwork&>(*convertedNetwork),
+                                     extMgr, cache);
+        } else {
+            MKLDNNGraph::CreateGraph(network, extMgr, cache);
+        }
     }

     void CreateGraph(InferenceEngine::ICNNNetwork &network) {
         MKLDNNPlugin::MKLDNNWeightsSharing::Ptr cache;
-        MKLDNNGraph::CreateGraph(network, extensionManager, cache);
+        if (network.getFunction()) {
+            auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
+            MKLDNNGraph::CreateGraph(static_cast<InferenceEngine::ICNNNetwork&>(*convertedNetwork),
+                                     extensionManager, cache);
+        } else {
+            MKLDNNGraph::CreateGraph(network, extensionManager, cache);
+        }
     }

     void checkDynBatch(InferenceEngine::BlobMap& srcs, InferenceEngine::BlobMap& outputBlobs, int batch, size_t MB,